author    Dave Jiang <dave.jiang@intel.com>  2016-07-25 17:34:03 (GMT)
committer Vinod Koul <vinod.koul@intel.com>  2016-08-08 02:41:43 (GMT)
commit    a941106de4434c0173a2c6d5abedb2d1cfc11206 (patch)
tree      376cdccd11295928551413901abf1c15f0c14190 /drivers/dma/fsl_raid.c
parent    9b335978f7081cd4fe264709599a18073e12fee2 (diff)
download  linux-a941106de4434c0173a2c6d5abedb2d1cfc11206.tar.xz
dmaengine: fsl_raid: move unmap to before callback
The completion callback should only be invoked after dma_descriptor_unmap() has run. This allows the cache invalidation to take place and ensures that the data the upper layer accesses is the memory written by the DMA rather than stale data. On some architectures this is handled by the hardware, but the code should be consistent across drivers to avoid confusion.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Cc: Xuelin Shi <xuelin.shi@freescale.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/fsl_raid.c')
-rw-r--r--  drivers/dma/fsl_raid.c  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 35d017a..a8c8b9e 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -135,8 +135,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
dma_cookie_complete(&desc->async_tx);
- dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
dma_descriptor_unmap(&desc->async_tx);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}
static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
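For reference, this is a sketch of the resulting completion path with the rationale spelled out as comments. The comments are editorial and not part of the commit; the headers and helpers (dma_cookie_complete(), dma_descriptor_unmap(), dmaengine_desc_get_callback_invoke()) are the standard dmaengine ones used by drivers/dma/fsl_raid.c.

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_complete(), callback helpers */
#include "fsl_raid.h"	/* struct fsl_re_desc */

static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
	/* Mark the cookie complete so status queries report DMA_COMPLETE. */
	dma_cookie_complete(&desc->async_tx);

	/* Unmap first: on architectures without coherent DMA, this is
	 * where the CPU caches covering the DMA buffers are invalidated.
	 */
	dma_descriptor_unmap(&desc->async_tx);

	/* Only then run the client's callback, so it reads the data the
	 * DMA produced rather than stale cache contents.
	 */
	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}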