aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrea Merello <andrea.merello@gmail.com>2018-11-20 10:31:46 -0500
committerVinod Koul <vkoul@kernel.org>2019-01-06 23:23:11 -0500
commit5c094d4cac5ba78139f4d7169145b57af7f07981 (patch)
treeda727149d396b2122fd2fc75ad597c0614d15167
parent616f0f81d857e248a72b5af45ab185196556ae2e (diff)
dmaengine: xilinx_dma: in axidma slave_sg and dma_cyclic mode align split descriptors
Whenever a single or cyclic transaction is prepared, the driver could eventually split it over several SG descriptors in order to deal with the HW maximum transfer length. This could end up in DMA operations starting from a misaligned address. This seems fatal for the HW if DRE (Data Realignment Engine) is not enabled. This patch eventually adjusts the transfer size in order to make sure all operations start from an aligned address. Cc: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com> Signed-off-by: Andrea Merello <andrea.merello@gmail.com> Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com> Signed-off-by: Vinod Koul <vkoul@kernel.org>
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c9
1 file changed, 9 insertions, 0 deletions
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index fd9f37bafab0..93435f7002ab 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -977,6 +977,15 @@ static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
977 copy = min_t(size_t, size - done, 977 copy = min_t(size_t, size - done,
978 chan->xdev->max_buffer_len); 978 chan->xdev->max_buffer_len);
979 979
980 if ((copy + done < size) &&
981 chan->xdev->common.copy_align) {
982 /*
983 * If this is not the last descriptor, make sure
984 * the next one will be properly aligned
985 */
986 copy = rounddown(copy,
987 (1 << chan->xdev->common.copy_align));
988 }
980 return copy; 989 return copy;
981} 990}
982 991