author		Kedareswara rao Appana <appana.durga.rao@xilinx.com>	2017-12-07 00:21:03 -0500
committer	Vinod Koul <vinod.koul@intel.com>			2017-12-18 00:14:09 -0500
commit		fe0503e19310bddc892ddbfd6dfc8746abbe7261 (patch)
tree		3e3da8707bd46923cc8936062c25be10f5bc191a
parent		21e02a3e05cfd3994ca598a5c9c2e1795007b603 (diff)
dmaengine: xilinx_dma: Fix bug in multiple frame stores scenario in vdma
The VDMA engine's default frame buffer configuration is circular mode. In this
mode the dmaengine continuously circles through all of the frame buffers
configured in hardware (the fstore count). When the VDMA hardware is configured
for more than one frame, say n frames, and the user submits fewer than n frames
before triggering the dmaengine with the issue_pending API, the hardware, still
in its default circular mode, tries to read from or write to frame buffers that
were never programmed, and the VDMA engine reports errors.

Fix this by making park mode the default frame buffer configuration in
software, so that the driver can handle all cases of "k" submitted frames where
n % k == 0 (n is a multiple of k) by simply replicating the frame pointers.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
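For illustration, here is a minimal sketch of the park-pointer update this
patch introduces in xilinx_vdma_start_transfer(), pulled out into a standalone
helper. The helper name is hypothetical; dma_read()/dma_write() and the
mask/shift macros are the driver's own, as defined by the hunks below:

	/*
	 * Park the hardware on the frame the driver just programmed
	 * (chan->desc_submitcount), instead of letting it circle through
	 * all fstore frame buffers. Read-modify-write so the other
	 * direction's reference field in PARK_PTR is preserved.
	 */
	static void vdma_park_on_current_frame(struct xilinx_dma_chan *chan)
	{
		u32 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);

		if (chan->direction == DMA_MEM_TO_DEV) {
			/* MM2S: hardware reads from the parked frame */
			reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
			reg |= chan->desc_submitcount <<
			       XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
		} else {
			/* S2MM: hardware writes into the parked frame */
			reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
			reg |= chan->desc_submitcount <<
			       XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
		}
		dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
	}

Because the channel is parked rather than circling, the hardware only ever
touches the frame index written here, never an unprogrammed frame store.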
-rw-r--r--	drivers/dma/xilinx/xilinx_dma.c	41
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index c2465632f3f0..9063ca04e8d6 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -99,7 +99,9 @@
 #define XILINX_DMA_REG_FRMPTR_STS		0x0024
 #define XILINX_DMA_REG_PARK_PTR			0x0028
 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
 #define XILINX_DMA_REG_VDMA_VERSION		0x002c
 
 /* Register Direct Mode Registers */
@@ -998,7 +1000,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
-	u32 reg;
+	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;
 
 	/* This function was invoked with lock held */
@@ -1035,10 +1037,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/* Configure channel to allow number frame buffers */
-	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
-			chan->desc_pendingcount);
-
 	/*
 	 * With SG, start with circular mode, so that BDs can be fetched.
 	 * In direct register mode, if not parking, enable circular mode
@@ -1051,17 +1049,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
-	if (config->park && (config->park_frm >= 0) &&
-			(config->park_frm < chan->num_frms)) {
-		if (chan->direction == DMA_MEM_TO_DEV)
-			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-				config->park_frm <<
-					XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
-		else
-			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-				config->park_frm <<
-					XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+	j = chan->desc_submitcount;
+	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+	} else {
+		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
 	}
+	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
 
 	/* Start the hardware */
 	xilinx_dma_start(chan);
@@ -1073,6 +1070,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->has_sg) {
 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
 				tail_segment->phys);
+		list_splice_tail_init(&chan->pending_list, &chan->active_list);
+		chan->desc_pendingcount = 0;
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
@@ -1102,18 +1101,13 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
 				last->hw.stride);
 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-	}
 
-	if (!chan->has_sg) {
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
 		chan->desc_submitcount++;
 		chan->desc_pendingcount--;
+		list_del(&desc->node);
+		list_add_tail(&desc->node, &chan->active_list);
 		if (chan->desc_submitcount == chan->num_frms)
 			chan->desc_submitcount = 0;
-	} else {
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
 	}
 
 	chan->idle = false;
@@ -1364,6 +1358,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
 
 	chan->err = false;
 	chan->idle = true;
+	chan->desc_submitcount = 0;
 
 	return err;
 }
@@ -2363,6 +2358,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+			chan->config.park = 1;
 
 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
@@ -2379,6 +2375,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+			chan->config.park = 1;
 
 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
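
To see why the n % k == 0 condition in the commit message is sufficient, the
toy program below (illustrative only, not driver code; all names are made up)
simulates the desc_submitcount wrap from the hunks above, with n = 4 hardware
frame stores filled by replicating k = 2 user buffers. The parked index always
lands on a frame store holding a valid buffer, in order:

	#include <stdio.h>

	int main(void)
	{
		unsigned int n = 4;	/* hardware frame stores (fstore)  */
		unsigned int k = 2;	/* frames the user actually queued */
		unsigned int submitcount = 0;

		/* Frame store i holds user buffer i % k after replication. */
		for (unsigned int step = 0; step < 2 * n; step++) {
			printf("park frame %u -> user buffer %u\n",
			       submitcount, submitcount % k);
			if (++submitcount == n)	/* same wrap as the driver */
				submitcount = 0;
		}
		return 0;
	}

If n were not a multiple of k, the replication would leave the last n % k
frame stores holding a partial cycle of buffers, so the parked index would
periodically repeat a buffer out of sequence after the wrap.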