author	Andrea Merello <andrea.merello@gmail.com>	2018-11-20 10:31:51 -0500
committer	Vinod Koul <vkoul@kernel.org>	2019-01-06 23:23:12 -0500
commit	b8349172b4001ea4e8b38a243275aecd90aa7573 (patch)
tree	7c2d58be45772b8f0112ac63d19baaacaba99ba9
parent	29b9ee4a0c0edf32667e2d95690f6ed810aad6ec (diff)
dmaengine: xilinx_dma: Drop SG support for VDMA IP
xilinx_vdma_start_transfer() is used only for the VDMA IP, yet it still contains code conditional on the has_sg variable. has_sg is set only when the hardware supports SG mode, which is never the case for the VDMA IP. Drop the never-taken branches.

Signed-off-by: Andrea Merello <andrea.merello@gmail.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
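Background: has_sg is populated once, at channel probe time, and is never set for VDMA channels. A minimal sketch of that probe-time check, assuming the SG autodetection that precedes this patch in the series (parent commit 29b9ee4a0c0e); the identifiers follow the driver's conventions, but the snippet is an illustration, not a quote of that patch:

	/*
	 * Sketch (assumption): SG capability is read back from the DMA
	 * status register, and the check is skipped for the VDMA IP, so
	 * chan->has_sg stays false and every has_sg branch in
	 * xilinx_vdma_start_transfer() is dead code.
	 */
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
		if (reg & XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
	}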
-rw-r--r--	drivers/dma/xilinx/xilinx_dma.c	84
1 file changed, 32 insertions(+), 52 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index b559efe06adb..d9431afa031b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1102,6 +1102,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1121,14 +1123,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);
 
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1145,15 +1139,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
@@ -1175,48 +1165,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-			       tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+				XILINX_VDMA_REG_START_ADDRESS_64(i++),
+				segment->hw.buf_addr,
+				segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
 				XILINX_VDMA_REG_START_ADDRESS(i++),
 				segment->hw.buf_addr);
 
-			last = segment;
-		}
-
-		if (!last)
-			return;
+		last = segment;
+	}
 
-		/* HW expects these parameters to be same for one transaction */
-		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-				last->hw.stride);
-		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+	if (!last)
+		return;
 
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		if (chan->desc_submitcount == chan->num_frms)
-			chan->desc_submitcount = 0;
-	}
+	/* HW expects these parameters to be same for one transaction */
+	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+			last->hw.stride);
+	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+
+	chan->desc_submitcount++;
+	chan->desc_pendingcount--;
+	list_del(&desc->node);
+	list_add_tail(&desc->node, &chan->active_list);
+	if (chan->desc_submitcount == chan->num_frms)
+		chan->desc_submitcount = 0;
 
 	chan->idle = false;
 }
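Note that the circular-mode hunk is behavior-preserving: with has_sg pinned to false, the old code reduces to "set CIRC_EN unless parking, then clear it when parking", which is exactly the new if/else. A standalone user-space sketch that checks the equivalence exhaustively (illustrative only; CIRC_EN is a placeholder for the driver's XILINX_DMA_DMACR_CIRC_EN bit):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define CIRC_EN (1u << 1)	/* placeholder for XILINX_DMA_DMACR_CIRC_EN */

	/* Old logic, with has_sg hard-wired to false as it always is for VDMA */
	static uint32_t old_logic(uint32_t reg, bool park)
	{
		const bool has_sg = false;

		if (has_sg || !park)
			reg |= CIRC_EN;
		if (park)
			reg &= ~CIRC_EN;
		return reg;
	}

	/* New logic from this patch */
	static uint32_t new_logic(uint32_t reg, bool park)
	{
		if (park)
			reg &= ~CIRC_EN;
		else
			reg |= CIRC_EN;
		return reg;
	}

	int main(void)
	{
		/* check both initial bit states against both park settings */
		uint32_t regs[] = { 0, CIRC_EN };

		for (int i = 0; i < 2; i++) {
			assert(old_logic(regs[i], true) == new_logic(regs[i], true));
			assert(old_logic(regs[i], false) == new_logic(regs[i], false));
		}
		return 0;
	}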