author		Lars-Peter Clausen <lars@metafoo.de>	2017-09-05 04:16:38 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2017-09-17 09:28:18 -0400
commit		008913dbeb1775ba365daa39462ca68884bd926f (patch)
tree		ffeb591891f212ca16b30faae38ec711696ade6a
parent		63ab76dbbdb8657e24645b7311ec3911a41039b5 (diff)
dmaengine: axi-dmac: Fix software cyclic mode
When running in software cyclic mode the driver currently does not go back to the first segment once the last segment has been reached, effectively making the transfer non-cyclic.

Fix this by going back to the first segment once the last segment has been reached for cyclic transfers.

Special care needs to be taken to avoid a segment being submitted multiple times concurrently, which could happen for transfers with a number of segments that is smaller than the depth of the DMA controller's internal queue.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/dma-axi-dmac.c	69
1 file changed, 51 insertions(+), 18 deletions(-)
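The scheme described in the commit message can be illustrated with a small, self-contained sketch. This is not the driver code: the structures, the function names and the plain counter standing in for the controller's transfer-ID register are invented for illustration, and everything unrelated to the cyclic wrap-around and the double-submission guard (virt-dma bookkeeping, the non-cyclic path, locking) is left out.

/*
 * Simplified user-space model of the fix, assuming two segments and a
 * hardware queue deep enough to hold both at once.  Names and types are
 * hypothetical; compile with a plain C compiler to trace the behaviour.
 */
#include <stdbool.h>
#include <stdio.h>

#define SG_UNUSED 32U   /* sentinel: segment is not currently in the HW queue */
#define NUM_SGS   2U    /* fewer segments than the controller's queue depth */

struct model_sg {
	unsigned int id;         /* hardware transfer ID, or SG_UNUSED */
	bool schedule_when_free; /* resubmit as soon as it completes */
};

struct model_desc {
	bool cyclic;
	unsigned int num_submitted;
	struct model_sg sg[NUM_SGS];
};

static unsigned int next_hw_id; /* stands in for AXI_DMAC_REG_TRANSFER_ID */

/* Submit the next segment, wrapping around for cyclic transfers. */
static void start_transfer(struct model_desc *desc)
{
	struct model_sg *sg = &desc->sg[desc->num_submitted];

	/* Already queued in the hardware: defer instead of submitting twice. */
	if (sg->id != SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	/* Cyclic: after the last segment, start again at segment 0.
	 * (Non-cyclic completion handling is omitted from this model.) */
	if (desc->num_submitted == NUM_SGS && desc->cyclic)
		desc->num_submitted = 0;

	sg->id = next_hw_id++ % 32;
	printf("submitted segment, hardware id %u\n", sg->id);
}

/* Completion path: free the slot and resubmit it if that was deferred. */
static void transfer_done(struct model_desc *desc, unsigned int slot)
{
	struct model_sg *sg = &desc->sg[slot];

	sg->id = SG_UNUSED;
	if (sg->schedule_when_free) {
		sg->schedule_when_free = false;
		start_transfer(desc);
	}
}

int main(void)
{
	struct model_desc desc = { .cyclic = true };
	unsigned int i;

	for (i = 0; i < NUM_SGS; i++)
		desc.sg[i].id = SG_UNUSED;

	start_transfer(&desc);   /* submits segment 0 */
	start_transfer(&desc);   /* submits segment 1, wraps back to segment 0 */
	start_transfer(&desc);   /* segment 0 is still queued: deferred */

	transfer_done(&desc, 0); /* completion triggers the deferred resubmission */
	return 0;
}

The point of the sentinel and the schedule_when_free flag is visible in the third call: without them the same segment would be handed to the hardware a second time while its first instance is still in flight.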
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index eb289aa187dd..2419fe524daa 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -72,6 +72,9 @@
 
 #define AXI_DMAC_FLAG_CYCLIC		BIT(0)
 
+/* The maximum ID allocated by the hardware is 31 */
+#define AXI_DMAC_SG_UNUSED 32U
+
 struct axi_dmac_sg {
 	dma_addr_t src_addr;
 	dma_addr_t dest_addr;
@@ -80,6 +83,7 @@ struct axi_dmac_sg {
 	unsigned int dest_stride;
 	unsigned int src_stride;
 	unsigned int id;
+	bool schedule_when_free;
 };
 
 struct axi_dmac_desc {
@@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 	}
 	sg = &desc->sg[desc->num_submitted];
 
+	/* Already queued in cyclic mode. Wait for it to finish */
+	if (sg->id != AXI_DMAC_SG_UNUSED) {
+		sg->schedule_when_free = true;
+		return;
+	}
+
 	desc->num_submitted++;
-	if (desc->num_submitted == desc->num_sgs)
-		chan->next_desc = NULL;
-	else
+	if (desc->num_submitted == desc->num_sgs) {
+		if (desc->cyclic)
+			desc->num_submitted = 0; /* Start again */
+		else
+			chan->next_desc = NULL;
+	} else {
 		chan->next_desc = desc;
+	}
 
 	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
 
@@ -239,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
 		struct axi_dmac_desc, vdesc.node);
 }
 
-static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	unsigned int completed_transfers)
 {
 	struct axi_dmac_desc *active;
 	struct axi_dmac_sg *sg;
+	bool start_next = false;
 
 	active = axi_dmac_active_desc(chan);
 	if (!active)
-		return;
+		return false;
 
-	if (active->cyclic) {
-		vchan_cyclic_callback(&active->vdesc);
-	} else {
-		do {
-			sg = &active->sg[active->num_completed];
-			if (!(BIT(sg->id) & completed_transfers))
-				break;
-			active->num_completed++;
-			if (active->num_completed == active->num_sgs) {
+	do {
+		sg = &active->sg[active->num_completed];
+		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+			break;
+		if (!(BIT(sg->id) & completed_transfers))
+			break;
+		active->num_completed++;
+		sg->id = AXI_DMAC_SG_UNUSED;
+		if (sg->schedule_when_free) {
+			sg->schedule_when_free = false;
+			start_next = true;
+		}
+
+		if (active->cyclic)
+			vchan_cyclic_callback(&active->vdesc);
+
+		if (active->num_completed == active->num_sgs) {
+			if (active->cyclic) {
+				active->num_completed = 0; /* wrap around */
+			} else {
 				list_del(&active->vdesc.node);
 				vchan_cookie_complete(&active->vdesc);
 				active = axi_dmac_active_desc(chan);
 			}
-		} while (active);
-	}
+		}
+	} while (active);
+
+	return start_next;
 }
 
 static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
 {
 	struct axi_dmac *dmac = devid;
 	unsigned int pending;
+	bool start_next = false;
 
 	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
 	if (!pending)
@@ -283,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
 		unsigned int completed;
 
 		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
-		axi_dmac_transfer_done(&dmac->chan, completed);
+		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
 	}
 	/* Space has become available in the descriptor queue */
-	if (pending & AXI_DMAC_IRQ_SOT)
+	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
 		axi_dmac_start_transfer(&dmac->chan);
 	spin_unlock(&dmac->chan.vchan.lock);
 
@@ -336,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
 static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 {
 	struct axi_dmac_desc *desc;
+	unsigned int i;
 
 	desc = kzalloc(sizeof(struct axi_dmac_desc) +
 		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
 	if (!desc)
 		return NULL;
 
+	for (i = 0; i < num_sgs; i++)
+		desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+
 	desc->num_sgs = num_sgs;
 
 	return desc;