path: root/drivers/dma/at_hdmac.c
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r--  drivers/dma/at_hdmac.c  132
1 file changed, 87 insertions, 45 deletions
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..d3629b7482dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
 	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
+#define ATC_MAX_DSCR_TRIALS	10
+
 /*
  * Initial number of descriptors to allocate for each channel. This could
  * be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
  *
  * @current_len: the number of bytes left before reading CTRLA
  * @ctrla: the value of CTRLA
- * @desc: the descriptor containing the transfer width
  */
-static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
-				      struct at_desc *desc)
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
 {
-	return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
-}
-
-/**
- * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
- * to the current value of CTRLA.
- *
- * @current_len: the number of bytes left before reading CTRLA
- * @atchan: the channel to read CTRLA for
- * @desc: the descriptor containing the transfer width
- */
-static inline int atc_calc_bytes_left_from_reg(int current_len,
-		struct at_dma_chan *atchan, struct at_desc *desc)
-{
-	u32 ctrla = channel_readl(atchan, CTRLA);
-
-	return atc_calc_bytes_left(current_len, ctrla, desc);
+	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
+	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
+
+	/*
+	 * According to the datasheet, when reading the Control A Register
+	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
+	 * number of transfers completed on the Source Interface.
+	 * So btsize is always a number of source width transfers.
+	 */
+	return current_len - (btsize << src_width);
 }
 
 /**
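As a quick illustration of the arithmetic above: btsize counts completed source-width transfers and src_width is the width encoded as a shift count (0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes), so the completed byte count is btsize << src_width. The stand-alone sketch below uses made-up numbers and a local calc_bytes_left() helper, not the driver's function:

#include <stdio.h>

/* Stand-in for the computation above: btsize counts source-width transfers
 * and src_width is the width encoded as a shift, so the number of bytes
 * already read from the source is btsize << src_width.
 */
static int calc_bytes_left(int current_len, unsigned int btsize,
			   unsigned int src_width)
{
	return current_len - (btsize << src_width);
}

int main(void)
{
	/* 4096 bytes pending, 100 completed 4-byte transfers: 3696 left. */
	printf("%d\n", calc_bytes_left(4096, 100, 2));
	return 0;
}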
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr;
+	u32 ctrla, dscr, trials;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 		 * the channel's DSCR register and compare it against the value
 		 * of the hardware linked list structure of each child
 		 * descriptor.
+		 *
+		 * The CTRLA register provides us with the amount of data
+		 * already read from the source for the current child
+		 * descriptor. So we can compute a more accurate residue by also
+		 * removing the number of bytes corresponding to this amount of
+		 * data.
+		 *
+		 * However, the DSCR and CTRLA registers cannot both be read
+		 * atomically. Hence a race condition may occur: the first read
+		 * register may refer to one child descriptor whereas the second
+		 * read may refer to a later child descriptor in the list
+		 * because of the DMA transfer progression in between the two
+		 * reads.
+		 *
+		 * One solution could have been to pause the DMA transfer, read
+		 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
+		 * this approach presents some drawbacks:
+		 * - If the DMA transfer is paused, RX overruns or TX underruns
+		 *   are more likely to occur depending on the system latency.
+		 *   Taking the USART driver as an example, it uses a cyclic DMA
+		 *   transfer to read data from the Receive Holding Register
+		 *   (RHR) to avoid RX overruns since the RHR is not protected
+		 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
+		 *   to compute the residue would break the USART driver design.
+		 * - The atc_pause() function masks interrupts but we'd rather
+		 *   avoid doing so for system latency purposes.
+		 *
+		 * Then we'd rather use another solution: the DSCR is read a
+		 * first time, the CTRLA is read in turn, next the DSCR is read
+		 * a second time. If the two consecutive read values of the DSCR
+		 * are the same then we assume both refer to the very same
+		 * child descriptor, as does the CTRLA value read in between.
+		 * For cyclic transfers, the assumption is that a full loop
+		 * is "not so fast".
+		 * If the two DSCR values are different, we read again the CTRLA
+		 * then the DSCR till two consecutive read values from DSCR are
+		 * equal or till the maximum number of trials is reached.
+		 * This algorithm is very unlikely not to find a stable value for
+		 * DSCR.
 		 */
 
-		ctrla = channel_readl(atchan, CTRLA);
-		rmb(); /* ensure CTRLA is read before DSCR */
 		dscr = channel_readl(atchan, DSCR);
+		rmb(); /* ensure DSCR is read before CTRLA */
+		ctrla = channel_readl(atchan, CTRLA);
+		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+			u32 new_dscr;
+
+			rmb(); /* ensure DSCR is read after CTRLA */
+			new_dscr = channel_readl(atchan, DSCR);
+
+			/*
+			 * If the DSCR register value has not changed inside the
+			 * DMA controller since the previous read, we assume
+			 * that both the dscr and ctrla values refer to the
+			 * very same descriptor.
+			 */
+			if (likely(new_dscr == dscr))
+				break;
+
+			/*
+			 * DSCR has changed inside the DMA controller, so the
+			 * previously read value of CTRLA may refer to an already
+			 * processed descriptor hence could be outdated.
+			 * We need to update ctrla to match the current
+			 * descriptor.
+			 */
+			dscr = new_dscr;
+			rmb(); /* ensure DSCR is read before CTRLA */
+			ctrla = channel_readl(atchan, CTRLA);
+		}
+		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+			return -ETIMEDOUT;
 
 		/* for the first descriptor we can be more accurate */
 		if (desc_first->lli.dscr == dscr)
-			return atc_calc_bytes_left(ret, ctrla, desc_first);
+			return atc_calc_bytes_left(ret, ctrla);
 
 		ret -= desc_first->len;
 		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
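The retry loop added above follows a common pattern for sampling two registers that cannot be read atomically as a pair. Below is a minimal, self-contained sketch of that pattern; read_dscr() and read_ctrla() are simulated stand-ins for channel_readl(), and the rmb() barriers are left out because no real device is involved:

#include <stdint.h>
#include <stdio.h>

#define MAX_TRIALS 10

/* Simulated registers: the fake DSCR advances on every second read to mimic
 * the DMA controller moving on to the next descriptor.
 */
static uint32_t fake_dscr = 0x100;
static unsigned int dscr_reads;

static uint32_t read_dscr(void)
{
	if (++dscr_reads % 2 == 0)
		fake_dscr += 0x10;
	return fake_dscr;
}

static uint32_t read_ctrla(void)
{
	return 0x2000 | dscr_reads;	/* arbitrary value that drifts over time */
}

/*
 * Sample DSCR, then CTRLA, then DSCR again; only when the two DSCR samples
 * match do we trust that the CTRLA sample belongs to the same descriptor.
 * Returns 0 on success, -1 if no stable pair was found within MAX_TRIALS.
 */
static int read_stable_pair(uint32_t *dscr, uint32_t *ctrla)
{
	uint32_t cur = read_dscr();
	uint32_t val = read_ctrla();
	unsigned int trials;

	for (trials = 0; trials < MAX_TRIALS; ++trials) {
		uint32_t again = read_dscr();

		if (again == cur) {
			*dscr = cur;
			*ctrla = val;
			return 0;
		}
		/* DSCR moved on, so the CTRLA sample is stale: retry. */
		cur = again;
		val = read_ctrla();
	}
	return -1;
}

int main(void)
{
	uint32_t dscr, ctrla;

	if (read_stable_pair(&dscr, &ctrla) == 0)
		printf("stable pair: dscr=0x%x ctrla=0x%x\n",
		       (unsigned int)dscr, (unsigned int)ctrla);
	else
		printf("no stable DSCR value within %d trials\n", MAX_TRIALS);
	return 0;
}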
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 		}
 
 		/*
-		 * For the last descriptor in the chain we can calculate
+		 * For the current descriptor in the chain we can calculate
 		 * the remaining bytes using the channel's register.
-		 * Note that the transfer width of the first and last
-		 * descriptor may differ.
 		 */
-		if (!desc->lli.dscr)
-			ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+		ret = atc_calc_bytes_left(ret, ctrla);
 	} else {
 		/* single transfer */
-		ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
+		ctrla = channel_readl(atchan, CTRLA);
+		ret = atc_calc_bytes_left(ret, ctrla);
 	}
 
 	return ret;
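Putting the pieces together, the residue walk over the descriptor chain can be pictured with the stand-alone sketch below; the struct fields, addresses, and the 0xffff BTSIZE mask are illustrative assumptions, not the driver's struct at_desc or register layout:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct at_desc: each node knows its own length and
 * the hardware address of the next descriptor (lli.dscr in the driver).
 */
struct desc {
	int len;		/* bytes covered by this descriptor */
	uint32_t lli_dscr;	/* hardware "next" pointer, 0 at the end */
	struct desc *next;	/* software chaining, stands in for tx_list */
};

/*
 * total_len is the length of the whole transfer; hw_dscr and hw_ctrla are a
 * stable register pair sampled as in the previous sketch.  Subtract the
 * length of every descriptor the controller has already completed, then
 * refine the result with the in-flight descriptor's CTRLA value.
 */
static int residue(const struct desc *first, int total_len,
		   uint32_t hw_dscr, uint32_t hw_ctrla, unsigned int src_width)
{
	uint32_t btsize = hw_ctrla & 0xffff;	/* hypothetical BTSIZE mask */
	const struct desc *d;
	int ret = total_len;

	/* The controller is still working on the first descriptor. */
	if (first->lli_dscr == hw_dscr)
		return ret - (int)(btsize << src_width);

	ret -= first->len;
	for (d = first->next; d; d = d->next) {
		if (d->lli_dscr == hw_dscr)	/* found the in-flight descriptor */
			break;
		ret -= d->len;			/* this one is already done */
	}
	return ret - (int)(btsize << src_width);
}

int main(void)
{
	/* Three descriptors of 100 bytes each; pretend the controller is on
	 * the second one (its lli_dscr is 0x30) and has completed 10 4-byte
	 * source transfers (btsize == 10, src_width == 2): 160 bytes left.
	 */
	struct desc d2 = { 100, 0x00, NULL };
	struct desc d1 = { 100, 0x30, &d2 };
	struct desc d0 = { 100, 0x20, &d1 };

	printf("%d bytes left\n", residue(&d0, 300, 0x30, 10, 2));
	return 0;
}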
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 
 	desc->txd.cookie = -EBUSY;
 	desc->total_len = desc->len = len;
-	desc->tx_width = dwidth;
 
 	/* set end-of-link to the last link descriptor of list*/
 	set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	first->txd.cookie = -EBUSY;
 	first->total_len = len;
 
-	/* set transfer width for the calculation of the residue */
-	first->tx_width = src_width;
-	prev->tx_width = src_width;
-
 	/* set end-of-link to the last link descriptor of list*/
 	set_desc_eol(desc);
 
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	first->txd.cookie = -EBUSY;
 	first->total_len = total_len;
 
-	/* set transfer width for the calculation of the residue */
-	first->tx_width = reg_width;
-	prev->tx_width = reg_width;
-
 	/* first link descriptor of list is responsible of flags */
 	first->txd.flags = flags; /* client is in control of this ack */
 
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
 		desc->txd.cookie = 0;
 		desc->len = len;
 
-		/*
-		 * Although we only need the transfer width for the first and
-		 * the last descriptor, its easier to set it to all descriptors.
-		 */
-		desc->tx_width = src_width;
-
 		atc_desc_chain(&first, &prev, desc);
 
 		/* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
 	first->total_len = buf_len;
-	first->tx_width = reg_width;
 
 	return &first->txd;
 