Diffstat (limited to 'drivers/dma/at_xdmac.c')
-rw-r--r--  drivers/dma/at_xdmac.c  42
1 file changed, 39 insertions, 3 deletions
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 64f5d1bdbb48..8e304b1befc5 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -176,6 +176,7 @@
 #define AT_XDMAC_MAX_CHAN	0x20
 #define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
 #define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
+#define AT_XDMAC_RESIDUE_MAX_RETRIES	5
 
 #define AT_XDMAC_DMA_BUSWIDTHS\
 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -1395,8 +1396,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	struct at_xdmac_desc	*desc, *_desc;
 	struct list_head	*descs_list;
 	enum dma_status		ret;
-	int			residue;
-	u32			cur_nda, mask, value;
+	int			residue, retry;
+	u32			cur_nda, check_nda, cur_ubc, mask, value;
 	u8			dwidth = 0;
 	unsigned long		flags;
 
@@ -1433,7 +1434,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		cpu_relax();
 	}
 
+	/*
+	 * When processing the residue, we need to read two registers but we
+	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
+	 * we stand in the descriptor list and AT_XDMAC_CUBC is used to know
+	 * how much data remains for the current descriptor.
+	 * Since the dma channel is not paused (so that no data is lost), the
+	 * controller may move to another descriptor between the AT_XDMAC_CNDA
+	 * and AT_XDMAC_CUBC reads.
+	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
+	 * still using the same descriptor by reading AT_XDMAC_CNDA a second
+	 * time. If it has changed, we have to read AT_XDMAC_CUBC again.
+	 * Memory barriers are used to ensure the read order of the registers.
+	 * A maximum number of retries is set because this loop could
+	 * otherwise never end when transferring a lot of data with small
+	 * buffers.
+	 */
 	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+	rmb();
+	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+		rmb();
+		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+
+		if (likely(cur_nda == check_nda))
+			break;
+
+		cur_nda = check_nda;
+		rmb();
+		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+	}
+
+	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+		ret = DMA_ERROR;
+		goto spin_unlock;
+	}
+
 	/*
 	 * Remove size of all microblocks already transferred and the current
 	 * one. Then add the remaining size to transfer of the current
@@ -1446,7 +1482,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
 			break;
 	}
-	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+	residue += cur_ubc << dwidth;
 
 	dma_set_residue(txstate, residue);
 
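
To make the retry logic above easier to follow outside the driver, here is a
minimal user-space C sketch of the same bounded-retry snapshot pattern. It is
not the driver code: the register accesses are simulated (read_cnda(),
read_cubc() and the sim_* variables are invented for this illustration), the
kernel's rmb() barriers are omitted because only ordinary memory is read, and
the shift by dwidth assumes, as in the driver, that dwidth is log2 of the
transfer width in bytes.

#include <stdio.h>
#include <stdint.h>

#define RESIDUE_MAX_RETRIES	5

/*
 * Simulated "registers": the descriptor pointer advances on some reads to
 * mimic the DMA engine moving to the next descriptor while we sample it.
 */
static uint32_t sim_nda = 0x1000;	/* next descriptor address */
static uint32_t sim_ubc = 64;		/* remaining data of current desc */
static int sim_tick;

static uint32_t read_cnda(void)
{
	if (++sim_tick % 3 == 0) {	/* every third access: move on */
		sim_nda += 0x40;
		sim_ubc = 128;
	}
	return sim_nda;
}

static uint32_t read_cubc(void)
{
	return sim_ubc;
}

/*
 * Take a consistent (NDA, UBC) snapshot: read NDA, read UBC, then re-read
 * NDA; if it changed in between, resample UBC, up to a bounded number of
 * retries. Returns 0 on success, -1 if the descriptor kept changing.
 */
static int snapshot_residue_regs(uint32_t *nda, uint32_t *ubc)
{
	uint32_t cur_nda, check_nda, cur_ubc;
	int retry;

	cur_nda = read_cnda() & 0xfffffffc;
	cur_ubc = read_cubc();

	for (retry = 0; retry < RESIDUE_MAX_RETRIES; retry++) {
		check_nda = read_cnda() & 0xfffffffc;
		if (cur_nda == check_nda)
			break;

		/* Descriptor changed between the two reads: resample. */
		cur_nda = check_nda;
		cur_ubc = read_cubc();
	}

	if (retry >= RESIDUE_MAX_RETRIES)
		return -1;

	*nda = cur_nda;
	*ubc = cur_ubc;
	return 0;
}

int main(void)
{
	uint32_t nda, ubc;
	unsigned int dwidth = 2;	/* 32-bit transfers: 1 unit = 4 bytes */

	if (snapshot_residue_regs(&nda, &ubc)) {
		printf("no stable snapshot after %d retries\n",
		       RESIDUE_MAX_RETRIES);
		return 1;
	}

	printf("current descriptor: 0x%08x, residue contribution: %u bytes\n",
	       (unsigned int)nda, (unsigned int)(ubc << dwidth));
	return 0;
}

The retry cap mirrors AT_XDMAC_RESIDUE_MAX_RETRIES: with many small
descriptors the controller can in principle advance between every pair of
reads, so reporting DMA_ERROR after a few attempts is preferred over looping
forever in at_xdmac_tx_status().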