author     Linus Torvalds <torvalds@linux-foundation.org>   2016-06-19 12:52:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-06-19 12:52:20 -0400
commit     9af1f5d8f20f97884da55817ab80a6fcd170f296
tree       bad9c34daa7c4e864e47b1a103b2442995279954
parent     049a40c0a2d4b458583161ec1b9ce109408cb1aa
parent     5156463588c3999b630d9ffc6061a54962f3c2d9
Merge tag 'dmaengine-fix-4.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:
"Some fixes has piled up, so time to send them upstream.
These fixes include:
- at_xdmac fixes for residue and other stuff
- update MAINTAINERS for dma dt bindings
- mv_xor fix for incorrect offset"
* tag 'dmaengine-fix-4.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: mv_xor: Fix incorrect offset in dma_map_page()
dmaengine: at_xdmac: double FIFO flush needed to compute residue
dmaengine: at_xdmac: fix residue corruption
dmaengine: at_xdmac: align descriptors on 64 bits
MAINTAINERS: Add file patterns for dma device tree bindings
 MAINTAINERS            |  1
 drivers/dma/at_xdmac.c | 82
 drivers/dma/mv_xor.c   | 10
 3 files changed, 64 insertions(+), 29 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 98f8a5c92314..e1b090f86e0d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3778,6 +3778,7 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:  Maintained
 F:  drivers/dma/
 F:  include/linux/dmaengine.h
+F:  Documentation/devicetree/bindings/dma/
 F:  Documentation/dmaengine/
 T:  git git://git.infradead.org/users/vkoul/slave-dma.git
 
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 8e304b1befc5..75bd6621dc5d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
     u32     mbr_dus;    /* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 struct at_xdmac_desc {
     struct at_xdmac_lld         lld;
     enum dma_transfer_direction direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
     unsigned int                xfer_size;
     struct list_head            descs_list;
     struct list_head            xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
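The two hunks above force struct at_xdmac_desc, and with it the embedded hardware descriptor, onto a 64-bit boundary so that the controller's updates of CNDA and CUBC can be treated as one atomic 64-bit access by the residue code further down. A minimal sketch of what the alignment guarantees, using hypothetical stand-in types rather than the driver's own definitions:

#include <stdint.h>

/* Hypothetical stand-in for the first two words of the hardware descriptor. */
struct lld_example {
    uint32_t mbr_nda;   /* next descriptor address, mirrored in CNDA */
    uint32_t mbr_ubc;   /* microblock control, mirrored in CUBC */
};

/* Same idea as the patch's __aligned(sizeof(u64)) on struct at_xdmac_desc. */
struct desc_example {
    struct lld_example lld;
    /* ... software bookkeeping fields would follow ... */
} __attribute__((aligned(sizeof(uint64_t))));

/* With 8-byte alignment, mbr_nda and mbr_ubc occupy one naturally aligned
 * 64-bit word, so hardware can update both fields in a single access. */
_Static_assert(_Alignof(struct desc_example) == sizeof(uint64_t),
               "descriptor must be 64-bit aligned");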
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
     u32         cur_nda, check_nda, cur_ubc, mask, value;
     u8          dwidth = 0;
     unsigned long       flags;
+    bool            initd;
 
     ret = dma_cookie_status(chan, cookie, txstate);
     if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
     residue = desc->xfer_size;
     /*
      * Flush FIFO: only relevant when the transfer is source peripheral
-     * synchronized.
+     * synchronized. Flush is needed before reading CUBC because data in
+     * the FIFO are not reported by CUBC. Reporting a residue of the
+     * transfer length while we have data in FIFO can cause issue.
+     * Usecase: atmel USART has a timeout which means I have received
+     * characters but there is no more character received for a while. On
+     * timeout, it requests the residue. If the data are in the DMA FIFO,
+     * we will return a residue of the transfer length. It means no data
+     * received. If an application is waiting for these data, it will hang
+     * since we won't have another USART timeout without receiving new
+     * data.
      */
     mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
     value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
     }
 
     /*
-     * When processing the residue, we need to read two registers but we
-     * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-     * we stand in the descriptor list and AT_XDMAC_CUBC is used
-     * to know how many data are remaining for the current descriptor.
-     * Since the dma channel is not paused to not loose data, between the
-     * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-     * descriptor.
-     * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-     * still using the same descriptor by reading a second time
-     * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-     * read again AT_XDMAC_CUBC.
+     * The easiest way to compute the residue should be to pause the DMA
+     * but doing this can lead to miss some data as some devices don't
+     * have FIFO.
+     * We need to read several registers because:
+     * - DMA is running therefore a descriptor change is possible while
+     *   reading these registers
+     * - When the block transfer is done, the value of the CUBC register
+     *   is set to its initial value until the fetch of the next descriptor.
+     *   This value will corrupt the residue calculation so we have to skip
+     *   it.
+     *
+     * INITD --------                    ------------
+     *              |____________________|
+     *       _______________________  _______________
+     * NDA       @desc2             \/   @desc3
+     *       _______________________/\_______________
+     *       __________  ___________  _______________
+     * CUBC       0    \/ MAX desc1 \/  MAX desc2
+     *       __________/\___________/\_______________
+     *
+     * Since descriptors are aligned on 64 bits, we can assume that
+     * the update of NDA and CUBC is atomic.
      * Memory barriers are used to ensure the read order of the registers.
-     * A max number of retries is set because unlikely it can never ends if
-     * we are transferring a lot of data with small buffers.
+     * A max number of retries is set because unlikely it could never ends.
      */
-    cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-    rmb();
-    cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
     for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-        rmb();
         check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-        if (likely(cur_nda == check_nda))
-            break;
-
-        cur_nda = check_nda;
         rmb();
+        initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+        rmb();
         cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+        rmb();
+        cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+        rmb();
+
+        if ((check_nda == cur_nda) && initd)
+            break;
     }
 
     if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
     }
 
     /*
+     * Flush FIFO: only relevant when the transfer is source peripheral
+     * synchronized. Another flush is needed here because CUBC is updated
+     * when the controller sends the data write command. It can lead to
+     * report data that are not written in the memory or the device. The
+     * FIFO flush ensures that data are really written.
+     */
+    if ((desc->lld.mbr_cfg & mask) == value) {
+        at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+        while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+            cpu_relax();
+    }
+
+    /*
      * Remove size of all microblocks already transferred and the current
      * one. Then add the remaining size to transfer of the current
      * microblock.
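The use case spelled out in the comments above is a peripheral client, such as the Atmel USART on a receive timeout, asking the dmaengine core how much of a transfer is still outstanding. A rough sketch of how such a client reads the residue through the generic dmaengine API (the helper name and transfer-length handling are illustrative, not taken from the USART driver):

#include <linux/dmaengine.h>

/*
 * Illustrative only: on a peripheral timeout, query the residue of a slave
 * RX transfer and derive how many bytes actually landed in memory.
 */
static size_t example_rx_bytes_ready(struct dma_chan *chan, dma_cookie_t cookie,
                                     size_t xfer_len)
{
    struct dma_tx_state state;
    enum dma_status status;

    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_ERROR)
        return 0;

    /*
     * Without the FIFO flushes added above, state.residue could still count
     * data sitting in the DMA FIFO, making it look as if nothing had been
     * received yet even though characters are pending.
     */
    return xfer_len - state.residue;
}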
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 25d1dadcddd1..d0446a75990a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
         goto free_resources;
     }
 
-    src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
-                   PAGE_SIZE, DMA_TO_DEVICE);
+    src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+                   (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+                   DMA_TO_DEVICE);
     unmap->addr[0] = src_dma;
 
     ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
     }
     unmap->to_cnt = 1;
 
-    dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
-                    PAGE_SIZE, DMA_FROM_DEVICE);
+    dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+                    (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+                    DMA_FROM_DEVICE);
     unmap->addr[1] = dest_dma;
 
     ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
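For context on the mv_xor change above: the self-test buffers come from kmalloc() and are not guaranteed to start on a page boundary, so mapping virt_to_page(buf) with offset 0 hands the engine the start of the page rather than the buffer itself; passing the intra-page offset fixes that. A minimal sketch of the corrected mapping pattern (the helper is illustrative, not part of the driver):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Illustrative only: map a kmalloc'ed buffer for DMA. The buffer may start
 * anywhere inside its page, so the intra-page offset must be passed to
 * dma_map_page(); offset 0 would map the page start instead of the buffer.
 */
static dma_addr_t example_map_buffer(struct device *dev, void *buf, size_t len)
{
    /* offset_in_page(buf) is equivalent to (unsigned long)buf & ~PAGE_MASK */
    return dma_map_page(dev, virt_to_page(buf), offset_in_page(buf),
                        len, DMA_TO_DEVICE);
}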