aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2016-07-20 16:14:07 -0400
committerVinod Koul <vinod.koul@intel.com>2016-08-07 22:41:42 -0400
commit9cabc2691e9d21b840b145a944f09299f895a7e0 (patch)
tree754f36f0c3ca1fef5696de5ccf92769a5d42ea84
parentaed681d1dc72914d448e44a99e1dc89baa32d25c (diff)
ntb: add DMA error handling for TX DMA
Adding support on the TX DMA path to allow recovery when the DMA engine responds with an error status, and to abort all subsequent ops. Signed-off-by: Dave Jiang <dave.jiang@intel.com> Acked-by: Allen Hubbe <Allen.Hubbe@emc.com> Cc: Jon Mason <jdmason@kudzu.us> Cc: linux-ntb@googlegroups.com Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--drivers/ntb/ntb_transport.c110
1 file changed, 83 insertions(+), 27 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d5c5894f252e..e61db11e6ccc 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -102,6 +102,9 @@ struct ntb_queue_entry {
102 void *buf; 102 void *buf;
103 unsigned int len; 103 unsigned int len;
104 unsigned int flags; 104 unsigned int flags;
105 int retries;
106 int errors;
107 unsigned int tx_index;
105 108
106 struct ntb_transport_qp *qp; 109 struct ntb_transport_qp *qp;
107 union { 110 union {
@@ -259,6 +262,9 @@ enum {
259static void ntb_transport_rxc_db(unsigned long data); 262static void ntb_transport_rxc_db(unsigned long data);
260static const struct ntb_ctx_ops ntb_transport_ops; 263static const struct ntb_ctx_ops ntb_transport_ops;
261static struct ntb_client ntb_transport_client; 264static struct ntb_client ntb_transport_client;
265static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
266 struct ntb_queue_entry *entry);
267static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
262 268
263static int ntb_transport_bus_match(struct device *dev, 269static int ntb_transport_bus_match(struct device *dev,
264 struct device_driver *drv) 270 struct device_driver *drv)
@@ -1467,12 +1473,39 @@ static void ntb_transport_rxc_db(unsigned long data)
1467 } 1473 }
1468} 1474}
1469 1475
1470static void ntb_tx_copy_callback(void *data) 1476static void ntb_tx_copy_callback(void *data,
1477 const struct dmaengine_result *res)
1471{ 1478{
1472 struct ntb_queue_entry *entry = data; 1479 struct ntb_queue_entry *entry = data;
1473 struct ntb_transport_qp *qp = entry->qp; 1480 struct ntb_transport_qp *qp = entry->qp;
1474 struct ntb_payload_header __iomem *hdr = entry->tx_hdr; 1481 struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1475 1482
1483 /* we need to check DMA results if we are using DMA */
1484 if (res) {
1485 enum dmaengine_tx_result dma_err = res->result;
1486
1487 switch (dma_err) {
1488 case DMA_TRANS_READ_FAILED:
1489 case DMA_TRANS_WRITE_FAILED:
1490 entry->errors++;
1491 case DMA_TRANS_ABORTED:
1492 {
1493 void __iomem *offset =
1494 qp->tx_mw + qp->tx_max_frame *
1495 entry->tx_index;
1496
1497 /* resubmit via CPU */
1498 ntb_memcpy_tx(entry, offset);
1499 qp->tx_memcpy++;
1500 return;
1501 }
1502
1503 case DMA_TRANS_NOERROR:
1504 default:
1505 break;
1506 }
1507 }
1508
1476 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); 1509 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1477 1510
1478 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); 1511 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
@@ -1507,40 +1540,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
1507 /* Ensure that the data is fully copied out before setting the flags */ 1540 /* Ensure that the data is fully copied out before setting the flags */
1508 wmb(); 1541 wmb();
1509 1542
1510 ntb_tx_copy_callback(entry); 1543 ntb_tx_copy_callback(entry, NULL);
1511} 1544}
1512 1545
1513static void ntb_async_tx(struct ntb_transport_qp *qp, 1546static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1514 struct ntb_queue_entry *entry) 1547 struct ntb_queue_entry *entry)
1515{ 1548{
1516 struct ntb_payload_header __iomem *hdr;
1517 struct dma_async_tx_descriptor *txd; 1549 struct dma_async_tx_descriptor *txd;
1518 struct dma_chan *chan = qp->tx_dma_chan; 1550 struct dma_chan *chan = qp->tx_dma_chan;
1519 struct dma_device *device; 1551 struct dma_device *device;
1552 size_t len = entry->len;
1553 void *buf = entry->buf;
1520 size_t dest_off, buff_off; 1554 size_t dest_off, buff_off;
1521 struct dmaengine_unmap_data *unmap; 1555 struct dmaengine_unmap_data *unmap;
1522 dma_addr_t dest; 1556 dma_addr_t dest;
1523 dma_cookie_t cookie; 1557 dma_cookie_t cookie;
1524 void __iomem *offset;
1525 size_t len = entry->len;
1526 void *buf = entry->buf;
1527 int retries = 0; 1558 int retries = 0;
1528 1559
1529 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1530 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1531 entry->tx_hdr = hdr;
1532
1533 iowrite32(entry->len, &hdr->len);
1534 iowrite32((u32)qp->tx_pkts, &hdr->ver);
1535
1536 if (!chan)
1537 goto err;
1538
1539 if (len < copy_bytes)
1540 goto err;
1541
1542 device = chan->device; 1560 device = chan->device;
1543 dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index; 1561 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
1544 buff_off = (size_t)buf & ~PAGE_MASK; 1562 buff_off = (size_t)buf & ~PAGE_MASK;
1545 dest_off = (size_t)dest & ~PAGE_MASK; 1563 dest_off = (size_t)dest & ~PAGE_MASK;
1546 1564
@@ -1560,8 +1578,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1560 unmap->to_cnt = 1; 1578 unmap->to_cnt = 1;
1561 1579
1562 for (retries = 0; retries < DMA_RETRIES; retries++) { 1580 for (retries = 0; retries < DMA_RETRIES; retries++) {
1563 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], 1581 txd = device->device_prep_dma_memcpy(chan, dest,
1564 len, DMA_PREP_INTERRUPT); 1582 unmap->addr[0], len,
1583 DMA_PREP_INTERRUPT);
1565 if (txd) 1584 if (txd)
1566 break; 1585 break;
1567 1586
@@ -1574,7 +1593,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1574 goto err_get_unmap; 1593 goto err_get_unmap;
1575 } 1594 }
1576 1595
1577 txd->callback = ntb_tx_copy_callback; 1596 txd->callback_result = ntb_tx_copy_callback;
1578 txd->callback_param = entry; 1597 txd->callback_param = entry;
1579 dma_set_unmap(txd, unmap); 1598 dma_set_unmap(txd, unmap);
1580 1599
@@ -1585,14 +1604,48 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1585 dmaengine_unmap_put(unmap); 1604 dmaengine_unmap_put(unmap);
1586 1605
1587 dma_async_issue_pending(chan); 1606 dma_async_issue_pending(chan);
1588 qp->tx_async++;
1589 1607
1590 return; 1608 return 0;
1591err_set_unmap: 1609err_set_unmap:
1592 dmaengine_unmap_put(unmap); 1610 dmaengine_unmap_put(unmap);
1593err_get_unmap: 1611err_get_unmap:
1594 dmaengine_unmap_put(unmap); 1612 dmaengine_unmap_put(unmap);
1595err: 1613err:
1614 return -ENXIO;
1615}
1616
1617static void ntb_async_tx(struct ntb_transport_qp *qp,
1618 struct ntb_queue_entry *entry)
1619{
1620 struct ntb_payload_header __iomem *hdr;
1621 struct dma_chan *chan = qp->tx_dma_chan;
1622 void __iomem *offset;
1623 int res;
1624
1625 entry->tx_index = qp->tx_index;
1626 offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
1627 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1628 entry->tx_hdr = hdr;
1629
1630 iowrite32(entry->len, &hdr->len);
1631 iowrite32((u32)qp->tx_pkts, &hdr->ver);
1632
1633 if (!chan)
1634 goto err;
1635
1636 if (entry->len < copy_bytes)
1637 goto err;
1638
1639 res = ntb_async_tx_submit(qp, entry);
1640 if (res < 0)
1641 goto err;
1642
1643 if (!entry->retries)
1644 qp->tx_async++;
1645
1646 return;
1647
1648err:
1596 ntb_memcpy_tx(entry, offset); 1649 ntb_memcpy_tx(entry, offset);
1597 qp->tx_memcpy++; 1650 qp->tx_memcpy++;
1598} 1651}
@@ -1970,6 +2023,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1970 entry->buf = data; 2023 entry->buf = data;
1971 entry->len = len; 2024 entry->len = len;
1972 entry->flags = 0; 2025 entry->flags = 0;
2026 entry->errors = 0;
2027 entry->retries = 0;
2028 entry->tx_index = 0;
1973 2029
1974 rc = ntb_process_tx(qp, entry); 2030 rc = ntb_process_tx(qp, entry);
1975 if (rc) 2031 if (rc)