author		Dave Jiang <dave.jiang@intel.com>	2016-07-20 16:14:13 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-08-07 22:41:42 -0400
commit		72203572afd7aef243c182f19925e5a77a1dc6a1 (patch)
tree		a8d97950881d84f5ecf0f803eb25a28ed8c485b8
parent		9cabc2691e9d21b840b145a944f09299f895a7e0 (diff)
ntb: add DMA error handling for RX DMA
Add support on the rx DMA path to allow recovery when the DMA engine responds with an error status and aborts all subsequent ops.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Allen Hubbe <Allen.Hubbe@emc.com>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: linux-ntb@googlegroups.com
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/ntb/ntb_transport.c	83
1 file changed, 67 insertions(+), 16 deletions(-)
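For context, the fix relies on the dmaengine "callback_result" completion hook: the completion callback receives a struct dmaengine_result, and when the engine reports a failed or aborted descriptor the consumer redoes the copy with the CPU instead of losing the frame. Below is a minimal sketch of that pattern, outside this driver and with hypothetical demo_* names; it is not the driver's own code, which additionally retries descriptor allocation and falls back to ntb_memcpy_rx() as the hunks that follow show.

/*
 * Hypothetical, self-contained consumer of the callback_result hook.
 * The demo_* names are illustrative and not part of ntb_transport.c.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/string.h>

struct demo_rx {
	void *dst;		/* CPU-visible destination buffer */
	const void *src;	/* CPU-visible source for the fallback copy */
	size_t len;
	unsigned int errors;
};

/* Completion handler; res is NULL when the copy was done by the CPU. */
static void demo_rx_done(void *param, const struct dmaengine_result *res)
{
	struct demo_rx *rx = param;

	if (res) {
		switch (res->result) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			rx->errors++;
			/* fall through: recover with a CPU copy */
		case DMA_TRANS_ABORTED:
			memcpy(rx->dst, rx->src, rx->len);
			break;
		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}
	/* the buffer is valid either way; normal completion continues here */
}

static int demo_submit_copy(struct dma_chan *chan, struct demo_rx *rx,
			    dma_addr_t dst, dma_addr_t src)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, rx->len,
						   DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	/* use callback_result, not callback, so the result can be checked */
	txd->callback_result = demo_rx_done;
	txd->callback_param = rx;

	if (dma_submit_error(dmaengine_submit(txd)))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}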
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e61db11e6ccc..8601c10acf74 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -105,13 +105,13 @@ struct ntb_queue_entry {
 	int retries;
 	int errors;
 	unsigned int tx_index;
+	unsigned int rx_index;
 
 	struct ntb_transport_qp *qp;
 	union {
 		struct ntb_payload_header __iomem *tx_hdr;
 		struct ntb_payload_header *rx_hdr;
 	};
-	unsigned int index;
 };
 
 struct ntb_rx_info {
@@ -265,6 +265,9 @@ static struct ntb_client ntb_transport_client;
 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 			       struct ntb_queue_entry *entry);
 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
+
 
 static int ntb_transport_bus_match(struct device *dev,
 				   struct device_driver *drv)
@@ -1235,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 			break;
 
 		entry->rx_hdr->flags = 0;
-		iowrite32(entry->index, &qp->rx_info->entry);
+		iowrite32(entry->rx_index, &qp->rx_info->entry);
 
 		cb_data = entry->cb_data;
 		len = entry->len;
@@ -1253,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_rx_copy_callback(void *data,
+				 const struct dmaengine_result *res)
 {
 	struct ntb_queue_entry *entry = data;
 
+	/* we need to check DMA results if we are using DMA */
+	if (res) {
+		enum dmaengine_tx_result dma_err = res->result;
+
+		switch (dma_err) {
+		case DMA_TRANS_READ_FAILED:
+		case DMA_TRANS_WRITE_FAILED:
+			entry->errors++;
+		case DMA_TRANS_ABORTED:
+		{
+			struct ntb_transport_qp *qp = entry->qp;
+			void *offset = qp->rx_buff + qp->rx_max_frame *
+					qp->rx_index;
+
+			ntb_memcpy_rx(entry, offset);
+			qp->rx_memcpy++;
+			return;
+		}
+
+		case DMA_TRANS_NOERROR:
+		default:
+			break;
+		}
+	}
+
 	entry->flags |= DESC_DONE_FLAG;
 
 	ntb_complete_rxc(entry->qp);
@@ -1272,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 	/* Ensure that the data is fully copied out before clearing the flag */
 	wmb();
 
-	ntb_rx_copy_callback(entry);
+	ntb_rx_copy_callback(entry, NULL);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
@@ -1288,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 	int retries = 0;
 
 	len = entry->len;
-
-	if (!chan)
-		goto err;
-
-	if (len < copy_bytes)
-		goto err;
-
 	device = chan->device;
 	pay_off = (size_t)offset & ~PAGE_MASK;
 	buff_off = (size_t)buf & ~PAGE_MASK;
@@ -1322,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 	unmap->from_cnt = 1;
 
 	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+		txd = device->device_prep_dma_memcpy(chan,
+						      unmap->addr[1],
 						      unmap->addr[0], len,
 						      DMA_PREP_INTERRUPT);
 		if (txd)
@@ -1337,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 		goto err_get_unmap;
 	}
 
-	txd->callback = ntb_rx_copy_callback;
+	txd->callback_result = ntb_rx_copy_callback;
 	txd->callback_param = entry;
 	dma_set_unmap(txd, unmap);
 
@@ -1351,13 +1374,38 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 
 	qp->rx_async++;
 
-	return;
+	return 0;
 
 err_set_unmap:
 	dmaengine_unmap_put(unmap);
 err_get_unmap:
 	dmaengine_unmap_put(unmap);
 err:
+	return -ENXIO;
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+{
+	struct ntb_transport_qp *qp = entry->qp;
+	struct dma_chan *chan = qp->rx_dma_chan;
+	int res;
+
+	if (!chan)
+		goto err;
+
+	if (entry->len < copy_bytes)
+		goto err;
+
+	res = ntb_async_rx_submit(entry, offset);
+	if (res < 0)
+		goto err;
+
+	if (!entry->retries)
+		qp->rx_async++;
+
+	return;
+
+err:
 	ntb_memcpy_rx(entry, offset);
 	qp->rx_memcpy++;
 }
@@ -1403,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	}
 
 	entry->rx_hdr = hdr;
-	entry->index = qp->rx_index;
+	entry->rx_index = qp->rx_index;
 
 	if (hdr->len > entry->len) {
 		dev_dbg(&qp->ndev->pdev->dev,
@@ -1981,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	entry->buf = data;
 	entry->len = len;
 	entry->flags = 0;
+	entry->retries = 0;
+	entry->errors = 0;
+	entry->rx_index = 0;
 
 	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 