path: root/drivers/ntb/ntb_transport.c
Diffstat (limited to 'drivers/ntb/ntb_transport.c')
-rw-r--r--  drivers/ntb/ntb_transport.c  163
1 file changed, 97 insertions, 66 deletions
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 12a9e83c008b..3217f394d45b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,6 @@ struct ntb_transport_qp {
 
 	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
-	struct tasklet_struct rx_work;
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
 	spinlock_t ntb_rx_pend_q_lock;
@@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 	return 0;
 }
 
-static void ntb_qp_link_cleanup(struct work_struct *work)
+static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
 {
-	struct ntb_transport_qp *qp = container_of(work,
-						   struct ntb_transport_qp,
-						   link_cleanup);
 	struct ntb_transport *nt = qp->transport;
 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
@@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work)
 
 	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
 	qp->qp_link = NTB_LINK_DOWN;
+}
+
+static void ntb_qp_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport_qp *qp = container_of(work,
+						   struct ntb_transport_qp,
+						   link_cleanup);
+	struct ntb_transport *nt = qp->transport;
+
+	ntb_qp_link_cleanup(qp);
 
 	if (nt->transport_link == NTB_LINK_UP)
 		schedule_delayed_work(&qp->link_work,
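The two hunks above split the queue-pair teardown in two: the logic itself moves into a plain function taking the qp, while a thin work_struct wrapper keeps the deferred path working. The transport-level cleanup below gets the same split, which is what later lets ntb_transport_free() invoke the cleanup synchronously instead of only flipping transport_link. A minimal sketch of the pattern, with hypothetical names:

	/* Sketch only: the work handler is reduced to a container_of()
	 * wrapper around teardown logic that is now also callable
	 * synchronously, e.g. from a driver's remove path. */
	struct widget {
		struct work_struct cleanup_work;
		/* ... device state ... */
	};

	static void widget_cleanup(struct widget *w)
	{
		/* actual teardown */
	}

	static void widget_cleanup_work(struct work_struct *work)
	{
		struct widget *w = container_of(work, struct widget,
						cleanup_work);

		widget_cleanup(w);
	}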
@@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
 	schedule_work(&qp->link_cleanup);
 }
 
-static void ntb_transport_link_cleanup(struct work_struct *work)
+static void ntb_transport_link_cleanup(struct ntb_transport *nt)
 {
-	struct ntb_transport *nt = container_of(work, struct ntb_transport,
-						link_cleanup);
 	int i;
 
+	/* Pass along the info to any clients */
+	for (i = 0; i < nt->max_qps; i++)
+		if (!test_bit(i, &nt->qp_bitmap))
+			ntb_qp_link_cleanup(&nt->qps[i]);
+
 	if (nt->transport_link == NTB_LINK_DOWN)
 		cancel_delayed_work_sync(&nt->link_work);
 	else
 		nt->transport_link = NTB_LINK_DOWN;
 
-	/* Pass along the info to any clients */
-	for (i = 0; i < nt->max_qps; i++)
-		if (!test_bit(i, &nt->qp_bitmap))
-			ntb_qp_link_down(&nt->qps[i]);
-
 	/* The scratchpad registers keep the values if the remote side
 	 * goes down, blast them now to give them a sane value the next
 	 * time they are accessed
@@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work)
 		ntb_write_local_spad(nt->ndev, i, 0);
 }
 
+static void ntb_transport_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport *nt = container_of(work, struct ntb_transport,
+						link_cleanup);
+
+	ntb_transport_link_cleanup(nt);
+}
+
 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
 {
 	struct ntb_transport *nt = data;
@@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
 	}
 
 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
-	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
 	spin_lock_init(&qp->ntb_rx_pend_q_lock);
 	spin_lock_init(&qp->ntb_rx_free_q_lock);
@@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev)
 	}
 
 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
-	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
 
 	rc = ntb_register_event_callback(nt->ndev,
 					 ntb_transport_event_callback);
@@ -972,7 +984,7 @@ void ntb_transport_free(void *transport)
 	struct ntb_device *ndev = nt->ndev;
 	int i;
 
-	nt->transport_link = NTB_LINK_DOWN;
+	ntb_transport_link_cleanup(nt);
 
 	/* verify that all the qp's are freed */
 	for (i = 0; i < nt->max_qps; i++) {
@@ -1034,10 +1046,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t pay_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
-	unsigned long flags;
 
 	entry->len = len;
 
@@ -1045,35 +1056,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 		goto err;
 
 	if (len < copy_bytes)
-		goto err1;
+		goto err_wait;
 
 	device = chan->device;
 	pay_off = (size_t) offset & ~PAGE_MASK;
 	buff_off = (size_t) buf & ~PAGE_MASK;
 
 	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
-		goto err1;
+		goto err_wait;
 
-	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(device->dev, dest))
-		goto err1;
+	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+	if (!unmap)
+		goto err_wait;
 
-	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
-		goto err2;
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+				      pay_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[1]))
+		goto err_get_unmap;
+
+	unmap->from_cnt = 1;
 
-	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
-		DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
 	if (!txd)
-		goto err3;
+		goto err_get_unmap;
 
 	txd->callback = ntb_rx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err3;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	qp->last_cookie = cookie;
 
@@ -1081,11 +1106,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 
 	return;
 
-err3:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
-err2:
-	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
-err1:
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err_wait:
 	/* If the callbacks come out of order, the writing of the index to the
 	 * last completed will be out of order. This may result in the
 	 * receive stalling forever.
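These hunks move the RX path from hand-rolled dma_map_single()/dma_unmap_single() calls and the DMA_COMPL_*_UNMAP_SINGLE flags to the reference-counted dmaengine_unmap_data API, which unmaps the pages when the last reference is dropped. The error labels encode the accounting: dma_set_unmap() takes an extra reference on behalf of the descriptor, so err_set_unmap falls through to err_get_unmap and two dmaengine_unmap_put() calls run, releasing the descriptor's reference and the submitter's. A sketch of the success-path lifecycle, with error handling elided and src_page/dst_page and their offsets as stand-in names:

	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *txd;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);	/* kref = 1 */
	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len,
				      DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len,
				      DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	txd = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
						   unmap->addr[0], len,
						   DMA_PREP_INTERRUPT);
	dma_set_unmap(txd, unmap);	/* descriptor takes its own reference */
	dmaengine_submit(txd);
	dmaengine_unmap_put(unmap);	/* drop the submitter's reference; the
					 * unmaps run when the completed
					 * descriptor drops the last one */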
@@ -1175,11 +1200,14 @@ err:
 	goto out;
 }
 
-static void ntb_transport_rx(unsigned long data)
+static int ntb_transport_rxc_db(void *data, int db_num)
 {
-	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+	struct ntb_transport_qp *qp = data;
 	int rc, i;
 
+	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+		__func__, db_num);
+
 	/* Limit the number of packets processed in a single interrupt to
 	 * provide fairness to others
 	 */
@@ -1191,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data)
 
 	if (qp->dma_chan)
 		dma_async_issue_pending(qp->dma_chan);
-}
-
-static void ntb_transport_rxc_db(void *data, int db_num)
-{
-	struct ntb_transport_qp *qp = data;
-
-	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
-		__func__, db_num);
 
-	tasklet_schedule(&qp->rx_work);
+	return i;
 }
 
 static void ntb_tx_copy_callback(void *data)
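With the tasklet removed, the doorbell callback processes receive completions inline, still bounded by the fairness limit, and returns how many packets it handled. A hypothetical handler of the same shape (all names invented for illustration):

	#define MY_RX_BUDGET	10

	struct my_qp;				/* opaque queue state */
	int my_process_rxc(struct my_qp *qp);	/* <0 when the ring is empty */

	static int my_rxc_db(void *data, int db_num)
	{
		struct my_qp *qp = data;
		int i;

		/* drain up to a fixed budget, then yield for fairness */
		for (i = 0; i < MY_RX_BUDGET; i++)
			if (my_process_rxc(qp) < 0)
				break;

		return i;	/* packets handled for this doorbell */
	}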
@@ -1245,12 +1265,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t dest_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
+	dma_addr_t dest;
 	dma_cookie_t cookie;
 	void __iomem *offset;
 	size_t len = entry->len;
 	void *buf = entry->buf;
-	unsigned long flags;
 
 	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
 	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@@ -1273,28 +1293,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
 		goto err;
 
-	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
+	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+	if (!unmap)
 		goto err;
 
-	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
 	if (!txd)
-		goto err1;
+		goto err_get_unmap;
 
 	txd->callback = ntb_tx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err1;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	dma_async_issue_pending(chan);
 	qp->tx_async++;
 
 	return;
-err1:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
 err:
 	ntb_memcpy_tx(entry, offset);
 	qp->tx_memcpy++;
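The TX conversion mirrors the RX one but allocates a single unmap slot (note the 1 passed to dmaengine_get_unmap_data()): the destination is the transmit memory window, already held as a plain dma_addr_t, so only the source buffer needs a page mapping and a deferred unmap.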
@@ -1406,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 	qp->tx_handler = handlers->tx_handler;
 	qp->event_handler = handlers->event_handler;
 
+	dmaengine_get();
 	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
-	if (!qp->dma_chan)
+	if (!qp->dma_chan) {
+		dmaengine_put();
 		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
-	else
-		dmaengine_get();
+	}
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
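Taking the dmaengine client reference before the lookup is the point of this hunk: dma_find_channel() only returns channels while at least one dmaengine_get() reference is held, so the old order (taking the reference only after a successful find) depended on some other client already holding one. A sketch of the balanced pattern, error handling elided:

	struct dma_chan *chan;

	dmaengine_get();			/* populate the channel tables */
	chan = dma_find_channel(DMA_MEMCPY);
	if (!chan)
		dmaengine_put();		/* no memcpy channel: release now */

	/* ... and on teardown, if a channel was kept: */
	if (chan)
		dmaengine_put();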
@@ -1432,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 			     &qp->tx_free_q);
 	}
 
-	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
-
 	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
 				      ntb_transport_rxc_db);
 	if (rc)
-		goto err3;
+		goto err2;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
 
 	return qp;
 
-err3:
-	tasklet_disable(&qp->rx_work);
 err2:
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
 	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
 		kfree(entry);
+	if (qp->dma_chan)
+		dmaengine_put();
 	set_bit(free_queue, &nt->qp_bitmap);
 err:
 	return NULL;
@@ -1489,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	}
 
 	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
-	tasklet_disable(&qp->rx_work);
 
 	cancel_delayed_work_sync(&qp->link_work);
 