Diffstat (limited to 'drivers/ntb/ntb_transport.c')
-rw-r--r--	drivers/ntb/ntb_transport.c	77
1 file changed, 41 insertions(+), 36 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d0222f13d154..3217f394d45b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,6 @@ struct ntb_transport_qp {
 
 	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
 			    void *data, int len);
-	struct tasklet_struct rx_work;
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
 	spinlock_t ntb_rx_pend_q_lock;
@@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 	return 0;
 }
 
-static void ntb_qp_link_cleanup(struct work_struct *work)
+static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
 {
-	struct ntb_transport_qp *qp = container_of(work,
-						   struct ntb_transport_qp,
-						   link_cleanup);
 	struct ntb_transport *nt = qp->transport;
 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
@@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work)
 
 	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
 	qp->qp_link = NTB_LINK_DOWN;
+}
+
+static void ntb_qp_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport_qp *qp = container_of(work,
+						   struct ntb_transport_qp,
+						   link_cleanup);
+	struct ntb_transport *nt = qp->transport;
+
+	ntb_qp_link_cleanup(qp);
 
 	if (nt->transport_link == NTB_LINK_UP)
 		schedule_delayed_work(&qp->link_work,
@@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
 	schedule_work(&qp->link_cleanup);
 }
 
-static void ntb_transport_link_cleanup(struct work_struct *work)
+static void ntb_transport_link_cleanup(struct ntb_transport *nt)
 {
-	struct ntb_transport *nt = container_of(work, struct ntb_transport,
-						link_cleanup);
 	int i;
 
+	/* Pass along the info to any clients */
+	for (i = 0; i < nt->max_qps; i++)
+		if (!test_bit(i, &nt->qp_bitmap))
+			ntb_qp_link_cleanup(&nt->qps[i]);
+
 	if (nt->transport_link == NTB_LINK_DOWN)
 		cancel_delayed_work_sync(&nt->link_work);
 	else
 		nt->transport_link = NTB_LINK_DOWN;
 
-	/* Pass along the info to any clients */
-	for (i = 0; i < nt->max_qps; i++)
-		if (!test_bit(i, &nt->qp_bitmap))
-			ntb_qp_link_down(&nt->qps[i]);
-
 	/* The scratchpad registers keep the values if the remote side
 	 * goes down, blast them now to give them a sane value the next
 	 * time they are accessed
@@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work)
 		ntb_write_local_spad(nt->ndev, i, 0);
 }
 
+static void ntb_transport_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport *nt = container_of(work, struct ntb_transport,
+						link_cleanup);
+
+	ntb_transport_link_cleanup(nt);
+}
+
 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
 {
 	struct ntb_transport *nt = data;
@@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
 	}
 
 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
-	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
 	spin_lock_init(&qp->ntb_rx_pend_q_lock);
 	spin_lock_init(&qp->ntb_rx_free_q_lock);
@@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev)
 	}
 
 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
-	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
 
 	rc = ntb_register_event_callback(nt->ndev,
 					 ntb_transport_event_callback);
@@ -972,7 +984,7 @@ void ntb_transport_free(void *transport)
 	struct ntb_device *ndev = nt->ndev;
 	int i;
 
-	nt->transport_link = NTB_LINK_DOWN;
+	ntb_transport_link_cleanup(nt);
 
 	/* verify that all the qp's are freed */
 	for (i = 0; i < nt->max_qps; i++) {
@@ -1188,11 +1200,14 @@ err:
 	goto out;
 }
 
-static void ntb_transport_rx(unsigned long data)
+static int ntb_transport_rxc_db(void *data, int db_num)
 {
-	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+	struct ntb_transport_qp *qp = data;
 	int rc, i;
 
+	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+		__func__, db_num);
+
 	/* Limit the number of packets processed in a single interrupt to
 	 * provide fairness to others
 	 */
@@ -1204,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data)
 
 	if (qp->dma_chan)
 		dma_async_issue_pending(qp->dma_chan);
-}
-
-static void ntb_transport_rxc_db(void *data, int db_num)
-{
-	struct ntb_transport_qp *qp = data;
-
-	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
-		__func__, db_num);
 
-	tasklet_schedule(&qp->rx_work);
+	return i;
 }
 
 static void ntb_tx_copy_callback(void *data)
@@ -1432,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 	qp->tx_handler = handlers->tx_handler;
 	qp->event_handler = handlers->event_handler;
 
+	dmaengine_get();
 	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
-	if (!qp->dma_chan)
+	if (!qp->dma_chan) {
+		dmaengine_put();
 		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
-	else
-		dmaengine_get();
+	}
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
@@ -1458,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 				     &qp->tx_free_q);
 	}
 
-	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
-
 	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
 				      ntb_transport_rxc_db);
 	if (rc)
-		goto err3;
+		goto err2;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
 
 	return qp;
 
-err3:
-	tasklet_disable(&qp->rx_work);
 err2:
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
 	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
 		kfree(entry);
+	if (qp->dma_chan)
+		dmaengine_put();
 	set_bit(free_queue, &nt->qp_bitmap);
 err:
 	return NULL;
@@ -1515,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	}
 
 	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
-	tasklet_disable(&qp->rx_work);
 
 	cancel_delayed_work_sync(&qp->link_work);
 