path: root/drivers/ntb
author	Allen Hubbe <Allen.Hubbe@emc.com>	2015-05-18 06:20:47 -0400
committer	Jon Mason <jdmason@kudzu.us>	2015-07-04 14:08:33 -0400
commit	1199aa61264a74717bc747e7031673242bad5119 (patch)
tree	7abdd0cc51a2559980c233e3d548f016e2b11db0 /drivers/ntb
parent	2876228941ac5dcab12854aa5a3462b3f2274b09 (diff)
NTB: Use NUMA memory and DMA chan in transport
Allocate memory and request the DMA channel for the same NUMA node as
the NTB device.

Signed-off-by: Allen Hubbe <Allen.Hubbe@emc.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
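In outline, the NUMA-affinity pattern the patch applies is: look up the device's node with dev_to_node(), allocate with kzalloc_node() instead of kzalloc(), and pass the node through a dma_filter_fn so dma_request_channel() only accepts channels sitting on that node. A minimal sketch of that pattern follows; the kernel APIs are real, but my_ctx, my_dma_filter, and my_setup are hypothetical names used purely for illustration, not part of this patch:

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/slab.h>

	struct my_ctx {
		struct dma_chan *chan;
	};

	/* Filter: accept only channels whose device is on the requested node. */
	static bool my_dma_filter(struct dma_chan *chan, void *node)
	{
		return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
	}

	static struct my_ctx *my_setup(struct device *dev)
	{
		dma_cap_mask_t mask;
		struct my_ctx *ctx;
		int node = dev_to_node(dev);	/* NUMA node of the device */

		/* Allocate on the device's node rather than the caller's. */
		ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, node);
		if (!ctx)
			return NULL;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* Prefer a memcpy-capable DMA channel local to the same node. */
		ctx->chan = dma_request_channel(mask, my_dma_filter,
						(void *)(unsigned long)node);
		return ctx;
	}

If no node-local channel is found, the channel pointer stays NULL and the transport falls back to CPU copies, as the dev_info() in the patch notes.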
Diffstat (limited to 'drivers/ntb')
-rw-r--r--	drivers/ntb/ntb_transport.c	46
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 8ce0bf67ac20..dc14ec81c43e 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -346,6 +346,7 @@ int ntb_transport_register_client_dev(char *device_name)
 {
 	struct ntb_transport_client_dev *client_dev;
 	struct ntb_transport_ctx *nt;
+	int node;
 	int rc, i = 0;
 
 	if (list_empty(&ntb_transport_list))
@@ -354,8 +355,10 @@ int ntb_transport_register_client_dev(char *device_name)
 	list_for_each_entry(nt, &ntb_transport_list, entry) {
 		struct device *dev;
 
-		client_dev = kzalloc(sizeof(*client_dev),
-				     GFP_KERNEL);
+		node = dev_to_node(&nt->ndev->dev);
+
+		client_dev = kzalloc_node(sizeof(*client_dev),
+					  GFP_KERNEL, node);
 		if (!client_dev) {
 			rc = -ENOMEM;
 			goto err;
@@ -953,6 +956,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	struct ntb_transport_mw *mw;
 	unsigned int mw_count, qp_count;
 	u64 qp_bitmap;
+	int node;
 	int rc, i;
 
 	if (ntb_db_is_unsafe(ndev))
@@ -962,7 +966,9 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 		dev_dbg(&ndev->dev,
 			"scratchpad is unsafe, proceed anyway...\n");
 
-	nt = kzalloc(sizeof(*nt), GFP_KERNEL);
+	node = dev_to_node(&ndev->dev);
+
+	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
 	if (!nt)
 		return -ENOMEM;
 
@@ -972,7 +978,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 
 	nt->mw_count = mw_count;
 
-	nt->mw_vec = kcalloc(mw_count, sizeof(*nt->mw_vec), GFP_KERNEL);
+	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
+				  GFP_KERNEL, node);
 	if (!nt->mw_vec) {
 		rc = -ENOMEM;
 		goto err;
@@ -1012,7 +1019,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	nt->qp_bitmap = qp_bitmap;
 	nt->qp_bitmap_free = qp_bitmap;
 
-	nt->qp_vec = kcalloc(qp_count, sizeof(*nt->qp_vec), GFP_KERNEL);
+	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
+				  GFP_KERNEL, node);
 	if (!nt->qp_vec) {
 		rc = -ENOMEM;
 		goto err2;
@@ -1512,6 +1520,11 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
 	ntb_qp_link_down_reset(qp);
 }
 
+static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
+}
+
 /**
  * ntb_transport_create_queue - Create a new NTB transport layer queue
  * @rx_handler: receive callback function
@@ -1537,12 +1550,16 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	struct ntb_transport_qp *qp;
 	u64 qp_bit;
 	unsigned int free_queue;
+	dma_cap_mask_t dma_mask;
+	int node;
 	int i;
 
 	ndev = dev_ntb(client_dev->parent);
 	pdev = ndev->pdev;
 	nt = ndev->ctx;
 
+	node = dev_to_node(&ndev->dev);
+
 	free_queue = ffs(nt->qp_bitmap);
 	if (!free_queue)
 		goto err;
@@ -1560,15 +1577,16 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	qp->tx_handler = handlers->tx_handler;
 	qp->event_handler = handlers->event_handler;
 
-	dmaengine_get();
-	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
-	if (!qp->dma_chan) {
-		dmaengine_put();
+	dma_cap_zero(dma_mask);
+	dma_cap_set(DMA_MEMCPY, dma_mask);
+
+	qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					   (void *)(unsigned long)node);
+	if (!qp->dma_chan)
 		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
-	}
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
-		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
 		if (!entry)
 			goto err1;
 
@@ -1578,7 +1596,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	}
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
-		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
 		if (!entry)
 			goto err2;
 
@@ -1601,7 +1619,7 @@ err1:
 	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 	if (qp->dma_chan)
-		dmaengine_put();
+		dma_release_channel(qp->dma_chan);
 	nt->qp_bitmap_free |= qp_bit;
 err:
 	return NULL;
@@ -1638,7 +1656,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	 */
 	dma_sync_wait(chan, qp->last_cookie);
 	dmaengine_terminate_all(chan);
-	dmaengine_put();
+	dma_release_channel(chan);
 	}
 
 	qp_bit = BIT_ULL(qp->qp_num);