author    Dave Jiang <dave.jiang@intel.com>    2015-05-19 16:52:04 -0400
committer Jon Mason <jdmason@kudzu.us>         2015-07-04 14:09:24 -0400
commit    a41ef053f700618f5f55a1dd658908a71163400b (patch)
tree      6dbde672b790ffab2d8a784ed7cbab349afc1307
parent    06917f753547e6bba8a5d17f79971d1c071a70dd (diff)
NTB: Default to CPU memcpy for performance
Disable DMA usage by default, since the CPU provides much better performance
with write combining. Provide a module parameter to enable DMA usage when
offloading the memcpy is preferred.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Allen Hubbe <Allen.Hubbe@emc.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
-rw-r--r--    drivers/ntb/ntb_transport.c    17
1 file changed, 13 insertions(+), 4 deletions(-)
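The new knob is registered with module_param(use_dma, bool, 0644), so besides
being settable at load time (e.g. "modprobe ntb_transport use_dma=1") it is
exposed under /sys/module/ntb_transport/parameters/use_dma. The userspace C
sketch below, which is not part of the patch, simply reads that sysfs file to
check whether DMA offload is enabled; the path follows the standard
/sys/module/<name>/parameters/<param> layout, and a bool parameter reads back
as 'Y' or 'N'.

/* Illustrative only: query the ntb_transport "use_dma" parameter via sysfs. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/module/ntb_transport/parameters/use_dma";
        FILE *f = fopen(path, "r");
        int c;

        if (!f) {
                perror(path);   /* module not loaded, or parameter absent */
                return 1;
        }
        c = fgetc(f);           /* bool parameters read back as 'Y' or 'N' */
        fclose(f);
        printf("use_dma = %c\n", c);
        return 0;
}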
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 7a765d3230d8..e07b056af3be 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -88,6 +88,10 @@ static unsigned int copy_bytes = 1024;
 module_param(copy_bytes, uint, 0644);
 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
 
+static bool use_dma;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
+
 static struct dentry *nt_debugfs_dir;
 
 struct ntb_queue_entry {
@@ -1589,10 +1593,15 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	dma_cap_zero(dma_mask);
 	dma_cap_set(DMA_MEMCPY, dma_mask);
 
-	qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
-					   (void *)(unsigned long)node);
-	if (!qp->dma_chan)
-		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
+	if (use_dma) {
+		qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
+						   (void *)(unsigned long)node);
+		if (!qp->dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
+	} else {
+		qp->dma_chan = NULL;
+	}
+	dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
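For context, and purely as an illustration rather than driver code: with
use_dma left at its default of false, qp->dma_chan stays NULL and the
transport copies with the CPU, which is what the new dev_dbg() line reports.
The userspace sketch below mirrors that selection logic in miniature;
request_dma_channel() and copy_buf() are hypothetical stand-ins, not
functions from ntb_transport.c.

/* Illustrative userspace analogue of the patch's selection logic, not
 * driver code: a flag mirroring the new module parameter decides whether
 * to try an offload engine at all; if it is off (the new default) or the
 * engine is unavailable, fall back to a plain CPU memcpy. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool use_dma;	/* mirrors the module parameter: off by default */

/* Hypothetical stand-in for dma_request_channel(): pretend none exists. */
static void *request_dma_channel(void)
{
        return NULL;
}

static void copy_buf(void *dst, const void *src, size_t len)
{
        void *chan = use_dma ? request_dma_channel() : NULL;

        if (chan) {
                /* a real driver would submit a DMA memcpy descriptor here */
        } else {
                memcpy(dst, src, len);	/* CPU copy, as in the default path */
        }
        printf("Using %s memcpy\n", chan ? "DMA" : "CPU");
}

int main(void)
{
        char src[16] = "hello ntb", dst[16] = "";

        copy_buf(dst, src, sizeof(src));
        printf("dst = %s\n", dst);
        return 0;
}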