aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ntb
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2016-04-08 13:49:06 -0400
committerJon Mason <jdmason@kudzu.us>2016-08-05 10:21:05 -0400
commita754a8fcaf383be3c5fcc6c3c08e36d9f3005988 (patch)
tree606a8e0132113a30a10ca60364a5da217367fc09 /drivers/ntb
parent625f0802e84e47760959a5015fdc704d809fd6c8 (diff)
NTB: allocate the number of transport entries depending on the ring size
Currently we only allocate a fixed default number of descriptors for the tx and rx side. We should dynamically resize it to the number of descriptors that reside in the transport rings. We know the number of transmit descriptors at initialization. We will allocate the default number of descriptors for the receive side and allocate additional ones when we know the actual max entries for receive. Signed-off-by: Dave Jiang <dave.jiang@intel.com> Acked-by: Allen Hubbe <allen.hubbe@emc.com> Signed-off-by: Jon Mason <jdmason@kudzu.us>
Diffstat (limited to 'drivers/ntb')
-rw-r--r--drivers/ntb/ntb_transport.c29
1 files changed, 27 insertions, 2 deletions
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 2ef9d9130864..6db8c8528f26 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -153,6 +153,7 @@ struct ntb_transport_qp {
153 unsigned int rx_index; 153 unsigned int rx_index;
154 unsigned int rx_max_entry; 154 unsigned int rx_max_entry;
155 unsigned int rx_max_frame; 155 unsigned int rx_max_frame;
156 unsigned int rx_alloc_entry;
156 dma_cookie_t last_cookie; 157 dma_cookie_t last_cookie;
157 struct tasklet_struct rxc_db_work; 158 struct tasklet_struct rxc_db_work;
158 159
@@ -480,7 +481,9 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
480 out_offset += snprintf(buf + out_offset, out_count - out_offset, 481 out_offset += snprintf(buf + out_offset, out_count - out_offset,
481 "rx_index - \t%u\n", qp->rx_index); 482 "rx_index - \t%u\n", qp->rx_index);
482 out_offset += snprintf(buf + out_offset, out_count - out_offset, 483 out_offset += snprintf(buf + out_offset, out_count - out_offset,
483 "rx_max_entry - \t%u\n\n", qp->rx_max_entry); 484 "rx_max_entry - \t%u\n", qp->rx_max_entry);
485 out_offset += snprintf(buf + out_offset, out_count - out_offset,
486 "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
484 487
485 out_offset += snprintf(buf + out_offset, out_count - out_offset, 488 out_offset += snprintf(buf + out_offset, out_count - out_offset,
486 "tx_bytes - \t%llu\n", qp->tx_bytes); 489 "tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -597,9 +600,12 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
597{ 600{
598 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; 601 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
599 struct ntb_transport_mw *mw; 602 struct ntb_transport_mw *mw;
603 struct ntb_dev *ndev = nt->ndev;
604 struct ntb_queue_entry *entry;
600 unsigned int rx_size, num_qps_mw; 605 unsigned int rx_size, num_qps_mw;
601 unsigned int mw_num, mw_count, qp_count; 606 unsigned int mw_num, mw_count, qp_count;
602 unsigned int i; 607 unsigned int i;
608 int node;
603 609
604 mw_count = nt->mw_count; 610 mw_count = nt->mw_count;
605 qp_count = nt->qp_count; 611 qp_count = nt->qp_count;
@@ -626,6 +632,23 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
626 qp->rx_max_entry = rx_size / qp->rx_max_frame; 632 qp->rx_max_entry = rx_size / qp->rx_max_frame;
627 qp->rx_index = 0; 633 qp->rx_index = 0;
628 634
635 /*
636 * Checking to see if we have more entries than the default.
637 * We should add additional entries if that is the case so we
638 * can be in sync with the transport frames.
639 */
640 node = dev_to_node(&ndev->dev);
641 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
642 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
643 if (!entry)
644 return -ENOMEM;
645
646 entry->qp = qp;
647 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
648 &qp->rx_free_q);
649 qp->rx_alloc_entry++;
650 }
651
629 qp->remote_rx_info->entry = qp->rx_max_entry - 1; 652 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
630 653
631 /* setup the hdr offsets with 0's */ 654 /* setup the hdr offsets with 0's */
@@ -1722,8 +1745,9 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1722 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, 1745 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
1723 &qp->rx_free_q); 1746 &qp->rx_free_q);
1724 } 1747 }
1748 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
1725 1749
1726 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1750 for (i = 0; i < qp->tx_max_entry; i++) {
1727 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); 1751 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
1728 if (!entry) 1752 if (!entry)
1729 goto err2; 1753 goto err2;
@@ -1744,6 +1768,7 @@ err2:
1744 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1768 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1745 kfree(entry); 1769 kfree(entry);
1746err1: 1770err1:
1771 qp->rx_alloc_entry = 0;
1747 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 1772 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1748 kfree(entry); 1773 kfree(entry);
1749 if (qp->tx_dma_chan) 1774 if (qp->tx_dma_chan)