aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJay Freyensee <james_p_freyensee@linux.intel.com>2016-08-17 18:00:27 -0400
committerSagi Grimberg <sagi@grimberg.me>2016-08-18 02:58:06 -0400
commitc5af8654c422cfdd8480be3a244748e18cace6c5 (patch)
treed75ba0cd1e6a02851b2592758e90986a5fc09f94
parentf994d9dc28bc27353acde2caaf718222d92a3e24 (diff)
nvme-rdma: fix sqsize/hsqsize per spec
Per NVMe-over-Fabrics 1.0 spec, sqsize is represented as a 0-based value. Also per spec, the RDMA binding values shall be set to sqsize, which makes hsqsize 0-based values. Thus, the sqsize during NVMf connect() is now: [root@fedora23-fabrics-host1 for-48]# dmesg [ 318.720645] nvme_fabrics: nvmf_connect_admin_queue(): sqsize for admin queue: 31 [ 318.720884] nvme nvme0: creating 16 I/O queues. [ 318.810114] nvme_fabrics: nvmf_connect_io_queue(): sqsize for i/o queue: 127 Finally, current interpretation implies hrqsize is 1's based so set it appropriately. Reported-by: Daniel Verkamp <daniel.verkamp@intel.com> Signed-off-by: Jay Freyensee <james_p_freyensee@linux.intel.com> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
-rw-r--r--drivers/nvme/host/rdma.c14
1 file changed, 10 insertions, 4 deletions
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d44809e6b03f..c133256fd745 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -645,7 +645,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
645 int i, ret; 645 int i, ret;
646 646
647 for (i = 1; i < ctrl->queue_count; i++) { 647 for (i = 1; i < ctrl->queue_count; i++) {
648 ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize); 648 ret = nvme_rdma_init_queue(ctrl, i,
649 ctrl->ctrl.opts->queue_size);
649 if (ret) { 650 if (ret) {
650 dev_info(ctrl->ctrl.device, 651 dev_info(ctrl->ctrl.device,
651 "failed to initialize i/o queue: %d\n", ret); 652 "failed to initialize i/o queue: %d\n", ret);
@@ -1286,8 +1287,13 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1286 priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH); 1287 priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
1287 priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1); 1288 priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
1288 } else { 1289 } else {
1290 /*
1291 * current interpretation of the fabrics spec
1292 * is at minimum you make hrqsize sqsize+1, or a
1293 * 1's based representation of sqsize.
1294 */
1289 priv.hrqsize = cpu_to_le16(queue->queue_size); 1295 priv.hrqsize = cpu_to_le16(queue->queue_size);
1290 priv.hsqsize = cpu_to_le16(queue->queue_size); 1296 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
1291 } 1297 }
1292 1298
1293 ret = rdma_connect(queue->cm_id, &param); 1299 ret = rdma_connect(queue->cm_id, &param);
@@ -1825,7 +1831,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
1825 1831
1826 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 1832 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
1827 ctrl->tag_set.ops = &nvme_rdma_mq_ops; 1833 ctrl->tag_set.ops = &nvme_rdma_mq_ops;
1828 ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize; 1834 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
1829 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 1835 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
1830 ctrl->tag_set.numa_node = NUMA_NO_NODE; 1836 ctrl->tag_set.numa_node = NUMA_NO_NODE;
1831 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1837 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1923,7 +1929,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
1923 spin_lock_init(&ctrl->lock); 1929 spin_lock_init(&ctrl->lock);
1924 1930
1925 ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */ 1931 ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
1926 ctrl->ctrl.sqsize = opts->queue_size; 1932 ctrl->ctrl.sqsize = opts->queue_size - 1;
1927 ctrl->ctrl.kato = opts->kato; 1933 ctrl->ctrl.kato = opts->kato;
1928 1934
1929 ret = -ENOMEM; 1935 ret = -ENOMEM;