diff options
author | Sagi Grimberg <sagi@grimberg.me> | 2017-03-09 06:26:07 -0500 |
---|---|---|
committer | Sagi Grimberg <sagi@grimberg.me> | 2017-03-21 12:38:41 -0400 |
commit | c248c64387fac5a6b31b343d9acb78f478e8619c (patch) | |
tree | 1fdb4de39c93fffa45c4c6458092a99857a87ca7 | |
parent | b25634e2a051bef4b2524b11adddfbfa6448f6cd (diff) |
nvme-rdma: handle cpu unplug when re-establishing the controller
If a cpu unplug event has occurred, we need to take the minimum
of the provided nr_io_queues and the number of online cpus,
otherwise we won't be able to connect them as blk-mq mapping
won't dispatch to those queues.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
-rw-r--r-- | drivers/nvme/host/rdma.c | 28 |
1 file changed, 14 insertions, 14 deletions
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 779f516e7a4e..47a479f26e5d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl, | |||
343 | struct ib_device *ibdev = dev->dev; | 343 | struct ib_device *ibdev = dev->dev; |
344 | int ret; | 344 | int ret; |
345 | 345 | ||
346 | BUG_ON(queue_idx >= ctrl->queue_count); | ||
347 | |||
348 | ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), | 346 | ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), |
349 | DMA_TO_DEVICE); | 347 | DMA_TO_DEVICE); |
350 | if (ret) | 348 | if (ret) |
@@ -652,8 +650,22 @@ out_free_queues: | |||
652 | 650 | ||
653 | static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) | 651 | static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) |
654 | { | 652 | { |
653 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | ||
654 | unsigned int nr_io_queues; | ||
655 | int i, ret; | 655 | int i, ret; |
656 | 656 | ||
657 | nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); | ||
658 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | ||
659 | if (ret) | ||
660 | return ret; | ||
661 | |||
662 | ctrl->queue_count = nr_io_queues + 1; | ||
663 | if (ctrl->queue_count < 2) | ||
664 | return 0; | ||
665 | |||
666 | dev_info(ctrl->ctrl.device, | ||
667 | "creating %d I/O queues.\n", nr_io_queues); | ||
668 | |||
657 | for (i = 1; i < ctrl->queue_count; i++) { | 669 | for (i = 1; i < ctrl->queue_count; i++) { |
658 | ret = nvme_rdma_init_queue(ctrl, i, | 670 | ret = nvme_rdma_init_queue(ctrl, i, |
659 | ctrl->ctrl.opts->queue_size); | 671 | ctrl->ctrl.opts->queue_size); |
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { | |||
1791 | 1803 | ||
1792 | static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) | 1804 | static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) |
1793 | { | 1805 | { |
1794 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | ||
1795 | int ret; | 1806 | int ret; |
1796 | 1807 | ||
1797 | ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); | ||
1798 | if (ret) | ||
1799 | return ret; | ||
1800 | |||
1801 | ctrl->queue_count = opts->nr_io_queues + 1; | ||
1802 | if (ctrl->queue_count < 2) | ||
1803 | return 0; | ||
1804 | |||
1805 | dev_info(ctrl->ctrl.device, | ||
1806 | "creating %d I/O queues.\n", opts->nr_io_queues); | ||
1807 | |||
1808 | ret = nvme_rdma_init_io_queues(ctrl); | 1808 | ret = nvme_rdma_init_io_queues(ctrl); |
1809 | if (ret) | 1809 | if (ret) |
1810 | return ret; | 1810 | return ret; |