author    Jens Axboe <axboe@kernel.dk>  2018-06-22 10:45:29 -0400
committer Jens Axboe <axboe@kernel.dk>  2018-06-22 10:45:29 -0400
commit    f9da9d0786735850e9bfad362cfc0da200effb46 (patch)
tree      ea6310986c0205374da2ea3e9c59124a35fe8f96
parent    08ba91ee6e2c1c08d3f0648f978cbb5dbf3491d8 (diff)
parent    943e942e6266f22babee5efeb00f8f672fbff5bd (diff)

Merge branch 'nvme-4.18' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"Various relatively small fixes, mostly to fix error handling of various
 sorts."

* 'nvme-4.18' of git://git.infradead.org/nvme:
  nvme-pci: limit max IO size and segments to avoid high order allocations
  nvme-pci: move nvme_kill_queues to nvme_remove_dead_ctrl
  nvme-fc: release io queues to allow fast fail
  nvmet: reset keep alive timer in controller enable
  nvme-rdma: don't override opts->queue_size
  nvme-rdma: Fix command completion race at error recovery
  nvme-rdma: fix possible free of a non-allocated async event buffer
  nvme-rdma: fix possible double free condition when failing to create a controller
-rw-r--r--drivers/nvme/host/core.c1
-rw-r--r--drivers/nvme/host/fc.c6
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c44
-rw-r--r--drivers/nvme/host/rdma.c73
-rw-r--r--drivers/nvme/target/core.c8
6 files changed, 88 insertions, 45 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 21710a7460c8..46df030b2c3f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1808,6 +1808,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
         u32 max_segments =
                 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 
+        max_segments = min_not_zero(max_segments, ctrl->max_segments);
         blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
         blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 }
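
The core.c change hinges on min_not_zero(): a ctrl->max_segments of 0 means "no
driver-specific cap", so the transport-computed limit survives unless a driver
(here, nvme-pci) sets one. A minimal userspace sketch of those semantics — the
real macro lives in include/linux/kernel.h, and the values below are invented
for illustration:

    #include <stdio.h>

    /* Same idea as the kernel's min_not_zero(): pick the smaller of two
     * limits, treating 0 as "unlimited" rather than as the minimum. */
    #define min_not_zero(x, y) ({                                           \
            typeof(x) __x = (x);                                            \
            typeof(y) __y = (y);                                            \
            __x == 0 ? __y : (__y == 0 ? __x : (__x < __y ? __x : __y)); })

    int main(void)
    {
            unsigned int computed = 33;  /* from max_hw_sectors / page size */

            printf("%u\n", min_not_zero(computed, 0u));   /* 33: no cap set   */
            printf("%u\n", min_not_zero(computed, 127u)); /* 33: smaller wins */
            printf("%u\n", min_not_zero(computed, 16u));  /* 16: cap wins     */
            return 0;
    }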
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b528a2f5826c..41d45a1b5c62 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
         /* re-enable the admin_q so anything new can fast fail */
         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
+        /* resume the io queues so that things will fast fail */
+        nvme_start_queues(&ctrl->ctrl);
+
         nvme_fc_ctlr_inactive_on_rport(ctrl);
 }
 
@@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
          * waiting for io to terminate
          */
         nvme_fc_delete_association(ctrl);
-
-        /* resume the io queues so that things will fast fail */
-        nvme_start_queues(nctrl);
 }
 
 static void
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 231807cbc849..0c4a33df3b2f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -170,6 +170,7 @@ struct nvme_ctrl {
         u64 cap;
         u32 page_size;
         u32 max_hw_sectors;
+        u32 max_segments;
         u16 oncs;
         u16 oacs;
         u16 nssa;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fc33804662e7..ba943f211687 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -38,6 +38,13 @@
 
 #define SGES_PER_PAGE        (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ        4096
+#define NVME_MAX_SEGS        127
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -100,6 +107,8 @@ struct nvme_dev {
         struct nvme_ctrl ctrl;
         struct completion ioq_wait;
 
+        mempool_t *iod_mempool;
+
         /* shadow doorbell buffer support: */
         u32 *dbbuf_dbs;
         dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
         iod->use_sgl = nvme_pci_use_sgls(dev, rq);
 
         if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
-                size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
-                                iod->use_sgl);
-
-                iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
+                iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
                 if (!iod->sg)
                         return BLK_STS_RESOURCE;
         } else {
@@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
         }
 
         if (iod->sg != iod->inline_sg)
-                kfree(iod->sg);
+                mempool_free(iod->sg, dev->iod_mempool);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
         blk_put_queue(dev->ctrl.admin_q);
         kfree(dev->queues);
         free_opal_dev(dev->ctrl.opal_dev);
+        mempool_destroy(dev->iod_mempool);
         kfree(dev);
 }
 
@@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 
         nvme_get_ctrl(&dev->ctrl);
         nvme_dev_disable(dev, false);
+        nvme_kill_queues(&dev->ctrl);
         if (!queue_work(nvme_wq, &dev->remove_work))
                 nvme_put_ctrl(&dev->ctrl);
 }
@@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work)
         if (result)
                 goto out;
 
+        /*
+         * Limit the max command size to prevent iod->sg allocations going
+         * over a single page.
+         */
+        dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+        dev->ctrl.max_segments = NVME_MAX_SEGS;
+
         result = nvme_init_identify(&dev->ctrl);
         if (result)
                 goto out;
@@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
         struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
         struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-        nvme_kill_queues(&dev->ctrl);
         if (pci_get_drvdata(pdev))
                 device_release_driver(&pdev->dev);
         nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         int node, result = -ENOMEM;
         struct nvme_dev *dev;
         unsigned long quirks = id->driver_data;
+        size_t alloc_size;
 
         node = dev_to_node(&pdev->dev);
         if (node == NUMA_NO_NODE)
@@ -2546,6 +2561,23 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         if (result)
                 goto release_pools;
 
+        /*
+         * Double check that our mempool alloc size will cover the biggest
+         * command we support.
+         */
+        alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
+                        NVME_MAX_SEGS, true);
+        WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+
+        dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+                                                mempool_kfree,
+                                                (void *) alloc_size,
+                                                GFP_KERNEL, node);
+        if (!dev->iod_mempool) {
+                result = -ENOMEM;
+                goto release_pools;
+        }
+
         dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
         nvme_get_ctrl(&dev->ctrl);
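
The pci.c side of the series replaces a per-request kmalloc() of the
scatterlist with a mempool backed by kmalloc/kfree: a mempool keeps a minimum
number of pre-allocated elements in reserve, so at least one request can
always make forward progress even when the page allocator is under pressure,
and the capped command size keeps every element within a single page. A
minimal sketch of the pattern — not the driver code itself; the names pool,
pool_setup, pool_get, and pool_put are illustrative:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static mempool_t *pool;

    static int pool_setup(size_t elem_size, int node)
    {
            /* Reserve of 1: mempool_kmalloc()/mempool_kfree() treat the
             * pool_data pointer as the allocation size, so every element
             * is a kmalloc(elem_size) buffer. */
            pool = mempool_create_node(1, mempool_kmalloc, mempool_kfree,
                                       (void *)elem_size, GFP_KERNEL, node);
            return pool ? 0 : -ENOMEM;
    }

    static void *pool_get(void)
    {
            /* In the I/O path: no sleeping, may return NULL, in which case
             * the caller backs off (BLK_STS_RESOURCE in the nvme driver). */
            return mempool_alloc(pool, GFP_ATOMIC);
    }

    static void pool_put(void *elem)
    {
            mempool_free(elem, pool);
    }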
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c9424da0d23e..9544625c0b7d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
         if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                 return;
 
-        if (nvme_rdma_queue_idx(queue) == 0) {
-                nvme_rdma_free_qe(queue->device->dev,
-                        &queue->ctrl->async_event_sqe,
-                        sizeof(struct nvme_command), DMA_TO_DEVICE);
-        }
-
         nvme_rdma_destroy_queue_ib(queue);
         rdma_destroy_id(queue->cm_id);
 }
@@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                 set = &ctrl->tag_set;
                 memset(set, 0, sizeof(*set));
                 set->ops = &nvme_rdma_mq_ops;
-                set->queue_depth = nctrl->opts->queue_size;
+                set->queue_depth = nctrl->sqsize + 1;
                 set->reserved_tags = 1; /* fabric connect */
                 set->numa_node = NUMA_NO_NODE;
                 set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,12 @@ out:
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                 bool remove)
 {
-        nvme_rdma_stop_queue(&ctrl->queues[0]);
         if (remove) {
                 blk_cleanup_queue(ctrl->ctrl.admin_q);
                 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
         }
+        nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                sizeof(struct nvme_command), DMA_TO_DEVICE);
         nvme_rdma_free_queue(&ctrl->queues[0]);
 }
 
@@ -755,11 +750,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
         ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
+        error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                        sizeof(struct nvme_command), DMA_TO_DEVICE);
+        if (error)
+                goto out_free_queue;
+
         if (new) {
                 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
                 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
                         error = PTR_ERR(ctrl->ctrl.admin_tagset);
-                        goto out_free_queue;
+                        goto out_free_async_qe;
                 }
 
                 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +795,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
         if (error)
                 goto out_stop_queue;
 
-        error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
-                        &ctrl->async_event_sqe, sizeof(struct nvme_command),
-                        DMA_TO_DEVICE);
-        if (error)
-                goto out_stop_queue;
-
         return 0;
 
 out_stop_queue:
@@ -811,6 +805,9 @@ out_cleanup_queue:
 out_free_tagset:
         if (new)
                 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+        nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                sizeof(struct nvme_command), DMA_TO_DEVICE);
 out_free_queue:
         nvme_rdma_free_queue(&ctrl->queues[0]);
         return error;
@@ -819,7 +816,6 @@ out_free_queue:
 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
                 bool remove)
 {
-        nvme_rdma_stop_io_queues(ctrl);
         if (remove) {
                 blk_cleanup_queue(ctrl->ctrl.connect_q);
                 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +884,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
         list_del(&ctrl->list);
         mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-        kfree(ctrl->queues);
         nvmf_free_options(nctrl->opts);
 free_ctrl:
+        kfree(ctrl->queues);
         kfree(ctrl);
 }
 
@@ -949,6 +945,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
         return;
 
 destroy_admin:
+        nvme_rdma_stop_queue(&ctrl->queues[0]);
         nvme_rdma_destroy_admin_queue(ctrl, false);
 requeue:
         dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +962,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
         if (ctrl->ctrl.queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
+                nvme_rdma_stop_io_queues(ctrl);
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                         nvme_cancel_request, &ctrl->ctrl);
                 nvme_rdma_destroy_io_queues(ctrl, false);
         }
 
         blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+        nvme_rdma_stop_queue(&ctrl->queues[0]);
         blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                 nvme_cancel_request, &ctrl->ctrl);
         nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1736,6 +1735,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
         if (ctrl->ctrl.queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
+                nvme_rdma_stop_io_queues(ctrl);
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                         nvme_cancel_request, &ctrl->ctrl);
                 nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1747,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
                 nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 
         blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+        nvme_rdma_stop_queue(&ctrl->queues[0]);
         blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                 nvme_cancel_request, &ctrl->ctrl);
         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1933,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                 goto out_free_ctrl;
         }
 
-        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
-                                0 /* no quirks, we're perfect! */);
-        if (ret)
-                goto out_free_ctrl;
-
         INIT_DELAYED_WORK(&ctrl->reconnect_work,
                         nvme_rdma_reconnect_ctrl_work);
         INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1946,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
         ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                 GFP_KERNEL);
         if (!ctrl->queues)
-                goto out_uninit_ctrl;
+                goto out_free_ctrl;
+
+        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+                                0 /* no quirks, we're perfect! */);
+        if (ret)
+                goto out_kfree_queues;
 
         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
         WARN_ON_ONCE(!changed);
 
         ret = nvme_rdma_configure_admin_queue(ctrl, true);
         if (ret)
-                goto out_kfree_queues;
+                goto out_uninit_ctrl;
 
         /* sanity check icdoff */
         if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1975,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                 goto out_remove_admin_queue;
         }
 
-        if (opts->queue_size > ctrl->ctrl.maxcmd) {
-                /* warn if maxcmd is lower than queue_size */
-                dev_warn(ctrl->ctrl.device,
-                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
-                        opts->queue_size, ctrl->ctrl.maxcmd);
-                opts->queue_size = ctrl->ctrl.maxcmd;
-        }
-
+        /* only warn if argument is too large here, will clamp later */
         if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-                /* warn if sqsize is lower than queue_size */
                 dev_warn(ctrl->ctrl.device,
                         "queue_size %zu > ctrl sqsize %u, clamping down\n",
                         opts->queue_size, ctrl->ctrl.sqsize + 1);
+        }
+
+        /* warn if maxcmd is lower than sqsize+1 */
+        if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+                dev_warn(ctrl->ctrl.device,
+                        "sqsize %u > ctrl maxcmd %u, clamping down\n",
+                        ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+                ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
         }
 
         if (opts->nr_io_queues) {
@@ -2013,15 +2013,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
         return &ctrl->ctrl;
 
 out_remove_admin_queue:
+        nvme_rdma_stop_queue(&ctrl->queues[0]);
         nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
-        kfree(ctrl->queues);
 out_uninit_ctrl:
         nvme_uninit_ctrl(&ctrl->ctrl);
         nvme_put_ctrl(&ctrl->ctrl);
         if (ret > 0)
                 ret = -EIO;
         return ERR_PTR(ret);
+out_kfree_queues:
+        kfree(ctrl->queues);
 out_free_ctrl:
         kfree(ctrl);
         return ERR_PTR(ret);
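
One detail behind the rdma.c tagset and clamping changes: NVMe's SQSIZE field
is a zero's-based value, so a controller sqsize of N means N+1 usable
submission-queue entries. That is why the tagset is now sized as
nctrl->sqsize + 1, and why the clamp writes ctrl->ctrl.sqsize = maxcmd - 1
rather than touching opts->queue_size. A standalone illustration of the
arithmetic (values invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned sqsize = 127;   /* controller-reported, zero's-based */
            unsigned maxcmd = 64;    /* max commands the target accepts   */

            if (sqsize + 1 > maxcmd) /* the clamp the rdma.c hunk performs */
                    sqsize = maxcmd - 1;

            printf("queue_depth = %u\n", sqsize + 1); /* 64, never > maxcmd */
            return 0;
    }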
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a03da764ecae..74d4b785d2da 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
         }
 
         ctrl->csts = NVME_CSTS_RDY;
+
+        /*
+         * Controllers that are not yet enabled should not really enforce the
+         * keep alive timeout, but we still want to track a timeout and cleanup
+         * in case a host died before it enabled the controller.  Hence, simply
+         * reset the keep alive timer when the controller is enabled.
+         */
+        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
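
The nvmet fix relies on mod_delayed_work(), which (re)arms a delayed work
item whether or not it is already pending, making it a natural watchdog-reset
primitive. A minimal sketch of the pattern — kernel-module style, not the
target code itself; ka_work and kato echo the names nvmet uses, and the
handler body is illustrative:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct delayed_work ka_work;
    static unsigned int kato = 15;   /* keep-alive timeout, seconds */

    static void ka_timeout(struct work_struct *work)
    {
            /* Host went silent for a full kato period: tear things down. */
    }

    static void ka_setup(void)
    {
            INIT_DELAYED_WORK(&ka_work, ka_timeout);
            mod_delayed_work(system_wq, &ka_work, kato * HZ);
    }

    static void ka_reset(void)
    {
            /* Called on every keep-alive (and, after this fix, on controller
             * enable too): pushes the expiry another kato seconds into the
             * future, whether or not the work was already queued. */
            mod_delayed_work(system_wq, &ka_work, kato * HZ);
    }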