author		Jens Axboe <axboe@kernel.dk>	2018-01-08 14:19:41 -0500
committer	Jens Axboe <axboe@kernel.dk>	2018-01-08 14:19:41 -0500
commit		550203e64c2f4b330211955a1c581b631fd2ebe4 (patch)
tree		984a1538859490dbc63e1c8e9fb13a8d3337160c
parent		fb350e0ad99359768e1e80b4784692031ec340e4 (diff)
parent		b837b28394fb76993c28bb242db7061ee0417da6 (diff)
Merge branch 'nvme-4.16' of git://git.infradead.org/nvme into for-4.16/block
Pull NVMe fixes from Christoph:
"Below are the pending nvme updates for Linux 4.16. Just fixes and
cleanups from various contributors this time around."
 drivers/nvme/host/core.c          |  62
 drivers/nvme/host/fabrics.c       |  17
 drivers/nvme/host/fabrics.h       |   2
 drivers/nvme/host/fc.c            |   1
 drivers/nvme/host/nvme.h          |   1
 drivers/nvme/host/pci.c           |  38
 drivers/nvme/host/rdma.c          |   1
 drivers/nvme/target/core.c        |  11
 drivers/nvme/target/fabrics-cmd.c |   2
 drivers/nvme/target/fc.c          |  24
 drivers/nvme/target/fcloop.c      | 244
 drivers/nvme/target/loop.c        |   1
 drivers/nvme/target/rdma.c        |  20

13 files changed, 307 insertions(+), 117 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f837d666cbd4..2bcd49584f71 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -232,6 +232,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 
 	old_state = ctrl->state;
 	switch (new_state) {
+	case NVME_CTRL_ADMIN_ONLY:
+		switch (old_state) {
+		case NVME_CTRL_RESETTING:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
 	case NVME_CTRL_LIVE:
 		switch (old_state) {
 		case NVME_CTRL_NEW:
@@ -247,6 +256,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		switch (old_state) {
 		case NVME_CTRL_NEW:
 		case NVME_CTRL_LIVE:
+		case NVME_CTRL_ADMIN_ONLY:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -266,6 +276,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	case NVME_CTRL_DELETING:
 		switch (old_state) {
 		case NVME_CTRL_LIVE:
+		case NVME_CTRL_ADMIN_ONLY:
 		case NVME_CTRL_RESETTING:
 		case NVME_CTRL_RECONNECTING:
 			changed = true;
@@ -1217,16 +1228,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
 #ifdef CONFIG_NVME_MULTIPATH
 	/* should never be called due to GENHD_FL_HIDDEN */
 	if (WARN_ON_ONCE(ns->head->disk))
-		return -ENXIO;
+		goto fail;
 #endif
 	if (!kref_get_unless_zero(&ns->kref))
-		return -ENXIO;
+		goto fail;
+	if (!try_module_get(ns->ctrl->ops->module))
+		goto fail_put_ns;
+
 	return 0;
+
+fail_put_ns:
+	nvme_put_ns(ns);
+fail:
+	return -ENXIO;
 }
 
 static void nvme_release(struct gendisk *disk, fmode_t mode)
 {
-	nvme_put_ns(disk->private_data);
+	struct nvme_ns *ns = disk->private_data;
+
+	module_put(ns->ctrl->ops->module);
+	nvme_put_ns(ns);
 }
 
 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
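The nvme_open()/nvme_release() hunk above pairs try_module_get() on ns->ctrl->ops->module with a module_put() at release time, so a transport module can no longer be unloaded while one of its namespace block devices is held open. A stripped-down sketch of the pattern, with hypothetical example_* names standing in for the driver's types:

#include <linux/module.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_ns {
	struct kref kref;
	struct module *owner;		/* e.g. ns->ctrl->ops->module */
};

static void example_ns_free(struct kref *kref)
{
	kfree(container_of(kref, struct example_ns, kref));
}

static int example_open(struct example_ns *ns)
{
	if (!kref_get_unless_zero(&ns->kref))	/* object already dying */
		return -ENXIO;
	if (!try_module_get(ns->owner)) {	/* module being unloaded */
		kref_put(&ns->kref, example_ns_free);
		return -ENXIO;
	}
	return 0;
}

static void example_release(struct example_ns *ns)
{
	/* drop in reverse order of acquisition */
	module_put(ns->owner);
	kref_put(&ns->kref, example_ns_free);
}

The ordering matters: the object reference is taken first and dropped last, so the module reference is only ever held while the object is known to be alive.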
@@ -2047,6 +2069,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
 	NULL,
 };
 
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+	int count = 0;
+	struct nvme_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (ctrl->state != NVME_CTRL_DELETING &&
+		    ctrl->state != NVME_CTRL_DEAD)
+			count++;
+	}
+	mutex_unlock(&subsys->lock);
+
+	return count;
+}
+
 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
 	struct nvme_subsystem *subsys, *found;
@@ -2085,7 +2123,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 		 * Verify that the subsystem actually supports multiple
 		 * controllers, else bail out.
 		 */
-		if (!(id->cmic & (1 << 1))) {
+		if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
 			dev_err(ctrl->device,
 				"ignoring ctrl due to duplicate subnqn (%s).\n",
 				found->subnqn);
@@ -2252,7 +2290,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 				 shutdown_timeout, 60);
 
 		if (ctrl->shutdown_timeout != shutdown_timeout)
-			dev_warn(ctrl->device,
+			dev_info(ctrl->device,
 				 "Shutdown timeout set to %u seconds\n",
 				 ctrl->shutdown_timeout);
 	} else
@@ -2336,8 +2374,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
 	struct nvme_ctrl *ctrl =
 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
 
-	if (ctrl->state != NVME_CTRL_LIVE)
+	switch (ctrl->state) {
+	case NVME_CTRL_LIVE:
+	case NVME_CTRL_ADMIN_ONLY:
+		break;
+	default:
 		return -EWOULDBLOCK;
+	}
+
 	file->private_data = ctrl;
 	return 0;
 }
@@ -2601,6 +2645,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 	static const char *const state_name[] = {
 		[NVME_CTRL_NEW]		= "new",
 		[NVME_CTRL_LIVE]	= "live",
+		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
 		[NVME_CTRL_RESETTING]	= "resetting",
 		[NVME_CTRL_RECONNECTING]= "reconnecting",
 		[NVME_CTRL_DELETING]	= "deleting",
@@ -3073,6 +3118,8 @@ static void nvme_scan_work(struct work_struct *work)
 	if (ctrl->state != NVME_CTRL_LIVE)
 		return;
 
+	WARN_ON_ONCE(!ctrl->tagset);
+
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
@@ -3093,8 +3140,7 @@ static void nvme_scan_work(struct work_struct *work)
 void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
 	/*
-	 * Do not queue new scan work when a controller is reset during
-	 * removal.
+	 * Only new queue scan work when admin and IO queues are both alive
 	 */
 	if (ctrl->state == NVME_CTRL_LIVE)
 		queue_work(nvme_wq, &ctrl->scan_work);
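Taken together, the core.c hunks above extend the controller state machine with NVME_CTRL_ADMIN_ONLY: a reset that cannot bring up IO queues now parks the controller in a state where the admin queue still works, the char device stays usable, and a later reset or delete remains possible. A compact model of just the new arcs, as visible in this diff (plain C, not driver code):

#include <stdbool.h>

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,	/* only the admin queue is live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

/* Transition arcs added for ADMIN_ONLY by this merge; all others omitted. */
static bool admin_only_arc(enum nvme_ctrl_state old_state,
			   enum nvme_ctrl_state new_state)
{
	switch (new_state) {
	case NVME_CTRL_ADMIN_ONLY:
		/* entered only from a reset that lost the IO queues */
		return old_state == NVME_CTRL_RESETTING;
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_DELETING:
		/* an admin-only controller may still be reset or deleted */
		return old_state == NVME_CTRL_ADMIN_ONLY;
	default:
		return false;
	}
}

Note that nvme_queue_scan() and nvme_scan_work() still require NVME_CTRL_LIVE, so no namespace scanning happens in ADMIN_ONLY; the new WARN_ON_ONCE(!ctrl->tagset) documents that a live controller must have an IO tagset.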
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6816a0..2f68befd31bf 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -492,7 +492,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-	if (!ops->create_ctrl)
+	if (!ops->create_ctrl || !ops->module)
 		return -EINVAL;
 
 	down_write(&nvmf_transports_rwsem);
@@ -868,32 +868,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_unlock;
 	}
 
+	if (!try_module_get(ops->module)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
 	ret = nvmf_check_required_opts(opts, ops->required_opts);
 	if (ret)
-		goto out_unlock;
+		goto out_module_put;
 	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
 				ops->allowed_opts | ops->required_opts);
 	if (ret)
-		goto out_unlock;
+		goto out_module_put;
 
 	ctrl = ops->create_ctrl(dev, opts);
 	if (IS_ERR(ctrl)) {
 		ret = PTR_ERR(ctrl);
-		goto out_unlock;
+		goto out_module_put;
 	}
 
 	if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
 		dev_warn(ctrl->device,
 			"controller returned incorrect NQN: \"%s\".\n",
 			ctrl->subsys->subnqn);
+		module_put(ops->module);
 		up_read(&nvmf_transports_rwsem);
 		nvme_delete_ctrl_sync(ctrl);
 		return ERR_PTR(-EINVAL);
 	}
 
+	module_put(ops->module);
 	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
+out_module_put:
+	module_put(ops->module);
 out_unlock:
 	up_read(&nvmf_transports_rwsem);
 out_free_opts:
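The nvmf_create_ctrl() change holds the transport's module reference only for the duration of the create call: it is taken before the option checks and dropped on every exit path, including the NQN-mismatch teardown. The long-lived pin for an open device comes from the nvme_open()/nvme_release() pairing in core.c instead. A minimal sketch of the transient-reference shape (hypothetical names, error handling elided to the essentials):

#include <linux/module.h>
#include <linux/err.h>

struct example_ctrl;

struct example_ops {
	struct module *module;
	struct example_ctrl *(*create_ctrl)(void);
};

static struct example_ctrl *example_create(struct example_ops *ops)
{
	struct example_ctrl *ctrl;

	/* pin the module so ops->create_ctrl() cannot be unloaded mid-call */
	if (!try_module_get(ops->module))
		return ERR_PTR(-EBUSY);

	ctrl = ops->create_ctrl();

	/* success or failure, the transient reference ends here */
	module_put(ops->module);
	return ctrl;
}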
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 9ba614953607..25b19f722f5b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
  *			fabric implementation of NVMe fabrics.
  * @entry:		Used by the fabrics library to add the new
  *			registration entry to its linked-list internal tree.
+ * @module:		Transport module reference
  * @name:		Name of the NVMe fabric driver implementation.
  * @required_opts:	sysfs command-line options that must be specified
  *			when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
  */
 struct nvmf_transport_ops {
 	struct list_head	entry;
+	struct module		*module;
 	const char		*name;
 	int			required_opts;
 	int			allowed_opts;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0a8af4daef89..2a7a9a75105d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3381,6 +3381,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 
 static struct nvmf_transport_ops nvme_fc_transport = {
 	.name		= "fc",
+	.module		= THIS_MODULE,
 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
 	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
 	.create_ctrl	= nvme_fc_create_ctrl,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa5283e8e..eecf71ce6e75 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -119,6 +119,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
 	NVME_CTRL_LIVE,
+	NVME_CTRL_ADMIN_ONLY,	/* Only admin queue live */
 	NVME_CTRL_RESETTING,
 	NVME_CTRL_RECONNECTING,
 	NVME_CTRL_DELETING,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5800c3c9082..62119078c2bf 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1770,7 +1770,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
-	u64 size = 0, tmp;
+	u64 size, tmp;
 
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
@@ -1853,7 +1853,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
 	u64 min = (u64)dev->ctrl.hmmin * 4096;
 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
-	int ret = 0;
+	int ret;
 
 	preferred = min(preferred, max);
 	if (min > max) {
@@ -2035,13 +2035,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 }
 
 /*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * return error value only when tagset allocation failed
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
+	int ret;
+
 	if (!dev->ctrl.tagset) {
 		dev->tagset.ops = &nvme_mq_ops;
 		dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2057,8 +2056,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
 		dev->tagset.driver_data = dev;
 
-		if (blk_mq_alloc_tag_set(&dev->tagset))
-			return 0;
+		ret = blk_mq_alloc_tag_set(&dev->tagset);
+		if (ret) {
+			dev_warn(dev->ctrl.device,
+				"IO queues tagset allocation failed %d\n", ret);
+			return ret;
+		}
 		dev->ctrl.tagset = &dev->tagset;
 
 		nvme_dbbuf_set(dev);
@@ -2291,6 +2294,7 @@ static void nvme_reset_work(struct work_struct *work)
 		container_of(work, struct nvme_dev, ctrl.reset_work);
 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
 	int result = -ENODEV;
+	enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
 
 	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
 		goto out;
@@ -2354,15 +2358,23 @@ static void nvme_reset_work(struct work_struct *work)
 		dev_warn(dev->ctrl.device, "IO queues not created\n");
 		nvme_kill_queues(&dev->ctrl);
 		nvme_remove_namespaces(&dev->ctrl);
+		new_state = NVME_CTRL_ADMIN_ONLY;
 	} else {
 		nvme_start_queues(&dev->ctrl);
 		nvme_wait_freeze(&dev->ctrl);
-		nvme_dev_add(dev);
+		/* hit this only when allocate tagset fails */
+		if (nvme_dev_add(dev))
+			new_state = NVME_CTRL_ADMIN_ONLY;
 		nvme_unfreeze(&dev->ctrl);
 	}
 
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
-		dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+	/*
+	 * If only admin queue live, keep it to do further investigation or
+	 * recovery.
+	 */
+	if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller state %d\n", new_state);
 		goto out;
 	}
 
@@ -2498,10 +2510,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release_pools;
 
-	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
-	queue_work(nvme_wq, &dev->ctrl.reset_work);
+	nvme_reset_ctrl(&dev->ctrl);
+
 	return 0;
 
 release_pools:
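With these pci.c changes, a failed reset no longer tries to force the controller to LIVE: if IO queues never came up, or blk_mq_alloc_tag_set() failed in nvme_dev_add(), the controller is kept in ADMIN_ONLY so the admin queue remains available for inspection and recovery. The end of nvme_reset_work() reduces to a decision like this (a model of the control flow, not driver code):

#include <stdbool.h>

enum target_state { STATE_LIVE, STATE_ADMIN_ONLY };

static enum target_state reset_outcome(bool io_queues_ok, bool tagset_ok)
{
	if (!io_queues_ok)
		return STATE_ADMIN_ONLY;	/* queues never came up */
	if (!tagset_ok)
		return STATE_ADMIN_ONLY;	/* blk_mq_alloc_tag_set() failed */
	return STATE_LIVE;
}

The probe path is simplified to match: the open-coded state change plus queue_work() pair is replaced by a single nvme_reset_ctrl() call, which performs both steps.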
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37af56596be6..75d6956eb380 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2006,6 +2006,7 @@ out_free_ctrl:
 
 static struct nvmf_transport_ops nvme_rdma_transport = {
 	.name		= "rdma",
+	.module		= THIS_MODULE,
 	.required_opts	= NVMF_OPT_TRADDR,
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b54748ad5f48..7282ea8d3b96 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -830,7 +830,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	/* Don't accept keep-alive timeout for discovery controllers */
 	if (kato) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-		goto out_free_sqs;
+		goto out_remove_ida;
 	}
 
 	/*
@@ -860,6 +860,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	*ctrlp = ctrl;
 	return 0;
 
+out_remove_ida:
+	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 out_free_sqs:
 	kfree(ctrl->sqs);
 out_free_cqs:
@@ -877,21 +879,22 @@ static void nvmet_ctrl_free(struct kref *ref)
 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
 	struct nvmet_subsys *subsys = ctrl->subsys;
 
-	nvmet_stop_keep_alive_timer(ctrl);
-
 	mutex_lock(&subsys->lock);
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
+	nvmet_stop_keep_alive_timer(ctrl);
+
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fatal_err_work);
 
 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
-	nvmet_subsys_put(subsys);
 
 	kfree(ctrl->sqs);
 	kfree(ctrl->cqs);
 	kfree(ctrl);
+
+	nvmet_subsys_put(subsys);
 }
 
 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index db3bf6b8bf9e..19e9e42ae943 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 		goto out_ctrl_put;
 	}
 
-	pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
 
 out:
 	kfree(d);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 840d1a39de33..9b39a6cb1935 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2490,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
-			/* a FC port can only be 1 nvmet port id */
-			if (!tgtport->port) {
-				tgtport->port = port;
-				port->priv = tgtport;
-				nvmet_fc_tgtport_get(tgtport);
-				ret = 0;
-			} else
-				ret = -EALREADY;
+			tgtport->port = port;
+			ret = 0;
 			break;
 		}
 	}
@@ -2508,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
 static void
 nvmet_fc_remove_port(struct nvmet_port *port)
 {
-	struct nvmet_fc_tgtport *tgtport = port->priv;
-	unsigned long flags;
-	bool matched = false;
-
-	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
-	if (tgtport->port == port) {
-		matched = true;
-		tgtport->port = NULL;
-	}
-	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
-	if (matched)
-		nvmet_fc_tgtport_put(tgtport);
+	/* nothing to do */
 }
 
 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 7b75d9de55ab..9f8a6726df91 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -204,6 +204,10 @@ struct fcloop_lport {
 	struct completion unreg_done;
 };
 
+struct fcloop_lport_priv {
+	struct fcloop_lport *lport;
+};
+
 struct fcloop_rport {
 	struct nvme_fc_remote_port	*remoteport;
 	struct nvmet_fc_target_port	*targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
 	int				status;
 };
 
+enum {
+	INI_IO_START		= 0,
+	INI_IO_ACTIVE		= 1,
+	INI_IO_ABORTED		= 2,
+	INI_IO_COMPLETED	= 3,
+};
+
 struct fcloop_fcpreq {
 	struct fcloop_tport		*tport;
 	struct nvmefc_fcp_req		*fcpreq;
 	spinlock_t			reqlock;
 	u16				status;
+	u32				inistate;
 	bool				active;
 	bool				aborted;
-	struct work_struct		work;
+	struct kref			ref;
+	struct work_struct		fcp_rcv_work;
+	struct work_struct		abort_rcv_work;
+	struct work_struct		tio_done_work;
 	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
 };
 
 struct fcloop_ini_fcpreq {
 	struct nvmefc_fcp_req		*fcpreq;
 	struct fcloop_fcpreq		*tfcp_req;
-	struct work_struct		iniwork;
+	spinlock_t			inilock;
 };
 
 static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
 	return 0;
 }
 
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
 static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
 {
-	struct fcloop_ini_fcpreq *inireq =
-		container_of(work, struct fcloop_ini_fcpreq, iniwork);
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(ref, struct fcloop_fcpreq, ref);
+
+	kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+	return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+			struct fcloop_fcpreq *tfcp_req, int status)
+{
+	struct fcloop_ini_fcpreq *inireq = NULL;
+
+	if (fcpreq) {
+		inireq = fcpreq->private;
+		spin_lock(&inireq->inilock);
+		inireq->tfcp_req = NULL;
+		spin_unlock(&inireq->inilock);
+
+		fcpreq->status = status;
+		fcpreq->done(fcpreq);
+	}
+
+	/* release original io reference on tgt struct */
+	fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	int ret = 0;
+	bool aborted = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+		tfcp_req->inistate = INI_IO_ACTIVE;
+		break;
+	case INI_IO_ABORTED:
+		aborted = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(aborted))
+		ret = -ECANCELED;
+	else
+		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+				&tfcp_req->tgt_fcp_req,
+				fcpreq->cmdaddr, fcpreq->cmdlen);
+	if (ret)
+		fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+	return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+	struct nvmefc_fcp_req *fcpreq;
+	bool completed = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	fcpreq = tfcp_req->fcpreq;
+	switch (tfcp_req->inistate) {
+	case INI_IO_ABORTED:
+		break;
+	case INI_IO_COMPLETED:
+		completed = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(completed)) {
+		/* remove reference taken in original abort downcall */
+		fcloop_tfcp_req_put(tfcp_req);
+		return;
+	}
 
-	inireq->fcpreq->done(inireq->fcpreq);
+	if (tfcp_req->tport->targetport)
+		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+					&tfcp_req->tgt_fcp_req);
+
+	spin_lock(&tfcp_req->reqlock);
+	tfcp_req->fcpreq = NULL;
+	spin_unlock(&tfcp_req->reqlock);
+
+	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+	/* call_host_done releases reference for abort downcall */
 }
 
 /*
@@ -364,20 +484,15 @@ static void
 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 {
 	struct fcloop_fcpreq *tfcp_req =
-		container_of(work, struct fcloop_fcpreq, work);
-	struct fcloop_tport *tport = tfcp_req->tport;
+		container_of(work, struct fcloop_fcpreq, tio_done_work);
 	struct nvmefc_fcp_req *fcpreq;
 
 	spin_lock(&tfcp_req->reqlock);
 	fcpreq = tfcp_req->fcpreq;
+	tfcp_req->inistate = INI_IO_COMPLETED;
 	spin_unlock(&tfcp_req->reqlock);
 
-	if (tport->remoteport && fcpreq) {
-		fcpreq->status = tfcp_req->status;
-		fcpreq->done(fcpreq);
-	}
-
-	kfree(tfcp_req);
+	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 }
 
 
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req;
-	int ret = 0;
 
 	if (!rport->targetport)
 		return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 
 	inireq->fcpreq = fcpreq;
 	inireq->tfcp_req = tfcp_req;
-	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+	spin_lock_init(&inireq->inilock);
+
 	tfcp_req->fcpreq = fcpreq;
 	tfcp_req->tport = rport->targetport->private;
+	tfcp_req->inistate = INI_IO_START;
 	spin_lock_init(&tfcp_req->reqlock);
-	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+	kref_init(&tfcp_req->ref);
 
-	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
-				fcpreq->cmdaddr, fcpreq->cmdlen);
+	schedule_work(&tfcp_req->fcp_rcv_work);
 
-	return ret;
+	return 0;
 }
 
 static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->work);
+	schedule_work(&tfcp_req->tio_done_work);
 }
 
 static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 			void *hw_queue_handle,
 			struct nvmefc_fcp_req *fcpreq)
 {
-	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
-	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+	struct fcloop_fcpreq *tfcp_req;
+	bool abortio = true;
+
+	spin_lock(&inireq->inilock);
+	tfcp_req = inireq->tfcp_req;
+	if (tfcp_req)
+		fcloop_tfcp_req_get(tfcp_req);
+	spin_unlock(&inireq->inilock);
 
 	if (!tfcp_req)
 		/* abort has already been called */
 		return;
 
-	if (rport->targetport)
-		nvmet_fc_rcv_fcp_abort(rport->targetport,
-					&tfcp_req->tgt_fcp_req);
-
 	/* break initiator/target relationship for io */
 	spin_lock(&tfcp_req->reqlock);
-	inireq->tfcp_req = NULL;
-	tfcp_req->fcpreq = NULL;
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+	case INI_IO_ACTIVE:
+		tfcp_req->inistate = INI_IO_ABORTED;
+		break;
+	case INI_IO_COMPLETED:
+		abortio = false;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
 	spin_unlock(&tfcp_req->reqlock);
 
-	/* post the aborted io completion */
-	fcpreq->status = -ECANCELED;
-	schedule_work(&inireq->iniwork);
+	if (abortio)
+		/* leave the reference while the work item is scheduled */
+		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+	else {
+		/*
+		 * as the io has already had the done callback made,
+		 * nothing more to do. So release the reference taken above
+		 */
+		fcloop_tfcp_req_put(tfcp_req);
+	}
 }
 
 static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
 static void
 fcloop_localport_delete(struct nvme_fc_local_port *localport)
 {
-	struct fcloop_lport *lport = localport->private;
+	struct fcloop_lport_priv *lport_priv = localport->private;
+	struct fcloop_lport *lport = lport_priv->lport;
 
 	/* release any threads waiting for the unreg to complete */
 	complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
 	.dma_boundary		= FCLOOP_DMABOUND_4G,
 	/* sizes of additional private data for data structures */
-	.local_priv_sz		= sizeof(struct fcloop_lport),
+	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
 	.remote_priv_sz		= sizeof(struct fcloop_rport),
 	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
 	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
 	.dma_boundary		= FCLOOP_DMABOUND_4G,
 	/* optional features */
-	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
-				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+	.target_features	= 0,
 	/* sizes of additional private data for data structures */
 	.target_priv_sz		= sizeof(struct fcloop_tport),
 };
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
 	struct fcloop_ctrl_options *opts;
 	struct nvme_fc_local_port *localport;
 	struct fcloop_lport *lport;
-	int ret;
+	struct fcloop_lport_priv *lport_priv;
+	unsigned long flags;
+	int ret = -ENOMEM;
+
+	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+	if (!lport)
+		return -ENOMEM;
 
 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
 	if (!opts)
-		return -ENOMEM;
+		goto out_free_lport;
 
 	ret = fcloop_parse_options(opts, buf);
 	if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
 
 	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
 	if (!ret) {
-		unsigned long flags;
-
 		/* success */
-		lport = localport->private;
+		lport_priv = localport->private;
+		lport_priv->lport = lport;
+
 		lport->localport = localport;
 		INIT_LIST_HEAD(&lport->lport_list);
 
 		spin_lock_irqsave(&fcloop_lock, flags);
 		list_add_tail(&lport->lport_list, &fcloop_lports);
 		spin_unlock_irqrestore(&fcloop_lock, flags);
-
-		/* mark all of the input buffer consumed */
-		ret = count;
 	}
 
 out_free_opts:
 	kfree(opts);
+out_free_lport:
+	/* free only if we're going to fail */
+	if (ret)
+		kfree(lport);
+
 	return ret ? ret : count;
 }
 
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
 
 	wait_for_completion(&lport->unreg_done);
 
+	kfree(lport);
+
 	return ret;
 }
 
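The fcloop rework replaces the single work item and bare kfree() with a kref-counted request plus an explicit initiator-side state (INI_IO_START/ACTIVE/ABORTED/COMPLETED), so an abort racing a completion can no longer free or dereference a request the other path still owns. The lifetime rules, reduced to a sketch (real kernel kref API, hypothetical request type):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_req {
	struct kref		ref;	/* one reference owned by the IO itself */
	spinlock_t		lock;	/* guards the state field */
	int			state;
};

static void example_req_free(struct kref *ref)
{
	kfree(container_of(ref, struct example_req, ref));
}

static struct example_req *example_req_alloc(void)
{
	struct example_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (req) {
		kref_init(&req->ref);		/* the IO path's reference */
		spin_lock_init(&req->lock);
	}
	return req;
}

/*
 * An abort takes its own reference, but only if the request is still
 * alive: kref_get_unless_zero() fails once the final put has run, which
 * is exactly the abort-vs-completion race the fcloop changes close.
 */
static bool example_req_start_abort(struct example_req *req)
{
	return kref_get_unless_zero(&req->ref) != 0;
}

/* each path that is done with the request drops only its own reference */
static void example_req_put(struct example_req *req)
{
	kref_put(&req->ref, example_req_free);
}

Normal completion, the abort work item, and an abort that lost the race each end by dropping the one reference they hold, mirroring fcloop_tfcp_req_get()/fcloop_tfcp_req_put() above.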
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 1e21b286f299..fdfcc961029f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
 
 static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
+	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
 };
 
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 0e4c15754c58..978e169c11bf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -921,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 
 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
 {
-	pr_info("freeing queue %d\n", queue->idx);
+	pr_debug("freeing queue %d\n", queue->idx);
 
 	nvmet_sq_destroy(&queue->nvme_sq);
 
@@ -1503,25 +1503,9 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-	struct nvmet_rdma_queue *queue;
-
 	nvmet_unregister_transport(&nvmet_rdma_ops);
-
-	flush_scheduled_work();
-
-	mutex_lock(&nvmet_rdma_queue_mutex);
-	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
-			struct nvmet_rdma_queue, queue_list))) {
-		list_del_init(&queue->queue_list);
-
-		mutex_unlock(&nvmet_rdma_queue_mutex);
-		__nvmet_rdma_queue_disconnect(queue);
-		mutex_lock(&nvmet_rdma_queue_mutex);
-	}
-	mutex_unlock(&nvmet_rdma_queue_mutex);
-
-	flush_scheduled_work();
 	ib_unregister_client(&nvmet_rdma_ib_client);
+	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
 	ida_destroy(&nvmet_rdma_queue_ida);
 }
 