diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-07-22 16:21:45 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-07-22 16:21:45 -0400 |
commit | 744130842193795e13ad4efcbf17f3dfb1309311 (patch) | |
tree | 7f1d37913bb28ceef44496e4a6aac1f344d6e3c8 /drivers | |
parent | 165ea0d1c2286f550efbf14dc3528267af088f08 (diff) | |
parent | 9b382768135ee3ff282f828c906574a8478e036b (diff) |
Merge tag 'nvme-for-4.18' of git://git.infradead.org/nvme
Pull NVMe fixes from Christoph Hellwig:
- fix a regression in 4.18 that causes a memory leak on probe failure
(Keith Busch)
- fix a deadlock in the passthrough ioctl code (Scott Bauer)
- don't enable AENs if not supported (Weiping Zhang)
- fix an old regression in metadata handling in the passthrough ioctl
code (Roland Dreier)
* tag 'nvme-for-4.18' of git://git.infradead.org/nvme:
nvme: fix handling of metadata_len for NVME_IOCTL_IO_CMD
nvme: don't enable AEN if not supported
nvme: ensure forward progress during Admin passthru
nvme-pci: fix memory leak on probe failure
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/nvme/host/core.c | 63 | ||||
-rw-r--r-- | drivers/nvme/host/pci.c | 12 |
2 files changed, 41 insertions, 34 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 46df030b2c3f..bf65501e6ed6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class; | |||
100 | static void nvme_ns_remove(struct nvme_ns *ns); | 100 | static void nvme_ns_remove(struct nvme_ns *ns); |
101 | static int nvme_revalidate_disk(struct gendisk *disk); | 101 | static int nvme_revalidate_disk(struct gendisk *disk); |
102 | static void nvme_put_subsystem(struct nvme_subsystem *subsys); | 102 | static void nvme_put_subsystem(struct nvme_subsystem *subsys); |
103 | static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, | ||
104 | unsigned nsid); | ||
105 | |||
106 | static void nvme_set_queue_dying(struct nvme_ns *ns) | ||
107 | { | ||
108 | /* | ||
109 | * Revalidating a dead namespace sets capacity to 0. This will end | ||
110 | * buffered writers dirtying pages that can't be synced. | ||
111 | */ | ||
112 | if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) | ||
113 | return; | ||
114 | revalidate_disk(ns->disk); | ||
115 | blk_set_queue_dying(ns->queue); | ||
116 | /* Forcibly unquiesce queues to avoid blocking dispatch */ | ||
117 | blk_mq_unquiesce_queue(ns->queue); | ||
118 | } | ||
103 | 119 | ||
104 | static void nvme_queue_scan(struct nvme_ctrl *ctrl) | 120 | static void nvme_queue_scan(struct nvme_ctrl *ctrl) |
105 | { | 121 | { |
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count); | |||
1044 | 1060 | ||
1045 | static void nvme_enable_aen(struct nvme_ctrl *ctrl) | 1061 | static void nvme_enable_aen(struct nvme_ctrl *ctrl) |
1046 | { | 1062 | { |
1047 | u32 result; | 1063 | u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; |
1048 | int status; | 1064 | int status; |
1049 | 1065 | ||
1050 | status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, | 1066 | if (!supported_aens) |
1051 | ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result); | 1067 | return; |
1068 | |||
1069 | status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, | ||
1070 | NULL, 0, &result); | ||
1052 | if (status) | 1071 | if (status) |
1053 | dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", | 1072 | dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", |
1054 | ctrl->oaes & NVME_AEN_SUPPORTED); | 1073 | supported_aens); |
1055 | } | 1074 | } |
1056 | 1075 | ||
1057 | static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) | 1076 | static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) |
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | |||
1151 | 1170 | ||
1152 | static void nvme_update_formats(struct nvme_ctrl *ctrl) | 1171 | static void nvme_update_formats(struct nvme_ctrl *ctrl) |
1153 | { | 1172 | { |
1154 | struct nvme_ns *ns, *next; | 1173 | struct nvme_ns *ns; |
1155 | LIST_HEAD(rm_list); | ||
1156 | 1174 | ||
1157 | down_write(&ctrl->namespaces_rwsem); | 1175 | down_read(&ctrl->namespaces_rwsem); |
1158 | list_for_each_entry(ns, &ctrl->namespaces, list) { | 1176 | list_for_each_entry(ns, &ctrl->namespaces, list) |
1159 | if (ns->disk && nvme_revalidate_disk(ns->disk)) { | 1177 | if (ns->disk && nvme_revalidate_disk(ns->disk)) |
1160 | list_move_tail(&ns->list, &rm_list); | 1178 | nvme_set_queue_dying(ns); |
1161 | } | 1179 | up_read(&ctrl->namespaces_rwsem); |
1162 | } | ||
1163 | up_write(&ctrl->namespaces_rwsem); | ||
1164 | 1180 | ||
1165 | list_for_each_entry_safe(ns, next, &rm_list, list) | 1181 | nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); |
1166 | nvme_ns_remove(ns); | ||
1167 | } | 1182 | } |
1168 | 1183 | ||
1169 | static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) | 1184 | static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) |
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | |||
1218 | effects = nvme_passthru_start(ctrl, ns, cmd.opcode); | 1233 | effects = nvme_passthru_start(ctrl, ns, cmd.opcode); |
1219 | status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, | 1234 | status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, |
1220 | (void __user *)(uintptr_t)cmd.addr, cmd.data_len, | 1235 | (void __user *)(uintptr_t)cmd.addr, cmd.data_len, |
1221 | (void __user *)(uintptr_t)cmd.metadata, cmd.metadata, | 1236 | (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, |
1222 | 0, &cmd.result, timeout); | 1237 | 0, &cmd.result, timeout); |
1223 | nvme_passthru_end(ctrl, effects); | 1238 | nvme_passthru_end(ctrl, effects); |
1224 | 1239 | ||
@@ -3138,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, | |||
3138 | 3153 | ||
3139 | down_write(&ctrl->namespaces_rwsem); | 3154 | down_write(&ctrl->namespaces_rwsem); |
3140 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { | 3155 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { |
3141 | if (ns->head->ns_id > nsid) | 3156 | if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) |
3142 | list_move_tail(&ns->list, &rm_list); | 3157 | list_move_tail(&ns->list, &rm_list); |
3143 | } | 3158 | } |
3144 | up_write(&ctrl->namespaces_rwsem); | 3159 | up_write(&ctrl->namespaces_rwsem); |
@@ -3542,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) | |||
3542 | if (ctrl->admin_q) | 3557 | if (ctrl->admin_q) |
3543 | blk_mq_unquiesce_queue(ctrl->admin_q); | 3558 | blk_mq_unquiesce_queue(ctrl->admin_q); |
3544 | 3559 | ||
3545 | list_for_each_entry(ns, &ctrl->namespaces, list) { | 3560 | list_for_each_entry(ns, &ctrl->namespaces, list) |
3546 | /* | 3561 | nvme_set_queue_dying(ns); |
3547 | * Revalidating a dead namespace sets capacity to 0. This will | ||
3548 | * end buffered writers dirtying pages that can't be synced. | ||
3549 | */ | ||
3550 | if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) | ||
3551 | continue; | ||
3552 | revalidate_disk(ns->disk); | ||
3553 | blk_set_queue_dying(ns->queue); | ||
3554 | 3562 | ||
3555 | /* Forcibly unquiesce queues to avoid blocking dispatch */ | ||
3556 | blk_mq_unquiesce_queue(ns->queue); | ||
3557 | } | ||
3558 | up_read(&ctrl->namespaces_rwsem); | 3563 | up_read(&ctrl->namespaces_rwsem); |
3559 | } | 3564 | } |
3560 | EXPORT_SYMBOL_GPL(nvme_kill_queues); | 3565 | EXPORT_SYMBOL_GPL(nvme_kill_queues); |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ba943f211687..ddd441b1516a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -2556,11 +2556,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2556 | 2556 | ||
2557 | quirks |= check_vendor_combination_bug(pdev); | 2557 | quirks |= check_vendor_combination_bug(pdev); |
2558 | 2558 | ||
2559 | result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, | ||
2560 | quirks); | ||
2561 | if (result) | ||
2562 | goto release_pools; | ||
2563 | |||
2564 | /* | 2559 | /* |
2565 | * Double check that our mempool alloc size will cover the biggest | 2560 | * Double check that our mempool alloc size will cover the biggest |
2566 | * command we support. | 2561 | * command we support. |
@@ -2578,6 +2573,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2578 | goto release_pools; | 2573 | goto release_pools; |
2579 | } | 2574 | } |
2580 | 2575 | ||
2576 | result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, | ||
2577 | quirks); | ||
2578 | if (result) | ||
2579 | goto release_mempool; | ||
2580 | |||
2581 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); | 2581 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
2582 | 2582 | ||
2583 | nvme_get_ctrl(&dev->ctrl); | 2583 | nvme_get_ctrl(&dev->ctrl); |
@@ -2585,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2585 | 2585 | ||
2586 | return 0; | 2586 | return 0; |
2587 | 2587 | ||
2588 | release_mempool: | ||
2589 | mempool_destroy(dev->iod_mempool); | ||
2588 | release_pools: | 2590 | release_pools: |
2589 | nvme_release_prp_pools(dev); | 2591 | nvme_release_prp_pools(dev); |
2590 | unmap: | 2592 | unmap: |