Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r--  drivers/block/nvme-core.c | 177
1 file changed, 127 insertions(+), 50 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b1d5d8797315..d826bf3e62c8 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -106,7 +106,7 @@ struct nvme_queue {
 	dma_addr_t cq_dma_addr;
 	u32 __iomem *q_db;
 	u16 q_depth;
-	u16 cq_vector;
+	s16 cq_vector;
 	u16 sq_head;
 	u16 sq_tail;
 	u16 cq_head;
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
 	cmd->fn = handler;
 	cmd->ctx = ctx;
 	cmd->aborted = 0;
+	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
 }
 
 /* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 	if (unlikely(status)) {
 		if (!(status & NVME_SC_DNR || blk_noretry_request(req))
 			&& (jiffies - req->start_time) < req->timeout) {
+			unsigned long flags;
+
 			blk_mq_requeue_request(req);
-			blk_mq_kick_requeue_list(req->q);
+			spin_lock_irqsave(req->q->queue_lock, flags);
+			if (!blk_queue_stopped(req->q))
+				blk_mq_kick_requeue_list(req->q);
+			spin_unlock_irqrestore(req->q->queue_lock, flags);
 			return;
 		}
 		req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
-	blk_mq_start_request(req);
-
 	nvme_set_info(cmd, iod, req_completion);
 	spin_lock_irq(&nvmeq->q_lock);
 	if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
+	req->cmd_flags |= REQ_NO_TIMEOUT;
 	cmd_info = blk_mq_rq_to_pdu(req);
 	nvme_set_info(cmd_info, req, async_req_completion);
 
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
 	struct nvme_command cmd;
 
 	if (!nvmeq->qid || cmd_rq->aborted) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dev_list_lock, flags);
 		if (work_busy(&dev->reset_work))
-			return;
+			goto out;
 		list_del_init(&dev->node);
 		dev_warn(&dev->pci_dev->dev,
 			"I/O %d QID %d timeout, reset controller\n",
 							req->tag, nvmeq->qid);
 		dev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &dev->reset_work);
+ out:
+		spin_unlock_irqrestore(&dev_list_lock, flags);
 		return;
 	}
 
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
 	void *ctx;
 	nvme_completion_fn fn;
 	struct nvme_cmd_info *cmd;
-	static struct nvme_completion cqe = {
-		.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
-	};
+	struct nvme_completion cqe;
+
+	if (!blk_mq_request_started(req))
+		return;
 
 	cmd = blk_mq_rq_to_pdu(req);
 
 	if (cmd->ctx == CMD_CTX_CANCELLED)
 		return;
 
+	if (blk_queue_dying(req->q))
+		cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+	else
+		cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
+
 	dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
 						req->tag, nvmeq->qid);
 	ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd->nvmeq;
 
-	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-							nvmeq->qid);
-	if (nvmeq->dev->initialized)
-		nvme_abort_req(req);
-
 	/*
 	 * The aborted req will be completed on receiving the abort req.
 	 * We enable the timer again. If hit twice, it'll cause a device reset,
 	 * as the device then is in a faulty state.
 	 */
-	return BLK_EH_RESET_TIMER;
+	int ret = BLK_EH_RESET_TIMER;
+
+	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+							nvmeq->qid);
+
+	spin_lock_irq(&nvmeq->q_lock);
+	if (!nvmeq->dev->initialized) {
+		/*
+		 * Force cancelled command frees the request, which requires we
+		 * return BLK_EH_NOT_HANDLED.
+		 */
+		nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+		ret = BLK_EH_NOT_HANDLED;
+	} else
+		nvme_abort_req(req);
+	spin_unlock_irq(&nvmeq->q_lock);
+
+	return ret;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-	int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+	int vector;
 
 	spin_lock_irq(&nvmeq->q_lock);
+	if (nvmeq->cq_vector == -1) {
+		spin_unlock_irq(&nvmeq->q_lock);
+		return 1;
+	}
+	vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
 	nvmeq->dev->online_queues--;
+	nvmeq->cq_vector = -1;
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 		adapter_delete_sq(dev, qid);
 		adapter_delete_cq(dev, qid);
 	}
+	if (!qid && dev->admin_q)
+		blk_mq_freeze_queue_start(dev->admin_q);
 	nvme_clear_queue(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-							int depth, int vector)
+							int depth)
 {
 	struct device *dmadev = &dev->pci_dev->dev;
 	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
 	nvmeq->q_depth = depth;
-	nvmeq->cq_vector = vector;
 	nvmeq->qid = qid;
 	dev->queue_count++;
 	dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	struct nvme_dev *dev = nvmeq->dev;
 	int result;
 
+	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
 		return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
 	.timeout	= nvme_timeout,
 };
 
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+	if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+		blk_cleanup_queue(dev->admin_q);
+		blk_mq_free_tag_set(&dev->admin_tagset);
+	}
+}
+
 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 {
 	if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 			return -ENOMEM;
 
 		dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
-		if (!dev->admin_q) {
+		if (IS_ERR(dev->admin_q)) {
 			blk_mq_free_tag_set(&dev->admin_tagset);
 			return -ENOMEM;
 		}
-	}
+		if (!blk_get_queue(dev->admin_q)) {
+			nvme_dev_remove_admin(dev);
+			return -ENODEV;
+		}
+	} else
+		blk_mq_unfreeze_queue(dev->admin_q);
 
 	return 0;
 }
 
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
-	if (dev->admin_q)
-		blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
 	int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
 	nvmeq = dev->queues[0];
 	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
 		if (!nvmeq)
 			return -ENOMEM;
 	}
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	if (result)
 		goto free_nvmeq;
 
-	result = nvme_alloc_admin_tags(dev);
-	if (result)
-		goto free_nvmeq;
-
+	nvmeq->cq_vector = 0;
 	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
 	if (result)
-		goto free_tags;
+		goto free_nvmeq;
 
 	return result;
 
- free_tags:
-	nvme_free_admin_tags(dev);
  free_nvmeq:
 	nvme_free_queues(dev, 0);
 	return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
 	unsigned i;
 
 	for (i = dev->queue_count; i <= dev->max_qid; i++)
-		if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+		if (!nvme_alloc_queue(dev, i, dev->q_depth))
 			break;
 
 	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
 			break;
 		if (!schedule_timeout(ADMIN_TIMEOUT) ||
 					fatal_signal_pending(current)) {
+			/*
+			 * Disable the controller first since we can't trust it
+			 * at this point, but leave the admin queue enabled
+			 * until all queue deletion requests are flushed.
+			 * FIXME: This may take a while if there are more h/w
+			 * queues than admin tags.
+			 */
 			set_current_state(TASK_RUNNING);
-
 			nvme_disable_ctrl(dev, readq(&dev->bar->cap));
-			nvme_disable_queue(dev, 0);
-
-			send_sig(SIGKILL, dq->worker->task, 1);
+			nvme_clear_queue(dev->queues[0]);
 			flush_kthread_worker(dq->worker);
+			nvme_disable_queue(dev, 0);
 			return;
 		}
 	}
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
 {
 	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
 						cmdinfo.work);
-	allow_signal(SIGKILL);
 	if (nvme_delete_sq(nvmeq))
 		nvme_del_queue_end(nvmeq);
 }
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
 		kthread_stop(tmp);
 }
 
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+	struct nvme_ns *ns;
+
+	list_for_each_entry(ns, &dev->namespaces, list) {
+		blk_mq_freeze_queue_start(ns->queue);
+
+		spin_lock(ns->queue->queue_lock);
+		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+		spin_unlock(ns->queue->queue_lock);
+
+		blk_mq_cancel_requeue_work(ns->queue);
+		blk_mq_stop_hw_queues(ns->queue);
+	}
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+	struct nvme_ns *ns;
+
+	list_for_each_entry(ns, &dev->namespaces, list) {
+		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+		blk_mq_unfreeze_queue(ns->queue);
+		blk_mq_start_stopped_hw_queues(ns->queue, true);
+		blk_mq_kick_requeue_list(ns->queue);
+	}
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 	dev->initialized = 0;
 	nvme_dev_list_remove(dev);
 
-	if (dev->bar)
+	if (dev->bar) {
+		nvme_freeze_queues(dev);
 		csts = readl(&dev->bar->csts);
+	}
 	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
 		for (i = dev->queue_count - 1; i >= 0; i--) {
 			struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 	nvme_dev_unmap(dev);
 }
 
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
-	if (dev->admin_q && !blk_queue_dying(dev->admin_q))
-		blk_cleanup_queue(dev->admin_q);
-}
-
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 	list_for_each_entry(ns, &dev->namespaces, list) {
 		if (ns->disk->flags & GENHD_FL_UP)
 			del_gendisk(ns->disk);
-		if (!blk_queue_dying(ns->queue))
+		if (!blk_queue_dying(ns->queue)) {
+			blk_mq_abort_requeue_list(ns->queue);
 			blk_cleanup_queue(ns->queue);
+		}
 	}
 }
 
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
 	nvme_free_namespaces(dev);
 	nvme_release_instance(dev);
 	blk_mq_free_tag_set(&dev->tagset);
+	blk_put_queue(dev->admin_q);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
 	}
 
 	nvme_init_queue(dev->queues[0], 0);
+	result = nvme_alloc_admin_tags(dev);
+	if (result)
+		goto disable;
 
 	result = nvme_setup_io_queues(dev);
 	if (result)
-		goto disable;
+		goto free_tags;
 
 	nvme_set_irq_hints(dev);
 
 	return result;
 
+ free_tags:
+	nvme_dev_remove_admin(dev);
  disable:
 	nvme_disable_queue(dev, 0);
 	nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		dev->reset_workfn = nvme_remove_disks;
 		queue_work(nvme_workq, &dev->reset_work);
 		spin_unlock(&dev_list_lock);
+	} else {
+		nvme_unfreeze_queues(dev);
+		nvme_set_irq_hints(dev);
 	}
 	dev->initialized = 1;
 	return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 	flush_work(&dev->reset_work);
 	misc_deregister(&dev->miscdev);
-	nvme_dev_remove(dev);
 	nvme_dev_shutdown(dev);
+	nvme_dev_remove(dev);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
-	nvme_free_admin_tags(dev);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
 }
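
Editor's note: the nvme_timeout() hunk above relies on the blk-mq timeout contract of this kernel generation. Below is a minimal sketch of that contract for context; it is not part of the patch, and the example_* names and the device_alive flag are hypothetical.

#include <linux/blk-mq.h>

struct example_cmd {			/* hypothetical per-request pdu */
	bool device_alive;
};

/* Hypothetical driver helpers, assumed to be defined elsewhere. */
extern void example_force_cancel(struct request *req);
extern void example_send_abort(struct request *req);

static enum blk_eh_timer_return example_timeout(struct request *req,
						bool reserved)
{
	/* Per-request driver data lives in the pdu, as with nvme_cmd_info. */
	struct example_cmd *cmd = blk_mq_rq_to_pdu(req);

	if (!cmd->device_alive) {
		/*
		 * Completing (and thereby freeing) the request here, as
		 * nvme_cancel_queue_ios() does when the device is not
		 * initialized, requires returning BLK_EH_NOT_HANDLED so the
		 * block layer does not touch the request again.
		 */
		example_force_cancel(req);
		return BLK_EH_NOT_HANDLED;
	}

	/*
	 * Otherwise the hardware still owns the request: issue an
	 * asynchronous abort, as nvme_abort_req() does, and re-arm the
	 * timer. A second timeout then escalates to a controller reset.
	 */
	example_send_abort(req);
	return BLK_EH_RESET_TIMER;
}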