diff options
| author | Keith Busch <keith.busch@intel.com> | 2015-01-07 20:55:51 -0500 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2015-01-08 11:02:18 -0500 |
| commit | cef6a948271d5437f96e731878f2e9cb8c9820b7 (patch) | |
| tree | 76b1cb6bf8b267e403f9215bdc5e32c68d861af6 | |
| parent | 0fb59cbc5f133207535b25ec7d16fba24d549ee2 (diff) | |
NVMe: Command abort handling fixes
Aborts all requeued commands prior to killing the request_queue. For
commands that time out on a dying request queue, set the "Do Not Retry"
bit on the command status so the command cannot be requeued. Finally, if
the driver is requested to abort a command it did not start, do nothing.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
| -rw-r--r-- | drivers/block/nvme-core.c | 17 |
1 files changed, 13 insertions, 4 deletions
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 5fcb993fc6c9..ad9a9b61fc1d 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
| @@ -1064,15 +1064,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx, | |||
| 1064 | void *ctx; | 1064 | void *ctx; |
| 1065 | nvme_completion_fn fn; | 1065 | nvme_completion_fn fn; |
| 1066 | struct nvme_cmd_info *cmd; | 1066 | struct nvme_cmd_info *cmd; |
| 1067 | static struct nvme_completion cqe = { | 1067 | struct nvme_completion cqe; |
| 1068 | .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), | 1068 | |
| 1069 | }; | 1069 | if (!blk_mq_request_started(req)) |
| 1070 | return; | ||
| 1070 | 1071 | ||
| 1071 | cmd = blk_mq_rq_to_pdu(req); | 1072 | cmd = blk_mq_rq_to_pdu(req); |
| 1072 | 1073 | ||
| 1073 | if (cmd->ctx == CMD_CTX_CANCELLED) | 1074 | if (cmd->ctx == CMD_CTX_CANCELLED) |
| 1074 | return; | 1075 | return; |
| 1075 | 1076 | ||
| 1077 | if (blk_queue_dying(req->q)) | ||
| 1078 | cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); | ||
| 1079 | else | ||
| 1080 | cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); | ||
| 1081 | |||
| 1082 | |||
| 1076 | dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", | 1083 | dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", |
| 1077 | req->tag, nvmeq->qid); | 1084 | req->tag, nvmeq->qid); |
| 1078 | ctx = cancel_cmd_info(cmd, &fn); | 1085 | ctx = cancel_cmd_info(cmd, &fn); |
| @@ -2429,8 +2436,10 @@ static void nvme_dev_remove(struct nvme_dev *dev) | |||
| 2429 | list_for_each_entry(ns, &dev->namespaces, list) { | 2436 | list_for_each_entry(ns, &dev->namespaces, list) { |
| 2430 | if (ns->disk->flags & GENHD_FL_UP) | 2437 | if (ns->disk->flags & GENHD_FL_UP) |
| 2431 | del_gendisk(ns->disk); | 2438 | del_gendisk(ns->disk); |
| 2432 | if (!blk_queue_dying(ns->queue)) | 2439 | if (!blk_queue_dying(ns->queue)) { |
| 2440 | blk_mq_abort_requeue_list(ns->queue); | ||
| 2433 | blk_cleanup_queue(ns->queue); | 2441 | blk_cleanup_queue(ns->queue); |
| 2442 | } | ||
| 2434 | } | 2443 | } |
| 2435 | } | 2444 | } |
| 2436 | 2445 | ||
