author     Matthew Wilcox <matthew.r.wilcox@intel.com>   2012-08-07 15:56:23 -0400
committer  Matthew Wilcox <matthew.r.wilcox@intel.com>   2012-08-07 15:56:23 -0400
commit     a09115b23e2002bb35b7bfd337683f00875671ec (patch)
tree       1335ed44723c27fe0fd9ca221915b08fba4fbec8
parent     9e866774aab5d2654b0fa8f97890f68913f05700 (diff)
NVMe: Cancel outstanding IOs on queue deletion

If the device is hot-unplugged while there are active commands, we should
time out the I/Os so that upper layers don't just see the I/Os disappear.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
-rw-r--r--  drivers/block/nvme.c | 55
1 file changed, 32 insertions(+), 23 deletions(-)
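The gist of the change: the old per-queue timeout scan (nvme_timeout_ios) is generalized into nvme_cancel_ios(nvmeq, timeout), which either reaps only commands whose deadline has passed (the polling-kthread path) or reaps every outstanding command (the queue-deletion path), completing each with an abort status so upper layers see an error instead of an I/O that silently vanishes. Below is a minimal, self-contained user-space sketch of that pattern; the names here (cancel_ios, cmd_info, bio_done, SC_ABORT_REQ value) are illustrative stand-ins, not the driver's real API, which is shown in the diff that follows.

/*
 * Simplified user-space model of the cancellation pattern: walk a bitmap
 * of outstanding command IDs and complete each one with an abort status.
 * When 'timeout_only' is set, only commands past their deadline are
 * cancelled (periodic poll); otherwise everything outstanding is
 * cancelled (queue teardown).  Names are illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define QUEUE_DEPTH   16
#define SC_ABORT_REQ  0x07      /* stand-in for the NVMe abort status code */

struct cmd_info {
        time_t deadline;                        /* when this command times out */
        void (*done)(int cmdid, int status);    /* completion callback */
};

static uint32_t outstanding;                    /* one bit per in-flight cmdid */
static struct cmd_info cmds[QUEUE_DEPTH];

static void cancel_ios(bool timeout_only)
{
        time_t now = time(NULL);

        for (int cmdid = 0; cmdid < QUEUE_DEPTH; cmdid++) {
                if (!(outstanding & (1u << cmdid)))
                        continue;
                if (timeout_only && now <= cmds[cmdid].deadline)
                        continue;
                fprintf(stderr, "Cancelling I/O %d\n", cmdid);
                outstanding &= ~(1u << cmdid);          /* analogous to cancel_cmdid() */
                cmds[cmdid].done(cmdid, SC_ABORT_REQ);  /* complete with abort status */
        }
}

static void bio_done(int cmdid, int status)
{
        printf("cmd %d completed, status 0x%x\n", cmdid, status);
}

int main(void)
{
        /* Two in-flight commands: one already past its deadline, one not. */
        outstanding = (1u << 0) | (1u << 1);
        cmds[0] = (struct cmd_info){ .deadline = time(NULL) - 1, .done = bio_done };
        cmds[1] = (struct cmd_info){ .deadline = time(NULL) + 60, .done = bio_done };

        cancel_ios(true);       /* poll path: only cmd 0 is cancelled */
        cancel_ios(false);      /* teardown path: cmd 1 is cancelled too */
        return 0;
}

Passing a bool keeps one walk of the command-ID bitmap serving both the periodic timeout check and the teardown path, which is how the patch reuses the old loop rather than duplicating it.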
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 214037055e2a..f9ad514c9227 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -868,6 +868,33 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
+/**
+ * nvme_cancel_ios - Cancel outstanding I/Os
+ * @queue: The queue to cancel I/Os on
+ * @timeout: True to only cancel I/Os which have timed out
+ */
+static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+{
+	int depth = nvmeq->q_depth - 1;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	unsigned long now = jiffies;
+	int cmdid;
+
+	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+		void *ctx;
+		nvme_completion_fn fn;
+		static struct nvme_completion cqe = {
+			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+		};
+
+		if (timeout && !time_after(now, info[cmdid].timeout))
+			continue;
+		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+		fn(nvmeq->dev, ctx, &cqe);
+	}
+}
+
 static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
 {
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
@@ -882,6 +909,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
 	struct nvme_queue *nvmeq = dev->queues[qid];
 	int vector = dev->entry[nvmeq->cq_vector].vector;
 
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_cancel_ios(nvmeq, false);
+	spin_unlock_irq(&nvmeq->q_lock);
+
 	irq_set_affinity_hint(vector, NULL);
 	free_irq(vector, nvmeq);
 
@@ -1236,26 +1267,6 @@ static const struct block_device_operations nvme_fops = {
 	.compat_ioctl	= nvme_ioctl,
 };
 
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
-	int depth = nvmeq->q_depth - 1;
-	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
-	unsigned long now = jiffies;
-	int cmdid;
-
-	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
-		void *ctx;
-		nvme_completion_fn fn;
-		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
-		if (!time_after(now, info[cmdid].timeout))
-			continue;
-		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
-		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-		fn(nvmeq->dev, ctx, &cqe);
-	}
-}
-
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
 	while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1287,7 +1298,7 @@ static int nvme_kthread(void *data)
 				spin_lock_irq(&nvmeq->q_lock);
 				if (nvme_process_cq(nvmeq))
 					printk("process_cq did something\n");
-				nvme_timeout_ios(nvmeq);
+				nvme_cancel_ios(nvmeq, true);
 				nvme_resubmit_bios(nvmeq);
 				spin_unlock_irq(&nvmeq->q_lock);
 			}
@@ -1549,8 +1560,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
 	list_del(&dev->node);
 	spin_unlock(&dev_list_lock);
 
-	/* TODO: wait all I/O finished or cancel them */
-
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
 		list_del(&ns->list);
 		del_gendisk(ns->disk);