author	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-05-12 13:50:28 -0400
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:53:02 -0400
commit	8de055350fbaa96b6563892c195a60be583faa9c (patch)
tree	2fc1f69751cb553422dedc6ce0514e64926b1b39 /drivers/block/nvme.c
parent	21075bdee0a6f56058920d889df4ae561bfed754 (diff)
NVMe: Add support for timing out I/Os
In the kthread, walk the list of outstanding I/Os and check that they have not hit their timeout.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
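The patch implements this as nvme_timeout_ios(), called from the polling kthread: it walks the bitmap of in-flight command ids with for_each_set_bit(), compares each command's stored deadline against jiffies with time_after(), and completes any expired command through its normal completion handler with an NVME_SC_ABORT_REQ status. As a rough illustration of that scan pattern only, here is a minimal userspace C sketch; the names (cmdid_bitmap, deadline[], timeout_scan()) are invented for the example and are not the driver's.

/*
 * Standalone sketch of the timeout-scan pattern: walk a bitmap of
 * outstanding command ids, compare each command's deadline against "now",
 * and force-complete the expired ones.  Illustrative names only.
 */
#include <stdio.h>
#include <time.h>

#define MAX_CMDS 64

static unsigned long long cmdid_bitmap;   /* bit set => command outstanding */
static time_t deadline[MAX_CMDS];         /* per-command timeout deadline   */

static void timeout_scan(time_t now)
{
	for (int cmdid = 0; cmdid < MAX_CMDS; cmdid++) {
		if (!(cmdid_bitmap & (1ULL << cmdid)))
			continue;                 /* slot not in use */
		if (now <= deadline[cmdid])
			continue;                 /* not expired yet */
		printf("Timing out I/O %d\n", cmdid);
		cmdid_bitmap &= ~(1ULL << cmdid); /* cancel the command id */
		/* the driver would call the command's completion handler
		 * here with status NVME_SC_ABORT_REQ */
	}
}

int main(void)
{
	time_t now = time(NULL);

	/* two outstanding commands: one already past its deadline, one not */
	cmdid_bitmap |= 1ULL << 3;  deadline[3] = now - 5;
	cmdid_bitmap |= 1ULL << 7;  deadline[7] = now + 30;

	timeout_scan(now);          /* prints "Timing out I/O 3" only */
	return 0;
}

Running the sketch prints "Timing out I/O 3", since only command 3's deadline has passed. In the driver the scan additionally runs under nvmeq->q_lock (see the last hunk), so it cannot race with normal completion processing in nvme_process_cq().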
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--	drivers/block/nvme.c	37
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 9ca9db903ceb..9c0ab2af0fae 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -18,6 +18,7 @@
 
 #include <linux/nvme.h>
 #include <linux/bio.h>
+#include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -601,15 +602,15 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 typedef void (*completion_fn)(struct nvme_queue *, void *,
 						struct nvme_completion *);
 
+static const completion_fn nvme_completions[4] = {
+	[sync_completion_id] = sync_completion,
+	[bio_completion_id] = bio_completion,
+};
+
 static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	u16 head, phase;
 
-	static const completion_fn completions[4] = {
-		[sync_completion_id] = sync_completion,
-		[bio_completion_id] = bio_completion,
-	};
-
 	head = nvmeq->cq_head;
 	phase = nvmeq->cq_phase;
 
@@ -629,7 +630,7 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 		data = free_cmdid(nvmeq, cqe.command_id);
 		handler = data & 3;
 		ptr = (void *)(data & ~3UL);
-		completions[handler](nvmeq, ptr, &cqe);
+		nvme_completions[handler](nvmeq, ptr, &cqe);
 	}
 
 	/* If the controller ignores the cq head doorbell and continuously
@@ -1172,6 +1173,29 @@ static const struct block_device_operations nvme_fops = {
 	.compat_ioctl	= nvme_ioctl,
 };
 
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+	int depth = nvmeq->q_depth - 1;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	unsigned long now = jiffies;
+	int cmdid;
+
+	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+		unsigned long data;
+		void *ptr;
+		unsigned char handler;
+		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+		if (!time_after(now, info[cmdid].timeout))
+			continue;
+		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+		data = cancel_cmdid(nvmeq, cmdid);
+		handler = data & 3;
+		ptr = (void *)(data & ~3UL);
+		nvme_completions[handler](nvmeq, ptr, &cqe);
+	}
+}
+
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
 	while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1203,6 +1227,7 @@ static int nvme_kthread(void *data)
 			spin_lock_irq(&nvmeq->q_lock);
 			if (nvme_process_cq(nvmeq))
 				printk("process_cq did something\n");
+			nvme_timeout_ios(nvmeq);
 			nvme_resubmit_bios(nvmeq);
 			spin_unlock_irq(&nvmeq->q_lock);
 		}