author	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-02-06 18:30:16 -0500
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:52:56 -0400
commit	e85248e516c550382ba33ca325c272a0ca397e44 (patch)
tree	b6c3b8c9d45dbc37dc6a4f2c4101ecab74853ebd /drivers/block/nvme.c
parent	ec6ce618d65b5ce1bef83a5509255107a0feac44 (diff)
NVMe: Record the timeout for each command
In addition to recording the completion data for each command, record the
anticipated completion time. Choose a timeout of 5 seconds for normal I/Os
and 60 seconds for admin I/Os.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
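For orientation, here is a small user-space sketch of the layout this change builds on: each queue's cmdid_data buffer is a bitmap of command IDs followed immediately by an array of per-command records, and allocating a command ID stamps the record with an absolute deadline (jiffies + timeout). Only the names taken from the diff (cmd_info fields, the timeout macros, the bitmap-then-array layout) mirror the patch; HZ, the fake jiffies counter, struct queue and the main harness are illustrative stand-ins, not driver code.

/*
 * Stand-alone sketch of the cmdid bitmap + per-command info layout.
 * Compile with any C compiler; nothing here is kernel code.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define HZ		 100			/* stand-in tick rate */
#define IO_TIMEOUT	 (5 * HZ)
#define ADMIN_TIMEOUT	 (60 * HZ)

static unsigned long jiffies;			/* fake clock, never advanced here */

struct cmd_info {
	unsigned long ctx;			/* context pointer | handler id */
	unsigned long timeout;			/* absolute expiry, in jiffies */
};

struct queue {
	unsigned q_depth;
	unsigned long cmdid_data[];		/* bitmap words, then struct cmd_info[] */
};

static struct cmd_info *cmd_info(struct queue *q)
{
	/* The info array begins right after the bitmap words. */
	return (void *)&q->cmdid_data[BITS_TO_LONGS(q->q_depth)];
}

int main(void)
{
	unsigned depth = 64;
	size_t extra = BITS_TO_LONGS(depth) * sizeof(long) +
		       depth * sizeof(struct cmd_info);
	struct queue *q = calloc(1, sizeof(*q) + extra);
	int cmdid = 3;				/* pretend this bit was just claimed */

	q->q_depth = depth;
	q->cmdid_data[0] |= 1UL << cmdid;	/* mark the command ID in use */
	cmd_info(q)[cmdid].ctx = 0;		/* no context/handler in this demo */
	cmd_info(q)[cmdid].timeout = jiffies + IO_TIMEOUT;
	printf("cmdid %d expires at jiffy %lu\n", cmdid, cmd_info(q)[cmdid].timeout);
	free(q);
	return 0;
}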
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--	drivers/block/nvme.c	49
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 139e6fc1e2a8..60c1048dc8bc 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -41,6 +41,8 @@
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define NVME_MINORS	64
+#define IO_TIMEOUT	(5 * HZ)
+#define ADMIN_TIMEOUT	(60 * HZ)
 
 static int nvme_major;
 module_param(nvme_major, int, 0);
@@ -119,6 +121,16 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
 }
 
+struct nvme_cmd_info {
+	unsigned long ctx;
+	unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
 /**
  * alloc_cmdid - Allocate a Command ID
  * @param nvmeq The queue that will be used for this command
@@ -131,10 +143,11 @@ static inline void _nvme_check_size(void)
  * Passing in a pointer that's not 4-byte aligned will cause a BUG.
  * We can change this if it becomes a problem.
  */
-static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
+							unsigned timeout)
 {
 	int depth = nvmeq->q_depth;
-	unsigned long data = (unsigned long)ctx | handler;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
 	int cmdid;
 
 	BUG_ON((unsigned long)ctx & 3);
@@ -145,16 +158,17 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
 		return -EBUSY;
 	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
 
-	nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(depth)] = data;
+	info[cmdid].ctx = (unsigned long)ctx | handler;
+	info[cmdid].timeout = jiffies + timeout;
 	return cmdid;
 }
 
 static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
-						int handler)
+					int handler, unsigned timeout)
 {
 	int cmdid;
 	wait_event_killable(nvmeq->sq_full,
-		(cmdid = alloc_cmdid(nvmeq, ctx, handler)) >= 0);
+		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
 	return (cmdid < 0) ? -EINTR : cmdid;
 }
 
@@ -175,12 +189,12 @@ enum {
 static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
 {
 	unsigned long data;
-	unsigned offset = cmdid + BITS_TO_LONGS(nvmeq->q_depth);
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
 
-	if (cmdid > nvmeq->q_depth)
+	if (cmdid >= nvmeq->q_depth)
 		return CMD_CTX_INVALID;
-	data = nvmeq->cmdid_data[offset];
-	nvmeq->cmdid_data[offset] = CMD_CTX_COMPLETED;
+	data = info[cmdid].ctx;
+	info[cmdid].ctx = CMD_CTX_COMPLETED;
 	clear_bit(cmdid, nvmeq->cmdid_data);
 	wake_up(&nvmeq->sq_full);
 	return data;
@@ -188,8 +202,8 @@ static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
 
 static void cancel_cmdid_data(struct nvme_queue *nvmeq, int cmdid)
 {
-	unsigned offset = cmdid + BITS_TO_LONGS(nvmeq->q_depth);
-	nvmeq->cmdid_data[offset] = CMD_CTX_CANCELLED;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	info[cmdid].ctx = CMD_CTX_CANCELLED;
 }
 
 static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
@@ -327,7 +341,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 		goto congestion;
 	info->bio = bio;
 
-	cmdid = alloc_cmdid(nvmeq, info, bio_completion_id);
+	cmdid = alloc_cmdid(nvmeq, info, bio_completion_id, IO_TIMEOUT);
 	if (unlikely(cmdid < 0))
 		goto free_info;
 
@@ -506,7 +520,7 @@ static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
  * if the result is positive, it's an NVM Express status code
  */
 static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
-			struct nvme_command *cmd, u32 *result)
+			struct nvme_command *cmd, u32 *result, unsigned timeout)
 {
 	int cmdid;
 	struct sync_cmd_info cmdinfo;
@@ -514,7 +528,8 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
 	cmdinfo.task = current;
 	cmdinfo.status = -EINTR;
 
-	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id);
+	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
+								timeout);
 	if (cmdid < 0)
 		return cmdid;
 	cmd->common.command_id = cmdid;
@@ -537,7 +552,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
 static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 						u32 *result)
 {
-	return nvme_submit_sync_cmd(dev->queues[0], cmd, result);
+	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -630,7 +645,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 				int depth, int vector)
 {
 	struct device *dmadev = &dev->pci_dev->dev;
-	unsigned extra = (depth + BITS_TO_LONGS(depth)) * sizeof(long);
+	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
 	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
 	if (!nvmeq)
 		return NULL;
@@ -892,7 +907,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	 * additional races since q_lock already protects against other CPUs.
 	 */
 	put_nvmeq(nvmeq);
-	status = nvme_submit_sync_cmd(nvmeq, &c, &result);
+	status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);
 
 	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
 	put_user(result, &uio->result);
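Note that this patch only records the deadline; nothing in the diff above consumes it yet. Purely as a hypothetical illustration of how the stored value could be checked later, a scan over the same toy structures from the sketch above might look like the following. The cmdid_expired() helper and the wraparound-safe comparison are assumptions made for the example, not code from the driver.

/*
 * Hypothetical continuation of the earlier user-space sketch: flag a
 * command ID whose recorded deadline has passed.  Requires the struct
 * queue, cmd_info() and BITS_PER_LONG definitions from that sketch.
 */
static int cmdid_expired(struct queue *q, int cmdid, unsigned long now)
{
	struct cmd_info *info = cmd_info(q);
	unsigned long bit = 1UL << (cmdid % BITS_PER_LONG);

	/* Only an in-flight command (bit set in the bitmap) can expire. */
	if (!(q->cmdid_data[cmdid / BITS_PER_LONG] & bit))
		return 0;
	/* Wraparound-safe "now >= deadline" comparison. */
	return (long)(now - info[cmdid].timeout) >= 0;
}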