author     Christoph Hellwig <hch@lst.de>    2015-11-26 06:59:50 -0500
committer  Jens Axboe <axboe@fb.com>         2015-12-22 11:38:34 -0500
commit     aae239e1910ebc27ec9f7e8b25904a69626cf28c
tree       1b025d25e50d6cf0d500dea47e1effd03559ac00
parent     adf68f21c15572c68d9fadae618a09cf324b9814
nvme: simplify completion handling
Now that all commands are executed as block layer requests we can remove the
internal completion in the NVMe driver. Note that we can simply call
blk_mq_complete_request to abort commands, as the block layer will protect
against double completions internally.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  drivers/nvme/host/pci.c | 141
1 file changed, 26 insertions(+), 115 deletions(-)
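For context, the double-completion protection the commit message relies on lives in the blk-mq core, not in this driver. Roughly, and omitting details such as the fake-timeout check, the contemporary block layer behaves like the following sketch (this is an illustration, not code touched by this patch):

	void blk_mq_complete_request(struct request *rq, int error)
	{
		/*
		 * blk_mark_rq_complete() is a test_and_set_bit() on the request's
		 * REQ_ATOM_COMPLETE flag, so only the first caller gets to run the
		 * completion; a second blk_mq_complete_request() on the same request
		 * (e.g. cancellation racing with a real completion) is a no-op.
		 */
		if (!blk_mark_rq_complete(rq)) {
			rq->errors = error;
			__blk_mq_complete_request(rq);	/* invokes the driver's ->complete() */
		}
	}
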
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0497ff67324c..84ac46fc9873 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -199,15 +199,11 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
-typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
-						struct nvme_completion *);
-
 struct nvme_cmd_info {
-	nvme_completion_fn fn;
-	void *ctx;
 	int aborted;
 	struct nvme_queue *nvmeq;
-	struct nvme_iod iod[0];
+	struct nvme_iod *iod;
+	struct nvme_iod __iod;
 };
 
 /*
@@ -302,15 +298,6 @@ static int nvme_init_request(void *data, struct request *req,
 	return 0;
 }
 
-static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
-				nvme_completion_fn handler)
-{
-	cmd->fn = handler;
-	cmd->ctx = ctx;
-	cmd->aborted = 0;
-	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
-}
-
 static void *iod_get_private(struct nvme_iod *iod)
 {
 	return (void *) (iod->private & ~0x1UL);
@@ -324,44 +311,6 @@ static bool iod_should_kfree(struct nvme_iod *iod)
 	return (iod->private & NVME_INT_MASK) == 0;
 }
 
-/* Special values must be less than 0x1000 */
-#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
-#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
-#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
-#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
-
-static void special_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	if (ctx == CMD_CTX_CANCELLED)
-		return;
-	if (ctx == CMD_CTX_COMPLETED) {
-		dev_warn(nvmeq->q_dmadev,
-				"completed id %d twice on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	if (ctx == CMD_CTX_INVALID) {
-		dev_warn(nvmeq->q_dmadev,
-				"invalid id %d completed on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
-}
-
-static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
-{
-	void *ctx;
-
-	if (fn)
-		*fn = cmd->fn;
-	ctx = cmd->ctx;
-	cmd->fn = special_completion;
-	cmd->ctx = CMD_CTX_CANCELLED;
-	return ctx;
-}
-
 static void nvme_complete_async_event(struct nvme_dev *dev,
 		struct nvme_completion *cqe)
 {
@@ -382,34 +331,6 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
 	}
 }
 
-static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
-				  unsigned int tag)
-{
-	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, tag);
-
-	return blk_mq_rq_to_pdu(req);
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
-						nvme_completion_fn *fn)
-{
-	struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
-	void *ctx;
-	if (tag >= nvmeq->q_depth) {
-		*fn = special_completion;
-		return CMD_CTX_INVALID;
-	}
-	if (fn)
-		*fn = cmd->fn;
-	ctx = cmd->ctx;
-	cmd->fn = special_completion;
-	cmd->ctx = CMD_CTX_COMPLETED;
-	return ctx;
-}
-
 /**
  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
@@ -473,7 +394,7 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
 	    size <= NVME_INT_BYTES(dev)) {
 		struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
 
-		iod = cmd->iod;
+		iod = &cmd->__iod;
 		iod_init(iod, size, rq->nr_phys_segments,
 				(unsigned long) rq | NVME_INT_MASK);
 		return iod;
@@ -570,12 +491,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static void req_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
+static void req_completion(struct nvme_queue *nvmeq, struct nvme_completion *cqe)
 {
-	struct nvme_iod *iod = ctx;
-	struct request *req = iod_get_private(iod);
+	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
+	struct nvme_iod *iod = cmd_rq->iod;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 	int error = 0;
 
@@ -586,14 +506,10 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			return;
 		}
 
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-			if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-				error = NVME_SC_CANCELLED;
-			else
-				error = status;
-		} else {
+		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+			error = status;
+		else
 			error = nvme_error_status(status);
-		}
 	}
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
@@ -836,8 +752,10 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ret)
 		goto out;
 
+	cmd->iod = iod;
+	cmd->aborted = 0;
 	cmnd.common.command_id = req->tag;
-	nvme_set_info(cmd, iod, req_completion);
+	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
 	__nvme_submit_cmd(nvmeq, &cmnd);
@@ -857,8 +775,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 	phase = nvmeq->cq_phase;
 
 	for (;;) {
-		void *ctx;
-		nvme_completion_fn fn;
 		struct nvme_completion cqe = nvmeq->cqes[head];
 		u16 status = le16_to_cpu(cqe.status);
 
@@ -873,6 +789,13 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		if (tag && *tag == cqe.command_id)
 			*tag = -1;
 
+		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
+			dev_warn(nvmeq->q_dmadev,
+				"invalid id %d completed on queue %d\n",
+				cqe.command_id, le16_to_cpu(cqe.sq_id));
+			continue;
+		}
+
 		/*
 		 * AEN requests are special as they don't time out and can
 		 * survive any kind of queue freeze and often don't respond to
@@ -885,8 +808,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 			continue;
 		}
 
-		ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
-		fn(nvmeq, ctx, &cqe);
+		req_completion(nvmeq, &cqe);
 	}
 
 	/* If the controller ignores the cq head doorbell and continuously
@@ -1125,29 +1047,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
 {
 	struct nvme_queue *nvmeq = data;
-	void *ctx;
-	nvme_completion_fn fn;
-	struct nvme_cmd_info *cmd;
-	struct nvme_completion cqe;
+	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	cmd = blk_mq_rq_to_pdu(req);
-
-	if (cmd->ctx == CMD_CTX_CANCELLED)
-		return;
+	dev_warn(nvmeq->q_dmadev,
+		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
 
+	status = NVME_SC_CANCELLED;
 	if (blk_queue_dying(req->q))
-		cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
-	else
-		cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
-
-
-	dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
-							req->tag, nvmeq->qid);
-	ctx = cancel_cmd_info(cmd, &fn);
-	fn(nvmeq, ctx, &cqe);
+		status |= NVME_SC_DNR;
+	blk_mq_complete_request(req, status);
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
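
For completeness, the cancel callback above is driven by walking the busy tags when a queue is torn down. In this era of the driver that wiring looks roughly like the sketch below; nvme_clear_queue is surrounding driver code, not part of this patch, and its exact form here is assumed:

	static void nvme_clear_queue(struct nvme_queue *nvmeq)
	{
		/*
		 * Walk every started request on this queue and complete it with
		 * NVME_SC_CANCELLED; blk-mq's completion guard makes this safe
		 * even if a real completion for the same request arrives
		 * concurrently.
		 */
		spin_lock_irq(&nvmeq->q_lock);
		if (nvmeq->tags && *nvmeq->tags)
			blk_mq_all_tag_busy_iter(*nvmeq->tags,
						 nvme_cancel_queue_ios, nvmeq);
		spin_unlock_irq(&nvmeq->q_lock);
	}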