author    Christoph Hellwig <hch@lst.de>    2015-11-16 04:39:48 -0500
committer Jens Axboe <axboe@fb.com>    2015-12-22 11:38:34 -0500
commit    e7a2a87d5938bbebe1637c82fbde94ea6be3ef78 (patch)
tree      25a76e38dcbd03a83abd079a1f3c585303f30135
parent    d8f32166a9c587e87a3a86f654c73d40b6b5df00 (diff)
nvme: switch abort to blk_execute_rq_nowait
And remove the now unused nvme_submit_cmd helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
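In rough outline, the abort path moves to this pattern: allocate a request that carries a prebuilt NVMe command, then hand it to the block layer with a completion callback instead of writing to the submission queue under the queue lock. A minimal sketch of that shape (demo_endio and demo_submit_async are illustrative names, not code from this patch; error handling is trimmed):

    /* Completion callback: the block layer invokes this when the command finishes. */
    static void demo_endio(struct request *req, int error)
    {
            /* By this point req->errors holds the command status. */
            blk_mq_free_request(req);
    }

    static int demo_submit_async(struct request_queue *q, struct nvme_command *cmd)
    {
            struct request *req;

            /* Allocate a request that carries the prebuilt command. */
            req = nvme_alloc_request(q, cmd, BLK_MQ_REQ_NOWAIT);
            if (IS_ERR(req))
                    return PTR_ERR(req);

            req->timeout = ADMIN_TIMEOUT;
            req->end_io_data = NULL;

            /* Queue it and return immediately; demo_endio fires on completion. */
            blk_execute_rq_nowait(req->q, NULL, req, 0, demo_endio);
            return 0;
    }

Compared with the removed nvme_submit_cmd path, the request's lifetime and completion are owned by blk-mq, so there is no per-command ctx/callback bookkeeping to undo on error.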
-rw-r--r--    drivers/nvme/host/pci.c    61
1 file changed, 26 insertions(+), 35 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d6d92b022f97..6a32a92a9227 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -375,20 +375,6 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
         }
 }
 
-static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
-                                                struct nvme_completion *cqe)
-{
-        struct request *req = ctx;
-
-        u16 status = le16_to_cpup(&cqe->status) >> 1;
-        u32 result = le32_to_cpup(&cqe->result);
-
-        blk_mq_free_request(req);
-
-        dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
-        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
-}
-
 static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
                                                   unsigned int tag)
 {
@@ -440,14 +426,6 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
         nvmeq->sq_tail = tail;
 }
 
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
-{
-        unsigned long flags;
-        spin_lock_irqsave(&nvmeq->q_lock, flags);
-        __nvme_submit_cmd(nvmeq, cmd);
-        spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-}
-
 static __le64 **iod_list(struct nvme_iod *iod)
 {
         return ((void *)iod) + iod->offset;
@@ -1045,13 +1023,25 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
         return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
+static void abort_endio(struct request *req, int error)
+{
+        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+        struct nvme_queue *nvmeq = cmd->nvmeq;
+        u32 result = (u32)(uintptr_t)req->special;
+        u16 status = req->errors;
+
+        dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
+
+        blk_mq_free_request(req);
+}
+
 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
         struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
         struct nvme_queue *nvmeq = cmd_rq->nvmeq;
         struct nvme_dev *dev = nvmeq->dev;
         struct request *abort_req;
-        struct nvme_cmd_info *abort_cmd;
         struct nvme_command cmd;
 
         /*
@@ -1089,30 +1079,31 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                 return BLK_EH_HANDLED;
         }
 
-        if (atomic_dec_and_test(&dev->ctrl.abort_limit))
-                return BLK_EH_RESET_TIMER;
+        cmd_rq->aborted = 1;
 
-        abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
-                        BLK_MQ_REQ_NOWAIT);
-        if (IS_ERR(abort_req)) {
+        if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
                 atomic_inc(&dev->ctrl.abort_limit);
                 return BLK_EH_RESET_TIMER;
         }
 
-        abort_cmd = blk_mq_rq_to_pdu(abort_req);
-        nvme_set_info(abort_cmd, abort_req, abort_completion);
-
         memset(&cmd, 0, sizeof(cmd));
         cmd.abort.opcode = nvme_admin_abort_cmd;
         cmd.abort.cid = req->tag;
         cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
-        cmd.abort.command_id = abort_req->tag;
-
-        cmd_rq->aborted = 1;
 
         dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
                                  req->tag, nvmeq->qid);
-        nvme_submit_cmd(dev->queues[0], &cmd);
+
+        abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+                        BLK_MQ_REQ_NOWAIT);
+        if (IS_ERR(abort_req)) {
+                atomic_inc(&dev->ctrl.abort_limit);
+                return BLK_EH_RESET_TIMER;
+        }
+
+        abort_req->timeout = ADMIN_TIMEOUT;
+        abort_req->end_io_data = NULL;
+        blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
 
         /*
          * The aborted req will be completed on receiving the abort req.
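Two details of the new flow are easy to miss in the diff. First, cmd.abort.command_id no longer needs to be set by hand, since the submission path stamps the command id from the request's tag when the abort request is queued; that is why the cmd.abort.command_id = abort_req->tag line goes away. Second, the abort_limit accounting becomes symmetric: a slot is reserved with atomic_dec_return() in the timeout handler and returned either on allocation failure or in abort_endio() once the abort completes (abort_endio also picks the CQE result out of req->special and the status out of req->errors, where the driver's completion path leaves them). A condensed sketch of that accounting, with demo_try_abort as an illustrative name rather than a function from the patch:

    static int demo_try_abort(struct nvme_dev *dev, struct nvme_command *cmd)
    {
            struct request *abort_req;

            /* Reserve an abort slot; undo the decrement if none are free. */
            if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
                    atomic_inc(&dev->ctrl.abort_limit);
                    return -EBUSY;
            }

            abort_req = nvme_alloc_request(dev->ctrl.admin_q, cmd,
                            BLK_MQ_REQ_NOWAIT);
            if (IS_ERR(abort_req)) {
                    /* The slot was never used; give it back. */
                    atomic_inc(&dev->ctrl.abort_limit);
                    return PTR_ERR(abort_req);
            }

            abort_req->timeout = ADMIN_TIMEOUT;
            abort_req->end_io_data = NULL;
            /* abort_endio() re-increments abort_limit when the abort completes. */
            blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
            return 0;
    }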