author		Keith Busch <keith.busch@intel.com>	2015-02-19 16:29:48 -0500
committer	Keith Busch <keith.busch@intel.com>	2015-02-19 18:15:38 -0500
commit		0c0f9b95c8b710b74772edd9693fe7ab5419a75a (patch)
tree		222cbbadbeec1a193e380d6d18ef4b95122b45fb /drivers/block
parent		483285184059b3f5b3a5707977349528abc82441 (diff)
NVMe: Fix potential corruption on sync commands
This makes all sync commands uninterruptible and schedules without timeout, so the controller either has to post a completion or the timeout recovery has to fail the command. This fixes potential memory or data corruption from a command timing out too early or being woken by a signal: previously, any DMA buffers mapped for that command would have been released even though we don't know what the controller is planning to do with those addresses.

Signed-off-by: Keith Busch <keith.busch@intel.com>
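To make the mechanics of the patch easier to follow, here is a minimal sketch of the sleep/wake handshake the fix relies on. It is an illustration, not the driver's literal code: every demo_* name is invented for this sketch, while set_current_state(), schedule(), and wake_up_process() are the actual kernel primitives involved.

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_cmd_info {
	struct task_struct *task;	/* submitter, woken on completion */
	u32 result;
	int status;			/* stays -EINTR until completion runs */
};

/*
 * Completion side: called from the queue's interrupt handler when the
 * controller posts a completion, or from timeout recovery when it fails
 * the command. Either way, the sleeping submitter is woken.
 */
static void demo_completion(struct demo_cmd_info *info, u32 result, int status)
{
	info->result = result;
	info->status = status;
	wake_up_process(info->task);
}

/*
 * Submission side, mirroring the code after this fix: an uninterruptible,
 * untimed sleep. Marking the task TASK_UNINTERRUPTIBLE *before* posting
 * the command closes the race where the completion fires first: in that
 * case wake_up_process() puts the task back to TASK_RUNNING and
 * schedule() returns without sleeping.
 */
static int demo_submit_sync(struct demo_cmd_info *info)
{
	info->task = current;
	info->status = -EINTR;

	set_current_state(TASK_UNINTERRUPTIBLE);
	/* ... post the command to the hardware submission queue here ... */
	schedule();		/* sleep until demo_completion() wakes us */

	return info->status;
}

Because the sleep is neither interruptible nor timed, the only way out is a completion, posted by the controller or synthesized by timeout recovery, so the command's DMA buffers are never released while the device might still use them.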
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/nvme-core.c	32
1 file changed, 3 insertions(+), 29 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cf2d8e3c93a8..b64bccbb78c9 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -926,14 +926,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
-						cmd_info)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	cancel_cmd_info(cmd_info, NULL);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 struct sync_cmd_info {
 	struct task_struct *task;
 	u32 result;
@@ -956,7 +948,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 						u32 *result, unsigned timeout)
 {
-	int ret;
 	struct sync_cmd_info cmdinfo;
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -968,29 +959,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 
 	nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
 
-	set_current_state(TASK_KILLABLE);
-	ret = nvme_submit_cmd(nvmeq, cmd);
-	if (ret) {
-		nvme_finish_cmd(nvmeq, req->tag, NULL);
-		set_current_state(TASK_RUNNING);
-	}
-	ret = schedule_timeout(timeout);
-
-	/*
-	 * Ensure that sync_completion has either run, or that it will
-	 * never run.
-	 */
-	nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
-
-	/*
-	 * We never got the completion
-	 */
-	if (cmdinfo.status == -EINTR)
-		return -EINTR;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	nvme_submit_cmd(nvmeq, cmd);
+	schedule();
 
 	if (result)
 		*result = cmdinfo.result;
-
 	return cmdinfo.status;
 }
 