author     Christoph Hellwig <hch@lst.de>    2016-11-10 10:32:33 -0500
committer  Jens Axboe <axboe@fb.com>         2016-11-10 12:06:24 -0500
commit     d49187e97e94e2eb613cb6fed810356972077cc3 (patch)
tree       fcfb2353034857d86ded315e5ce139a0cd124c06 /drivers/nvme
parent     41c9499b221ebe102f776589085cde43d0dadc46 (diff)
nvme: introduce struct nvme_request
This adds a shared per-request structure for all NVMe I/O. This structure is embedded as the first member in all NVMe transport drivers' request-private data and allows common functionality to be implemented across the drivers.

The first use is to replace the current abuse of the SCSI command passthrough fields in struct request for NVMe command passthrough, but it will grow a few more fields to allow implementing things like common abort handlers in the future.

The passthrough commands are handled by having a pointer to the SQE (struct nvme_command) in struct nvme_request, and the union of the possible result fields, which had to be turned from an anonymous into a named union for that purpose. This avoids having to pass a reference to a full CQE around and thus makes checking the result a lot more lightweight.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
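For orientation, here is a minimal sketch of the pattern this patch introduces, pieced together from the nvme.h and pci.c hunks below. It is illustrative rather than the complete kernel code, and the members of union nvme_result (defined outside drivers/nvme, so not visible in this diff) are an assumption inferred from the .u16/.u32/.u64 accessors the hunks use.

#include <linux/blk-mq.h>		/* blk_mq_rq_to_pdu(), struct request */
#include <linux/nvme.h>			/* struct nvme_command, union nvme_result */

/* Shared per-request data, kept in each transport's request PDU. */
struct nvme_request {
	struct nvme_command	*cmd;	/* SQE pointer for passthrough commands */
	union nvme_result	result;	/* 16/32/64-bit views of the CQE result */
};

/* Every transport embeds it as the first member of its private data ... */
struct nvme_iod {			/* PCIe transport example, see the pci.c hunk */
	struct nvme_request	req;	/* must stay the first member */
	/* ... transport-specific fields follow ... */
};

/* ... so common code can reach it without knowing which transport owns the request. */
static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

With this in place, the hunks below replace req->special and full-CQE copies with nvme_req(req)->result (or a local union nvme_result in the synchronous submission helpers).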
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c            28
-rw-r--r--  drivers/nvme/host/fabrics.c         26
-rw-r--r--  drivers/nvme/host/lightnvm.c        31
-rw-r--r--  drivers/nvme/host/nvme.h            16
-rw-r--r--  drivers/nvme/host/pci.c              4
-rw-r--r--  drivers/nvme/host/rdma.c            11
-rw-r--r--  drivers/nvme/target/core.c           8
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c   14
-rw-r--r--  drivers/nvme/target/loop.c          12
-rw-r--r--  drivers/nvme/target/nvmet.h          2
10 files changed, 71 insertions, 81 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ef34f2f3566a..2fd632bcd975 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -221,8 +221,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
 	req->cmd_type = REQ_TYPE_DRV_PRIV;
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
-	req->cmd = (unsigned char *)cmd;
-	req->cmd_len = sizeof(struct nvme_command);
+	nvme_req(req)->cmd = cmd;
 
 	return req;
 }
@@ -321,7 +320,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	int ret = 0;
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-		memcpy(cmd, req->cmd, sizeof(*cmd));
+		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
 	else if (req_op(req) == REQ_OP_FLUSH)
 		nvme_setup_flush(ns, cmd);
 	else if (req_op(req) == REQ_OP_DISCARD)
@@ -338,7 +337,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
  * if the result is positive, it's an NVM Express status code
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags)
 {
 	struct request *req;
@@ -349,7 +348,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-	req->special = cqe;
 
 	if (buffer && bufflen) {
 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -358,6 +356,8 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	}
 
 	blk_execute_rq(req->q, NULL, req, at_head);
+	if (result)
+		*result = nvme_req(req)->result;
 	ret = req->errors;
  out:
 	blk_mq_free_request(req);
@@ -379,7 +379,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		u32 *result, unsigned timeout)
 {
 	bool write = nvme_is_write(cmd);
-	struct nvme_completion cqe;
 	struct nvme_ns *ns = q->queuedata;
 	struct gendisk *disk = ns ? ns->disk : NULL;
 	struct request *req;
@@ -392,7 +391,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-	req->special = &cqe;
 
 	if (ubuffer && bufflen) {
 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -447,7 +445,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 	blk_execute_rq(req->q, disk, req, 0);
 	ret = req->errors;
 	if (result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(nvme_req(req)->result.u32);
 	if (meta && !ret && !write) {
 		if (copy_to_user(meta_buffer, meta, meta_len))
 			ret = -EFAULT;
@@ -596,7 +594,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 		void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&c, 0, sizeof(c));
@@ -604,10 +602,10 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 	c.features.nsid = cpu_to_le32(nsid);
 	c.features.fid = cpu_to_le32(fid);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
 			NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(res.u32);
 	return ret;
 }
 
@@ -615,7 +613,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 		void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&c, 0, sizeof(c));
@@ -623,10 +621,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
 			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(res.u32);
 	return ret;
 }
 
@@ -1901,7 +1899,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl,
 		struct nvme_completion *cqe)
 {
 	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result);
+	u32 result = le32_to_cpu(cqe->result.u32);
 
 	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
 		++ctrl->event_limit;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5a3f008d3480..68fb26b3bfb9 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -161,7 +161,7 @@ EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -169,11 +169,11 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
 			NVME_QID_ANY, 0, 0);
 
 	if (ret >= 0)
-		*val = le64_to_cpu(cqe.result64);
+		*val = le64_to_cpu(res.u64);
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -216,11 +216,11 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.attrib = 1;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
 			NVME_QID_ANY, 0, 0);
 
 	if (ret >= 0)
-		*val = le64_to_cpu(cqe.result64);
+		*val = le64_to_cpu(res.u64);
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
@@ -368,7 +368,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	struct nvmf_connect_data *data;
 	int ret;
 
@@ -400,16 +400,16 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
 			data, sizeof(*data), 0, NVME_QID_ANY, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (ret) {
-		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				&cmd, data);
 		goto out_free_data;
 	}
 
-	ctrl->cntlid = le16_to_cpu(cqe.result16);
+	ctrl->cntlid = le16_to_cpu(res.u16);
 
 out_free_data:
 	kfree(data);
@@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 {
 	struct nvme_command cmd;
 	struct nvmf_connect_data *data;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -459,11 +459,11 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
 			data, sizeof(*data), 0, qid, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (ret) {
-		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				&cmd, data);
 	}
 	kfree(data);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index f5e3011e31fc..442f67774ea9 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,14 +146,6 @@ struct nvme_nvm_command {
 	};
 };
 
-struct nvme_nvm_completion {
-	__le64	result;		/* Used by LightNVM to return ppa completions */
-	__le16	sq_head;	/* how much of this queue may be reclaimed */
-	__le16	sq_id;		/* submission queue that generated this entry */
-	__u16	command_id;	/* of the command which completed */
-	__le16	status;		/* did the command fail, and if so, why? */
-};
-
 #define NVME_NVM_LP_MLC_PAIRS 886
 struct nvme_nvm_lp_mlc {
 	__le16			num_pairs;
@@ -481,11 +473,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 static void nvme_nvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
-	struct nvme_nvm_completion *cqe = rq->special;
-
-	if (cqe)
-		rqd->ppa_status = le64_to_cpu(cqe->result);
 
+	rqd->ppa_status = nvme_req(rq)->result.u64;
 	nvm_end_io(rqd, error);
 
 	kfree(rq->cmd);
@@ -500,20 +489,18 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct bio *bio = rqd->bio;
 	struct nvme_nvm_command *cmd;
 
-	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
-	if (IS_ERR(rq))
+	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+	if (!cmd)
 		return -ENOMEM;
 
-	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
-				sizeof(struct nvme_nvm_completion), GFP_KERNEL);
-	if (!cmd) {
-		blk_mq_free_request(rq);
+	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+	if (IS_ERR(rq)) {
+		kfree(cmd);
 		return -ENOMEM;
 	}
+	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->ioprio = bio_prio(bio);
-
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 
@@ -522,10 +509,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
 
-	rq->cmd = (unsigned char *)cmd;
-	rq->cmd_len = sizeof(struct nvme_nvm_command);
-	rq->special = cmd + 1;
-
 	rq->end_io_data = rqd;
 
 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d47f5a5d18c7..5e64957a9b96 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -79,6 +79,20 @@ enum nvme_quirks {
 	NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
 };
 
+/*
+ * Common request structure for NVMe passthrough. All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+	struct nvme_command	*cmd;
+	union nvme_result	result;
+};
+
+static inline struct nvme_request *nvme_req(struct request *req)
+{
+	return blk_mq_rq_to_pdu(req);
+}
+
 /* The below value is the specific amount of delay needed before checking
  * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -278,7 +292,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void __user *ubuffer, unsigned bufflen, u32 *result,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0955e9d22020..de8e0505d979 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -140,6 +140,7 @@ struct nvme_queue {
  * allocated to store the PRP list.
  */
 struct nvme_iod {
+	struct nvme_request req;
 	struct nvme_queue *nvmeq;
 	int aborted;
 	int npages;		/* In the PRP list. 0 means small pool in use */
@@ -707,8 +708,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		}
 
 		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
-			memcpy(req->special, &cqe, sizeof(cqe));
+		nvme_req(req)->result = cqe.result;
 		blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
 
 	}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5a8388177959..0b8a161cf881 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -66,6 +66,7 @@ struct nvme_rdma_qe {
 
 struct nvme_rdma_queue;
 struct nvme_rdma_request {
+	struct nvme_request	req;
 	struct ib_mr		*mr;
 	struct nvme_rdma_qe	sqe;
 	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
@@ -1117,13 +1118,10 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 		struct nvme_completion *cqe, struct ib_wc *wc, int tag)
 {
-	u16 status = le16_to_cpu(cqe->status);
 	struct request *rq;
 	struct nvme_rdma_request *req;
 	int ret = 0;
 
-	status >>= 1;
-
 	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
 	if (!rq) {
 		dev_err(queue->ctrl->ctrl.device,
@@ -1134,9 +1132,6 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	}
 	req = blk_mq_rq_to_pdu(rq);
 
-	if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
-		memcpy(rq->special, cqe, sizeof(*cqe));
-
 	if (rq->tag == tag)
 		ret = 1;
 
@@ -1144,8 +1139,8 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	    wc->ex.invalidate_rkey == req->mr->rkey)
 		req->mr->need_inval = false;
 
-	blk_mq_complete_request(rq, status);
-
+	req->req.result = cqe->result;
+	blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
 	return ret;
 }
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..c232552be2d8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -617,7 +617,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	}
 
@@ -638,7 +638,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
-	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
 out:
@@ -700,7 +700,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		goto out;
 	}
 
@@ -709,7 +709,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		goto out_put_subsystem;
 	}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 9a97ae67e656..f4088198cd0d 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -69,7 +69,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 		}
 	}
 
-	req->rsp->result64 = cpu_to_le64(val);
+	req->rsp->result.u64 = cpu_to_le64(val);
 	nvmet_req_complete(req, status);
 }
 
@@ -125,7 +125,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	d = kmap(sg_page(req->sg)) + req->sg->offset;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result = 0;
+	req->rsp->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -138,7 +138,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
 
@@ -155,7 +155,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 
 	pr_info("creating controller %d for NQN %s.\n",
 		ctrl->cntlid, ctrl->hostnqn);
-	req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
 
 out:
 	kunmap(sg_page(req->sg));
@@ -173,7 +173,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	d = kmap(sg_page(req->sg)) + req->sg->offset;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result = 0;
+	req->rsp->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -191,14 +191,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
 		goto out_ctrl_put;
 	}
 
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
 		/* pass back cntlid that had the issue of installing queue */
-		req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
 		goto out_ctrl_put;
 	}
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d5df77d686b2..757e21a31128 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -36,6 +36,7 @@
 	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
 
 struct nvme_loop_iod {
+	struct nvme_request	nvme_req;
 	struct nvme_command	cmd;
 	struct nvme_completion	rsp;
 	struct nvmet_req	req;
@@ -112,10 +113,10 @@ static void nvme_loop_complete_rq(struct request *req)
 	blk_mq_end_request(req, error);
 }
 
-static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
+static void nvme_loop_queue_response(struct nvmet_req *req)
 {
 	struct nvme_loop_iod *iod =
-		container_of(nvme_req, struct nvme_loop_iod, req);
+		container_of(req, struct nvme_loop_iod, req);
 	struct nvme_completion *cqe = &iod->rsp;
 
 	/*
@@ -128,11 +129,10 @@ static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
 			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
 		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
 	} else {
-		struct request *req = blk_mq_rq_from_pdu(iod);
+		struct request *rq = blk_mq_rq_from_pdu(iod);
 
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
-			memcpy(req->special, cqe, sizeof(*cqe));
-		blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+		iod->nvme_req.result = cqe->result;
+		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
 	}
 }
 
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 76b6eedccaf9..f9c76441e8c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -238,7 +238,7 @@ static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
-	req->rsp->result = cpu_to_le32(result);
+	req->rsp->result.u32 = cpu_to_le32(result);
 }
 
 /*