author     Christoph Hellwig <hch@lst.de>    2016-02-29 09:59:47 -0500
committer  Jens Axboe <axboe@fb.com>         2016-02-29 10:47:17 -0500
commit     1cb3cce5eb9de335330c8a147e47e3359a51a8b5
tree       ce6bdeaa4cd35dcc966e848d92c9d427d634ebdc
parent     2d55cd5f511d6fc377734473b237ac50820bfb9f
nvme: return the whole CQE through the request passthrough interface
Both LightNVM and NVMe over Fabrics need to look at more than just the
status and result fields.
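
For context, the completion queue entry that the passthrough interface now returns in full. This is the struct nvme_completion layout as found in include/linux/nvme.h around this kernel version (quoted here for reference; the tree at this commit is authoritative):

  struct nvme_completion {
          __le32  result;         /* used by admin commands to return data */
          __le32  rsvd;
          __le16  sq_head;        /* how much of this queue may be reclaimed */
          __le16  sq_id;          /* submission queue that generated this entry */
          __u16   command_id;     /* of the command which completed */
          __le16  status;         /* did the command fail, and if so, why? */
  };

NVMe over Fabrics, for example, needs sq_head for submission queue flow control, and LightNVM commands return more completion data than fits in the single result dword.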
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Matias Bjørling <m@bjorling.me>
Reviewed-by: Jay Freyensee <james.p.freyensee@intel.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
 drivers/nvme/host/core.c | 27 +++++++++++++++++++--------
 drivers/nvme/host/nvme.h |  3 ++-
 drivers/nvme/host/pci.c  | 11 +++--------
 3 files changed, 24 insertions(+), 17 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 07b7ec699e92..66fd3d9e4d47 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -132,7 +132,6 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
         req->cmd = (unsigned char *)cmd;
         req->cmd_len = sizeof(struct nvme_command);
-        req->special = (void *)0;
 
         return req;
 }
@@ -143,7 +142,8 @@ EXPORT_SYMBOL_GPL(nvme_alloc_request);
  * if the result is positive, it's an NVM Express status code
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-                void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
+                struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+                unsigned timeout)
 {
         struct request *req;
         int ret;
@@ -153,6 +153,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                 return PTR_ERR(req);
 
         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+        req->special = cqe;
 
         if (buffer && bufflen) {
                 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -161,8 +162,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
         }
 
         blk_execute_rq(req->q, NULL, req, 0);
-        if (result)
-                *result = (u32)(uintptr_t)req->special;
         ret = req->errors;
  out:
         blk_mq_free_request(req);
@@ -172,7 +171,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                 void *buffer, unsigned bufflen)
 {
-        return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
+        return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -182,6 +181,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                 u32 *result, unsigned timeout)
 {
         bool write = cmd->common.opcode & 1;
+        struct nvme_completion cqe;
         struct nvme_ns *ns = q->queuedata;
         struct gendisk *disk = ns ? ns->disk : NULL;
         struct request *req;
@@ -194,6 +194,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                 return PTR_ERR(req);
 
         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+        req->special = &cqe;
 
         if (ubuffer && bufflen) {
                 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -248,7 +249,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
         blk_execute_rq(req->q, disk, req, 0);
         ret = req->errors;
         if (result)
-                *result = (u32)(uintptr_t)req->special;
+                *result = le32_to_cpu(cqe.result);
         if (meta && !ret && !write) {
                 if (copy_to_user(meta_buffer, meta, meta_len))
                         ret = -EFAULT;
@@ -329,6 +330,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                 dma_addr_t dma_addr, u32 *result)
 {
         struct nvme_command c;
+        struct nvme_completion cqe;
+        int ret;
 
         memset(&c, 0, sizeof(c));
         c.features.opcode = nvme_admin_get_features;
@@ -336,13 +339,18 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
         c.features.prp1 = cpu_to_le64(dma_addr);
         c.features.fid = cpu_to_le32(fid);
 
-        return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+        if (ret >= 0)
+                *result = le32_to_cpu(cqe.result);
+        return ret;
 }
 
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                 dma_addr_t dma_addr, u32 *result)
 {
         struct nvme_command c;
+        struct nvme_completion cqe;
+        int ret;
 
         memset(&c, 0, sizeof(c));
         c.features.opcode = nvme_admin_set_features;
@@ -350,7 +358,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
         c.features.fid = cpu_to_le32(fid);
         c.features.dword11 = cpu_to_le32(dword11);
 
-        return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+        if (ret >= 0)
+                *result = le32_to_cpu(cqe.result);
+        return ret;
 }
 
 int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
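
The new first-class cqe argument changes every caller of __nvme_submit_sync_cmd(). A minimal before/after sketch of a hypothetical call site (condensed from the hunks above; names illustrative):

  u32 result;
  struct nvme_completion cqe;
  int ret;

  /* Before: only the 32-bit result dword could be retrieved. */
  ret = __nvme_submit_sync_cmd(q, &cmd, buffer, bufflen, &result, 0);

  /* After: pass a CQE buffer, or NULL when the completion is not needed. */
  ret = __nvme_submit_sync_cmd(q, &cmd, &cqe, buffer, bufflen, 0);
  if (ret >= 0)
          result = le32_to_cpu(cqe.result);

Note that byte-order conversion moves to the caller: the driver now hands back the raw little-endian CQE and each consumer converts only the fields it needs.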
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 63ba8a500ee1..2ac7539fdd17 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -248,7 +248,8 @@ void nvme_requeue_req(struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                 void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-                void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+                struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+                unsigned timeout);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                 void __user *ubuffer, unsigned bufflen, u32 *result,
                 unsigned timeout);
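
A design point worth noting: the CQE buffer is stack-allocated in the submitter and published through req->special, which is safe only because blk_execute_rq() blocks until the request completes. A condensed sketch of the contract (assembled from the hunks above, slightly simplified):

  struct nvme_completion cqe;             /* lives on the submitter's stack */

  req->special = &cqe;                    /* registered before execution */
  blk_execute_rq(req->q, NULL, req, 0);   /* sleeps until the IRQ path has run */
  /* __nvme_process_cq() has memcpy()'d the full CQE by the time we wake */

Callers that do not care about the completion pass a NULL cqe, which is why the PCI completion path below checks req->special before copying.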
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a62336051178..d47b08783110 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -748,10 +748,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
         }
 
         req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-        if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-                u32 result = le32_to_cpu(cqe.result);
-                req->special = (void *)(uintptr_t)result;
-        }
+        if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+                memcpy(req->special, &cqe, sizeof(cqe));
         blk_mq_complete_request(req, status >> 1);
 
 }
@@ -901,13 +899,10 @@ static void abort_endio(struct request *req, int error)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct nvme_queue *nvmeq = iod->nvmeq;
-        u32 result = (u32)(uintptr_t)req->special;
         u16 status = req->errors;
 
-        dev_warn(nvmeq->dev->ctrl.device,
-                "Abort status:%x result:%x", status, result);
+        dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
         atomic_inc(&nvmeq->dev->ctrl.abort_limit);
-
         blk_mq_free_request(req);
 }
 
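Two smaller consequences are visible in the pci.c hunks. First, abort_endio() can no longer print the abort result: abort commands are fired asynchronously and never register a CQE buffer, so req->special no longer carries a result there. Second, the explicit req->special = (void *)0 in nvme_alloc_request() goes away, presumably because blk-mq already initializes req->special to NULL when setting up a request, so only submitters that explicitly register a CQE buffer ever present a non-NULL pointer to the completion path.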