author     Andy Lutomirski <luto@kernel.org>    2016-09-16 14:16:10 -0400
committer  Jens Axboe <axboe@fb.com>            2016-09-24 12:56:26 -0400
commit     1a6fe74dfd1bb10afb41cbbbdc14890604be42a6 (patch)
tree       ae97fdac8d8b8dd923156a989b92dd87ef8dc8c3
parent     26501db8dcbc3c63c0d8fb6c5bb098bc7d35d741 (diff)
nvme: Pass pointers, not dma addresses, to nvme_get/set_features()
Any user I can imagine that needs a buffer at all will want to pass a pointer directly. There are currently no callers that use buffers, so this change is painless, and it will make it much easier to start using features that use buffers (e.g. APST).

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jay Freyensee <james_p_freyensee@linux.intel.com>
Tested-by: Jay Freyensee <james_p_freyensee@linux.intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  drivers/nvme/host/core.c   14
-rw-r--r--  drivers/nvme/host/nvme.h    4
-rw-r--r--  drivers/nvme/host/scsi.c    6
3 files changed, 11 insertions, 13 deletions
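For context, a minimal sketch of how a hypothetical caller that actually needs a data buffer (e.g. the APST table mentioned in the commit message) might use the new pointer-based interface. The function name example_read_apst_table, the feature ID 0x0c, and the 256-byte payload size are illustrative assumptions taken from the NVMe spec, not part of this patch; existing callers simply pass NULL, 0 as shown in the diff below.

/* Hypothetical example, not part of this patch. */
#include <linux/slab.h>
#include "nvme.h"

static int example_read_apst_table(struct nvme_ctrl *ctrl)
{
	u32 result;
	int ret;
	/* Autonomous Power State Transition feature data is 256 bytes. */
	void *buf = kzalloc(256, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/*
	 * With the old interface the caller had to DMA-map buf and pass a
	 * dma_addr_t; with this patch the kernel pointer and length are
	 * handed to nvme_get_features() and mapped onto the request by
	 * __nvme_submit_sync_cmd().
	 */
	ret = nvme_get_features(ctrl, 0x0c /* APST */, 0, buf, 256, &result);

	/* ... consume buf on success ... */
	kfree(buf);
	return ret;
}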
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bd2156cbfc6c..4669c052239e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -599,7 +599,7 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
 }
 
 int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
-			dma_addr_t dma_addr, u32 *result)
+			void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
 	struct nvme_completion cqe;
@@ -608,10 +608,9 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 	memset(&c, 0, sizeof(c));
 	c.features.opcode = nvme_admin_get_features;
 	c.features.nsid = cpu_to_le32(nsid);
-	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
 			NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
 		*result = le32_to_cpu(cqe.result);
@@ -619,7 +618,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 }
 
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
-			dma_addr_t dma_addr, u32 *result)
+			void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
 	struct nvme_completion cqe;
@@ -627,12 +626,11 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 
 	memset(&c, 0, sizeof(c));
 	c.features.opcode = nvme_admin_set_features;
-	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
-			NVME_QID_ANY, 0, 0);
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
 		*result = le32_to_cpu(cqe.result);
 	return ret;
@@ -666,7 +664,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
 	u32 result;
 	int status, nr_io_queues;
 
-	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
+	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
 			&result);
 	if (status < 0)
 		return status;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bfd25dd73bca..b0a9ec681685 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -293,9 +293,9 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
 		struct nvme_id_ns **id);
 int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
 int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
-			dma_addr_t dma_addr, u32 *result);
+			void *buffer, size_t buflen, u32 *result);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
-			dma_addr_t dma_addr, u32 *result);
+			void *buffer, size_t buflen, u32 *result);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index 44009105f8c8..c2a0a1c7d05d 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -906,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	kfree(smart_log);
 
 	/* Get Features for Temp Threshold */
-	res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
+	res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0,
 			&feature_resp);
 	if (res != NVME_SC_SUCCESS)
 		temp_c_thresh = LOG_TEMP_UNKNOWN;
@@ -1039,7 +1039,7 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
 	if (len < MODE_PAGE_CACHING_LEN)
 		return -EINVAL;
 
-	nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
+	nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0,
 			&feature_resp);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
@@ -1328,7 +1328,7 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	case MODE_PAGE_CACHING:
 		dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
 		nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
-					dword11, 0, NULL);
+					dword11, NULL, 0, NULL);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		break;
 	case MODE_PAGE_CONTROL: