about summary refs log tree commit diff stats
path: root/drivers/nvme
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2015-11-28 09:37:52 -0500
committerJens Axboe <axboe@fb.com>2015-12-01 12:59:39 -0500
commit7fd8930f26be4c9078684b2fef14da0503771bf2 (patch)
treeab4847b46857510c32ae377f969c417856909309 /drivers/nvme
parent5fd4ce1b005bd6ede913763f65efae9af6f7f386 (diff)
nvme: add a common helper to read Identify Controller data
And add the 64-bit register read operation for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--drivers/nvme/host/core.c52
-rw-r--r--drivers/nvme/host/nvme.h4
-rw-r--r--drivers/nvme/host/pci.c53
3 files changed, 71 insertions(+), 38 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e3179b33ff81..1c9f09c80b9d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -776,6 +776,58 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
776 return ret; 776 return ret;
777} 777}
778 778
779/*
780 * Initialize the cached copies of the Identify data and various controller
781 * register in our nvme_ctrl structure. This should be called as soon as
782 * the admin queue is fully up and running.
783 */
784int nvme_init_identify(struct nvme_ctrl *ctrl)
785{
786 struct nvme_id_ctrl *id;
787 u64 cap;
788 int ret, page_shift;
789
790 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
791 if (ret) {
792 dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
793 return ret;
794 }
795 page_shift = NVME_CAP_MPSMIN(cap) + 12;
796
797 ret = nvme_identify_ctrl(ctrl, &id);
798 if (ret) {
799 dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
800 return -EIO;
801 }
802
803 ctrl->oncs = le16_to_cpup(&id->oncs);
804 ctrl->abort_limit = id->acl + 1;
805 ctrl->vwc = id->vwc;
806 memcpy(ctrl->serial, id->sn, sizeof(id->sn));
807 memcpy(ctrl->model, id->mn, sizeof(id->mn));
808 memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
809 if (id->mdts)
810 ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
811 else
812 ctrl->max_hw_sectors = UINT_MAX;
813
814 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
815 unsigned int max_hw_sectors;
816
817 ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
818 max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
819 if (ctrl->max_hw_sectors) {
820 ctrl->max_hw_sectors = min(max_hw_sectors,
821 ctrl->max_hw_sectors);
822 } else {
823 ctrl->max_hw_sectors = max_hw_sectors;
824 }
825 }
826
827 kfree(id);
828 return 0;
829}
830
779static void nvme_free_ctrl(struct kref *kref) 831static void nvme_free_ctrl(struct kref *kref)
780{ 832{
781 struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref); 833 struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b6c5a55ed59f..a624add7ca22 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -62,6 +62,8 @@ struct nvme_ctrl {
62 u32 ctrl_config; 62 u32 ctrl_config;
63 63
64 u32 page_size; 64 u32 page_size;
65 u32 max_hw_sectors;
66 u32 stripe_size;
65 u16 oncs; 67 u16 oncs;
66 u16 abort_limit; 68 u16 abort_limit;
67 u8 event_limit; 69 u8 event_limit;
@@ -93,6 +95,7 @@ struct nvme_ns {
93struct nvme_ctrl_ops { 95struct nvme_ctrl_ops {
94 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); 96 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
95 int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); 97 int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
98 int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
96 void (*free_ctrl)(struct nvme_ctrl *ctrl); 99 void (*free_ctrl)(struct nvme_ctrl *ctrl);
97}; 100};
98 101
@@ -177,6 +180,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
177int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap); 180int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
178int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl); 181int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
179void nvme_put_ctrl(struct nvme_ctrl *ctrl); 182void nvme_put_ctrl(struct nvme_ctrl *ctrl);
183int nvme_init_identify(struct nvme_ctrl *ctrl);
180void nvme_put_ns(struct nvme_ns *ns); 184void nvme_put_ns(struct nvme_ns *ns);
181 185
182struct request *nvme_alloc_request(struct request_queue *q, 186struct request *nvme_alloc_request(struct request_queue *q,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ccb315101a5e..086563fe6ed1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -129,8 +129,6 @@ struct nvme_dev {
129 struct work_struct probe_work; 129 struct work_struct probe_work;
130 struct work_struct scan_work; 130 struct work_struct scan_work;
131 bool subsystem; 131 bool subsystem;
132 u32 max_hw_sectors;
133 u32 stripe_size;
134 void __iomem *cmb; 132 void __iomem *cmb;
135 dma_addr_t cmb_dma_addr; 133 dma_addr_t cmb_dma_addr;
136 u64 cmb_size; 134 u64 cmb_size;
@@ -1592,13 +1590,13 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
1592 list_add_tail(&ns->list, &dev->namespaces); 1590 list_add_tail(&ns->list, &dev->namespaces);
1593 1591
1594 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 1592 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1595 if (dev->max_hw_sectors) { 1593 if (dev->ctrl.max_hw_sectors) {
1596 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 1594 blk_queue_max_hw_sectors(ns->queue, dev->ctrl.max_hw_sectors);
1597 blk_queue_max_segments(ns->queue, 1595 blk_queue_max_segments(ns->queue,
1598 (dev->max_hw_sectors / (dev->ctrl.page_size >> 9)) + 1); 1596 (dev->ctrl.max_hw_sectors / (dev->ctrl.page_size >> 9)) + 1);
1599 } 1597 }
1600 if (dev->stripe_size) 1598 if (dev->ctrl.stripe_size)
1601 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); 1599 blk_queue_chunk_sectors(ns->queue, dev->ctrl.stripe_size >> 9);
1602 if (dev->ctrl.vwc & NVME_CTRL_VWC_PRESENT) 1600 if (dev->ctrl.vwc & NVME_CTRL_VWC_PRESENT)
1603 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA); 1601 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
1604 blk_queue_virt_boundary(ns->queue, dev->ctrl.page_size - 1); 1602 blk_queue_virt_boundary(ns->queue, dev->ctrl.page_size - 1);
@@ -1933,38 +1931,10 @@ static void nvme_dev_scan(struct work_struct *work)
1933static int nvme_dev_add(struct nvme_dev *dev) 1931static int nvme_dev_add(struct nvme_dev *dev)
1934{ 1932{
1935 int res; 1933 int res;
1936 struct nvme_id_ctrl *ctrl;
1937 int shift = NVME_CAP_MPSMIN(lo_hi_readq(dev->bar + NVME_REG_CAP)) + 12;
1938
1939 res = nvme_identify_ctrl(&dev->ctrl, &ctrl);
1940 if (res) {
1941 dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
1942 return -EIO;
1943 }
1944
1945 dev->ctrl.oncs = le16_to_cpup(&ctrl->oncs);
1946 dev->ctrl.abort_limit = ctrl->acl + 1;
1947 dev->ctrl.vwc = ctrl->vwc;
1948 memcpy(dev->ctrl.serial, ctrl->sn, sizeof(ctrl->sn));
1949 memcpy(dev->ctrl.model, ctrl->mn, sizeof(ctrl->mn));
1950 memcpy(dev->ctrl.firmware_rev, ctrl->fr, sizeof(ctrl->fr));
1951 if (ctrl->mdts)
1952 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
1953 else
1954 dev->max_hw_sectors = UINT_MAX;
1955
1956 if ((dev->ctrl.quirks & NVME_QUIRK_STRIPE_SIZE) && ctrl->vs[3]) {
1957 unsigned int max_hw_sectors;
1958 1934
1959 dev->stripe_size = 1 << (ctrl->vs[3] + shift); 1935 res = nvme_init_identify(&dev->ctrl);
1960 max_hw_sectors = dev->stripe_size >> (shift - 9); 1936 if (res)
1961 if (dev->max_hw_sectors) { 1937 return res;
1962 dev->max_hw_sectors = min(max_hw_sectors,
1963 dev->max_hw_sectors);
1964 } else
1965 dev->max_hw_sectors = max_hw_sectors;
1966 }
1967 kfree(ctrl);
1968 1938
1969 if (!dev->tagset.tags) { 1939 if (!dev->tagset.tags) {
1970 dev->tagset.ops = &nvme_mq_ops; 1940 dev->tagset.ops = &nvme_mq_ops;
@@ -2597,9 +2567,16 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
2597 return 0; 2567 return 0;
2598} 2568}
2599 2569
2570static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
2571{
2572 *val = readq(to_nvme_dev(ctrl)->bar + off);
2573 return 0;
2574}
2575
2600static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 2576static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2601 .reg_read32 = nvme_pci_reg_read32, 2577 .reg_read32 = nvme_pci_reg_read32,
2602 .reg_write32 = nvme_pci_reg_write32, 2578 .reg_write32 = nvme_pci_reg_write32,
2579 .reg_read64 = nvme_pci_reg_read64,
2603 .free_ctrl = nvme_pci_free_ctrl, 2580 .free_ctrl = nvme_pci_free_ctrl,
2604}; 2581};
2605 2582