aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2015-11-20 02:58:10 -0500
committerJens Axboe <axboe@fb.com>2015-12-01 12:59:38 -0500
commit7a67cbea653e444d04d7e850ab9631a14a196422 (patch)
treec0b7278210ef57355daf1090cc0a51225160bedc
parent21d34711e1b5970acfb22bddf1fefbfbd7e0123b (diff)
nvme: use offset instead of a struct for registers
This makes life easier for future non-PCI drivers where access to the registers might be more complicated. Note that Linux drivers are pretty evenly split between the two versions, and in fact the NVMe driver already uses offsets for the doorbells. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: Keith Busch <keith.busch@intel.com> [Fixed CMBSZ offset] Signed-off-by: Keith Busch <keith.busch@intel.com> Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/host/pci.c60
-rw-r--r--drivers/nvme/host/scsi.c6
-rw-r--r--include/linux/nvme.h27
4 files changed, 49 insertions, 46 deletions
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a53977cc9fc2..66550b76b05c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -51,7 +51,7 @@ struct nvme_dev {
51 u32 db_stride; 51 u32 db_stride;
52 u32 ctrl_config; 52 u32 ctrl_config;
53 struct msix_entry *entry; 53 struct msix_entry *entry;
54 struct nvme_bar __iomem *bar; 54 void __iomem *bar;
55 struct list_head namespaces; 55 struct list_head namespaces;
56 struct kref kref; 56 struct kref kref;
57 struct device *device; 57 struct device *device;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 996356261c6b..bfea7ec22b98 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1322,7 +1322,7 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1322 1322
1323 /* Don't tell the adapter to delete the admin queue. 1323 /* Don't tell the adapter to delete the admin queue.
1324 * Don't tell a removed adapter to delete IO queues. */ 1324 * Don't tell a removed adapter to delete IO queues. */
1325 if (qid && readl(&dev->bar->csts) != -1) { 1325 if (qid && readl(dev->bar + NVME_REG_CSTS) != -1) {
1326 adapter_delete_sq(dev, qid); 1326 adapter_delete_sq(dev, qid);
1327 adapter_delete_cq(dev, qid); 1327 adapter_delete_cq(dev, qid);
1328 } 1328 }
@@ -1475,7 +1475,7 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
1475 1475
1476 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 1476 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1477 1477
1478 while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) { 1478 while ((readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_RDY) != bit) {
1479 msleep(100); 1479 msleep(100);
1480 if (fatal_signal_pending(current)) 1480 if (fatal_signal_pending(current))
1481 return -EINTR; 1481 return -EINTR;
@@ -1500,7 +1500,7 @@ static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
1500{ 1500{
1501 dev->ctrl_config &= ~NVME_CC_SHN_MASK; 1501 dev->ctrl_config &= ~NVME_CC_SHN_MASK;
1502 dev->ctrl_config &= ~NVME_CC_ENABLE; 1502 dev->ctrl_config &= ~NVME_CC_ENABLE;
1503 writel(dev->ctrl_config, &dev->bar->cc); 1503 writel(dev->ctrl_config, dev->bar + NVME_REG_CC);
1504 1504
1505 return nvme_wait_ready(dev, cap, false); 1505 return nvme_wait_ready(dev, cap, false);
1506} 1506}
@@ -1509,7 +1509,7 @@ static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
1509{ 1509{
1510 dev->ctrl_config &= ~NVME_CC_SHN_MASK; 1510 dev->ctrl_config &= ~NVME_CC_SHN_MASK;
1511 dev->ctrl_config |= NVME_CC_ENABLE; 1511 dev->ctrl_config |= NVME_CC_ENABLE;
1512 writel(dev->ctrl_config, &dev->bar->cc); 1512 writel(dev->ctrl_config, dev->bar + NVME_REG_CC);
1513 1513
1514 return nvme_wait_ready(dev, cap, true); 1514 return nvme_wait_ready(dev, cap, true);
1515} 1515}
@@ -1521,10 +1521,10 @@ static int nvme_shutdown_ctrl(struct nvme_dev *dev)
1521 dev->ctrl_config &= ~NVME_CC_SHN_MASK; 1521 dev->ctrl_config &= ~NVME_CC_SHN_MASK;
1522 dev->ctrl_config |= NVME_CC_SHN_NORMAL; 1522 dev->ctrl_config |= NVME_CC_SHN_NORMAL;
1523 1523
1524 writel(dev->ctrl_config, &dev->bar->cc); 1524 writel(dev->ctrl_config, dev->bar + NVME_REG_CC);
1525 1525
1526 timeout = SHUTDOWN_TIMEOUT + jiffies; 1526 timeout = SHUTDOWN_TIMEOUT + jiffies;
1527 while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) != 1527 while ((readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_SHST_MASK) !=
1528 NVME_CSTS_SHST_CMPLT) { 1528 NVME_CSTS_SHST_CMPLT) {
1529 msleep(100); 1529 msleep(100);
1530 if (fatal_signal_pending(current)) 1530 if (fatal_signal_pending(current))
@@ -1600,7 +1600,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1600{ 1600{
1601 int result; 1601 int result;
1602 u32 aqa; 1602 u32 aqa;
1603 u64 cap = lo_hi_readq(&dev->bar->cap); 1603 u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
1604 struct nvme_queue *nvmeq; 1604 struct nvme_queue *nvmeq;
1605 /* 1605 /*
1606 * default to a 4K page size, with the intention to update this 1606 * default to a 4K page size, with the intention to update this
@@ -1618,11 +1618,12 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1618 return -ENODEV; 1618 return -ENODEV;
1619 } 1619 }
1620 1620
1621 dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? 1621 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
1622 NVME_CAP_NSSRC(cap) : 0; 1622 NVME_CAP_NSSRC(cap) : 0;
1623 1623
1624 if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO)) 1624 if (dev->subsystem &&
1625 writel(NVME_CSTS_NSSRO, &dev->bar->csts); 1625 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
1626 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
1626 1627
1627 result = nvme_disable_ctrl(dev, cap); 1628 result = nvme_disable_ctrl(dev, cap);
1628 if (result < 0) 1629 if (result < 0)
@@ -1645,9 +1646,9 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1645 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; 1646 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
1646 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 1647 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
1647 1648
1648 writel(aqa, &dev->bar->aqa); 1649 writel(aqa, dev->bar + NVME_REG_AQA);
1649 lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq); 1650 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1650 lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq); 1651 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
1651 1652
1652 result = nvme_enable_ctrl(dev, cap); 1653 result = nvme_enable_ctrl(dev, cap);
1653 if (result) 1654 if (result)
@@ -1789,7 +1790,7 @@ static int nvme_subsys_reset(struct nvme_dev *dev)
1789 if (!dev->subsystem) 1790 if (!dev->subsystem)
1790 return -ENOTTY; 1791 return -ENOTTY;
1791 1792
1792 writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */ 1793 writel(0x4E564D65, dev->bar + NVME_REG_NSSR); /* "NVMe" */
1793 return 0; 1794 return 0;
1794} 1795}
1795 1796
@@ -2076,14 +2077,14 @@ static int nvme_kthread(void *data)
2076 spin_lock(&dev_list_lock); 2077 spin_lock(&dev_list_lock);
2077 list_for_each_entry_safe(dev, next, &dev_list, node) { 2078 list_for_each_entry_safe(dev, next, &dev_list, node) {
2078 int i; 2079 int i;
2079 u32 csts = readl(&dev->bar->csts); 2080 u32 csts = readl(dev->bar + NVME_REG_CSTS);
2080 2081
2081 if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) || 2082 if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
2082 csts & NVME_CSTS_CFS) { 2083 csts & NVME_CSTS_CFS) {
2083 if (!__nvme_reset(dev)) { 2084 if (!__nvme_reset(dev)) {
2084 dev_warn(dev->dev, 2085 dev_warn(dev->dev,
2085 "Failed status: %x, reset controller\n", 2086 "Failed status: %x, reset controller\n",
2086 readl(&dev->bar->csts)); 2087 readl(dev->bar + NVME_REG_CSTS));
2087 } 2088 }
2088 continue; 2089 continue;
2089 } 2090 }
@@ -2243,11 +2244,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
2243 if (!use_cmb_sqes) 2244 if (!use_cmb_sqes)
2244 return NULL; 2245 return NULL;
2245 2246
2246 dev->cmbsz = readl(&dev->bar->cmbsz); 2247 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
2247 if (!(NVME_CMB_SZ(dev->cmbsz))) 2248 if (!(NVME_CMB_SZ(dev->cmbsz)))
2248 return NULL; 2249 return NULL;
2249 2250
2250 cmbloc = readl(&dev->bar->cmbloc); 2251 cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
2251 2252
2252 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 2253 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
2253 size = szu * NVME_CMB_SZ(dev->cmbsz); 2254 size = szu * NVME_CMB_SZ(dev->cmbsz);
@@ -2321,7 +2322,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2321 return -ENOMEM; 2322 return -ENOMEM;
2322 size = db_bar_size(dev, nr_io_queues); 2323 size = db_bar_size(dev, nr_io_queues);
2323 } while (1); 2324 } while (1);
2324 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2325 dev->dbs = dev->bar + 4096;
2325 adminq->q_db = dev->dbs; 2326 adminq->q_db = dev->dbs;
2326 } 2327 }
2327 2328
@@ -2397,8 +2398,9 @@ static struct nvme_ns *nvme_find_ns(struct nvme_dev *dev, unsigned nsid)
2397 2398
2398static inline bool nvme_io_incapable(struct nvme_dev *dev) 2399static inline bool nvme_io_incapable(struct nvme_dev *dev)
2399{ 2400{
2400 return (!dev->bar || readl(&dev->bar->csts) & NVME_CSTS_CFS || 2401 return (!dev->bar ||
2401 dev->online_queues < 2); 2402 readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_CFS ||
2403 dev->online_queues < 2);
2402} 2404}
2403 2405
2404static void nvme_ns_remove(struct nvme_ns *ns) 2406static void nvme_ns_remove(struct nvme_ns *ns)
@@ -2478,7 +2480,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
2478 struct pci_dev *pdev = to_pci_dev(dev->dev); 2480 struct pci_dev *pdev = to_pci_dev(dev->dev);
2479 int res; 2481 int res;
2480 struct nvme_id_ctrl *ctrl; 2482 struct nvme_id_ctrl *ctrl;
2481 int shift = NVME_CAP_MPSMIN(lo_hi_readq(&dev->bar->cap)) + 12; 2483 int shift = NVME_CAP_MPSMIN(lo_hi_readq(dev->bar + NVME_REG_CAP)) + 12;
2482 2484
2483 res = nvme_identify_ctrl(dev, &ctrl); 2485 res = nvme_identify_ctrl(dev, &ctrl);
2484 if (res) { 2486 if (res) {
@@ -2554,7 +2556,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
2554 if (!dev->bar) 2556 if (!dev->bar)
2555 goto disable; 2557 goto disable;
2556 2558
2557 if (readl(&dev->bar->csts) == -1) { 2559 if (readl(dev->bar + NVME_REG_CSTS) == -1) {
2558 result = -ENODEV; 2560 result = -ENODEV;
2559 goto unmap; 2561 goto unmap;
2560 } 2562 }
@@ -2569,11 +2571,12 @@ static int nvme_dev_map(struct nvme_dev *dev)
2569 goto unmap; 2571 goto unmap;
2570 } 2572 }
2571 2573
2572 cap = lo_hi_readq(&dev->bar->cap); 2574 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
2575
2573 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); 2576 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
2574 dev->db_stride = 1 << NVME_CAP_STRIDE(cap); 2577 dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
2575 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2578 dev->dbs = dev->bar + 4096;
2576 if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) 2579 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
2577 dev->cmb = nvme_map_cmb(dev); 2580 dev->cmb = nvme_map_cmb(dev);
2578 2581
2579 return 0; 2582 return 0;
@@ -2632,7 +2635,8 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
2632 * queues than admin tags. 2635 * queues than admin tags.
2633 */ 2636 */
2634 set_current_state(TASK_RUNNING); 2637 set_current_state(TASK_RUNNING);
2635 nvme_disable_ctrl(dev, lo_hi_readq(&dev->bar->cap)); 2638 nvme_disable_ctrl(dev,
2639 lo_hi_readq(dev->bar + NVME_REG_CAP));
2636 nvme_clear_queue(dev->queues[0]); 2640 nvme_clear_queue(dev->queues[0]);
2637 flush_kthread_worker(dq->worker); 2641 flush_kthread_worker(dq->worker);
2638 nvme_disable_queue(dev, 0); 2642 nvme_disable_queue(dev, 0);
@@ -2808,7 +2812,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2808 2812
2809 if (dev->bar) { 2813 if (dev->bar) {
2810 nvme_freeze_queues(dev); 2814 nvme_freeze_queues(dev);
2811 csts = readl(&dev->bar->csts); 2815 csts = readl(dev->bar + NVME_REG_CSTS);
2812 } 2816 }
2813 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { 2817 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
2814 for (i = dev->queue_count - 1; i >= 0; i--) { 2818 for (i = dev->queue_count - 1; i >= 0; i--) {
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index c3d8d3887a31..85869946d226 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -611,7 +611,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
611 611
612 memset(inq_response, 0, alloc_len); 612 memset(inq_response, 0, alloc_len);
613 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */ 613 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
614 if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) { 614 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1)) {
615 struct nvme_id_ns *id_ns; 615 struct nvme_id_ns *id_ns;
616 void *eui; 616 void *eui;
617 int len; 617 int len;
@@ -623,7 +623,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
623 623
624 eui = id_ns->eui64; 624 eui = id_ns->eui64;
625 len = sizeof(id_ns->eui64); 625 len = sizeof(id_ns->eui64);
626 if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) { 626 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) {
627 if (bitmap_empty(eui, len * 8)) { 627 if (bitmap_empty(eui, len * 8)) {
628 eui = id_ns->nguid; 628 eui = id_ns->nguid;
629 len = sizeof(id_ns->nguid); 629 len = sizeof(id_ns->nguid);
@@ -2297,7 +2297,7 @@ static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
2297{ 2297{
2298 struct nvme_dev *dev = ns->dev; 2298 struct nvme_dev *dev = ns->dev;
2299 2299
2300 if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) 2300 if (!(readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_RDY))
2301 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 2301 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2302 NOT_READY, SCSI_ASC_LUN_NOT_READY, 2302 NOT_READY, SCSI_ASC_LUN_NOT_READY,
2303 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 2303 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 3af5f454c04a..a55986f6fe38 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -17,20 +17,19 @@
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19 19
20struct nvme_bar { 20enum {
21 __u64 cap; /* Controller Capabilities */ 21 NVME_REG_CAP = 0x0000, /* Controller Capabilities */
22 __u32 vs; /* Version */ 22 NVME_REG_VS = 0x0008, /* Version */
23 __u32 intms; /* Interrupt Mask Set */ 23 NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
24 __u32 intmc; /* Interrupt Mask Clear */ 24 NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
25 __u32 cc; /* Controller Configuration */ 25 NVME_REG_CC = 0x0014, /* Controller Configuration */
26 __u32 rsvd1; /* Reserved */ 26 NVME_REG_CSTS = 0x001c, /* Controller Status */
27 __u32 csts; /* Controller Status */ 27 NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
28 __u32 nssr; /* Subsystem Reset */ 28 NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
29 __u32 aqa; /* Admin Queue Attributes */ 29 NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
30 __u64 asq; /* Admin SQ Base Address */ 30 NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
31 __u64 acq; /* Admin CQ Base Address */ 31 NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
32 __u32 cmbloc; /* Controller Memory Buffer Location */ 32 NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
33 __u32 cmbsz; /* Controller Memory Buffer Size */
34}; 33};
35 34
36#define NVME_CAP_MQES(cap) ((cap) & 0xffff) 35#define NVME_CAP_MQES(cap) ((cap) & 0xffff)