author    Christoph Hellwig <hch@lst.de>    2015-11-26 04:06:56 -0500
committer Jens Axboe <axboe@fb.com>         2015-12-01 12:59:38 -0500
commit    1c63dc66580d4bbb6d2b75bf184b5aa105ba5bdb
tree      84fac8a49904df3f5f4997fc80803bcd1d4bece5
parent    01fec28a6f3ba96d4f46a538eae089dd92189fd1
nvme: split a new struct nvme_ctrl out of struct nvme_dev
The new struct nvme_ctrl will be used by the common NVMe code that sits
on top of struct request_queue and the new nvme_ctrl_ops abstraction.
It only contains the bare minimum required, which consists of values
sampled during controller probe, the admin queue pointer and a second
struct device pointer at the moment, but more will follow later.

Only values that are not used in the I/O fast path should be moved to
struct nvme_ctrl so that drivers can optimize their cache line usage
easily.  That's also the reason why we have two device pointers as the
struct device is used for DMA mapping purposes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
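The heart of the change is easier to see in condensed form. Below is a sketch, with
field lists trimmed for illustration, of the layout this patch introduces in
drivers/nvme/host/nvme.h and drivers/nvme/host/pci.c: the transport-agnostic
struct nvme_ctrl is embedded in the PCI driver's struct nvme_dev, and the driver
gets back to its private structure with container_of().

struct nvme_ctrl {			/* common, slow-path controller state */
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct device *dev;		/* points at the PCI device, set in nvme_probe() */
	int instance;
	char serial[20], model[40], firmware_rev[8];
	/* ... identify-derived limits such as oncs and abort_limit ... */
};

struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
};

struct nvme_dev {			/* PCI-specific, I/O fast-path state */
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	void __iomem *bar;
	/* ... queue counts, PRP pools, CMB, work structs ... */
	struct nvme_ctrl ctrl;		/* embedded common part */
};

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}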
-rw-r--r--  drivers/nvme/host/core.c    10
-rw-r--r--  drivers/nvme/host/nvme.h    61
-rw-r--r--  drivers/nvme/host/pci.c    190
-rw-r--r--  drivers/nvme/host/scsi.c    89
4 files changed, 193 insertions(+), 157 deletions(-)
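Before the per-file hunks, here is a condensed view, assembled from the pci.c and
nvme.h changes below, of how the new ops table is implemented by the PCI driver and
then consumed by common code such as the SCSI translation layer's TEST UNIT READY
handling:

static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);	/* BAR access stays inside pci.c */
	return 0;
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.reg_read32	= nvme_pci_reg_read32,
};

/* nvme_probe() wires it up: dev->ctrl.ops = &nvme_pci_ctrl_ops; dev->ctrl.dev = dev->dev; */

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}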
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ce938a428928..ca54a34665ac 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -79,7 +79,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
79 return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0); 79 return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0);
80} 80}
81 81
82int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id) 82int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
83{ 83{
84 struct nvme_command c = { }; 84 struct nvme_command c = { };
85 int error; 85 int error;
@@ -99,7 +99,7 @@ int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id)
99 return error; 99 return error;
100} 100}
101 101
102int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, 102int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
103 struct nvme_id_ns **id) 103 struct nvme_id_ns **id)
104{ 104{
105 struct nvme_command c = { }; 105 struct nvme_command c = { };
@@ -120,7 +120,7 @@ int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
120 return error; 120 return error;
121} 121}
122 122
123int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, 123int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
124 dma_addr_t dma_addr, u32 *result) 124 dma_addr_t dma_addr, u32 *result)
125{ 125{
126 struct nvme_command c; 126 struct nvme_command c;
@@ -135,7 +135,7 @@ int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
135 result, 0); 135 result, 0);
136} 136}
137 137
138int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, 138int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
139 dma_addr_t dma_addr, u32 *result) 139 dma_addr_t dma_addr, u32 *result)
140{ 140{
141 struct nvme_command c; 141 struct nvme_command c;
@@ -150,7 +150,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
150 result, 0); 150 result, 0);
151} 151}
152 152
153int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log) 153int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
154{ 154{
155 struct nvme_command c = { }; 155 struct nvme_command c = { };
156 int error; 156 int error;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 66550b76b05c..19583e1125e6 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -30,46 +30,16 @@ enum {
30 NVME_NS_LIGHTNVM = 1, 30 NVME_NS_LIGHTNVM = 1,
31}; 31};
32 32
33/* 33struct nvme_ctrl {
34 * Represents an NVM Express device. Each nvme_dev is a PCI function. 34 const struct nvme_ctrl_ops *ops;
35 */
36struct nvme_dev {
37 struct list_head node;
38 struct nvme_queue **queues;
39 struct request_queue *admin_q; 35 struct request_queue *admin_q;
40 struct blk_mq_tag_set tagset;
41 struct blk_mq_tag_set admin_tagset;
42 u32 __iomem *dbs;
43 struct device *dev; 36 struct device *dev;
44 struct dma_pool *prp_page_pool;
45 struct dma_pool *prp_small_pool;
46 int instance; 37 int instance;
47 unsigned queue_count; 38
48 unsigned online_queues;
49 unsigned max_qid;
50 int q_depth;
51 u32 db_stride;
52 u32 ctrl_config;
53 struct msix_entry *entry;
54 void __iomem *bar;
55 struct list_head namespaces;
56 struct kref kref;
57 struct device *device;
58 struct work_struct reset_work;
59 struct work_struct probe_work;
60 struct work_struct scan_work;
61 char name[12]; 39 char name[12];
62 char serial[20]; 40 char serial[20];
63 char model[40]; 41 char model[40];
64 char firmware_rev[8]; 42 char firmware_rev[8];
65 bool subsystem;
66 u32 max_hw_sectors;
67 u32 stripe_size;
68 u32 page_size;
69 void __iomem *cmb;
70 dma_addr_t cmb_dma_addr;
71 u64 cmb_size;
72 u32 cmbsz;
73 u16 oncs; 43 u16 oncs;
74 u16 abort_limit; 44 u16 abort_limit;
75 u8 event_limit; 45 u8 event_limit;
@@ -82,7 +52,7 @@ struct nvme_dev {
82struct nvme_ns { 52struct nvme_ns {
83 struct list_head list; 53 struct list_head list;
84 54
85 struct nvme_dev *dev; 55 struct nvme_ctrl *ctrl;
86 struct request_queue *queue; 56 struct request_queue *queue;
87 struct gendisk *disk; 57 struct gendisk *disk;
88 struct kref kref; 58 struct kref kref;
@@ -97,6 +67,19 @@ struct nvme_ns {
97 u32 mode_select_block_len; 67 u32 mode_select_block_len;
98}; 68};
99 69
70struct nvme_ctrl_ops {
71 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
72};
73
74static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
75{
76 u32 val = 0;
77
78 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
79 return false;
80 return val & NVME_CSTS_RDY;
81}
82
100static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) 83static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
101{ 84{
102 return (sector >> (ns->lba_shift - 9)); 85 return (sector >> (ns->lba_shift - 9));
@@ -107,13 +90,13 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
107int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 90int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
108 void *buffer, void __user *ubuffer, unsigned bufflen, 91 void *buffer, void __user *ubuffer, unsigned bufflen,
109 u32 *result, unsigned timeout); 92 u32 *result, unsigned timeout);
110int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id); 93int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
111int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, 94int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
112 struct nvme_id_ns **id); 95 struct nvme_id_ns **id);
113int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log); 96int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
114int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, 97int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
115 dma_addr_t dma_addr, u32 *result); 98 dma_addr_t dma_addr, u32 *result);
116int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, 99int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
117 dma_addr_t dma_addr, u32 *result); 100 dma_addr_t dma_addr, u32 *result);
118 101
119struct sg_io_hdr; 102struct sg_io_hdr;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bfea7ec22b98..8a564f4ecf99 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -87,6 +87,9 @@ static wait_queue_head_t nvme_kthread_wait;
87 87
88static struct class *nvme_class; 88static struct class *nvme_class;
89 89
90struct nvme_dev;
91struct nvme_queue;
92
90static int __nvme_reset(struct nvme_dev *dev); 93static int __nvme_reset(struct nvme_dev *dev);
91static int nvme_reset(struct nvme_dev *dev); 94static int nvme_reset(struct nvme_dev *dev);
92static void nvme_process_cq(struct nvme_queue *nvmeq); 95static void nvme_process_cq(struct nvme_queue *nvmeq);
@@ -102,6 +105,49 @@ struct async_cmd_info {
102}; 105};
103 106
104/* 107/*
108 * Represents an NVM Express device. Each nvme_dev is a PCI function.
109 */
110struct nvme_dev {
111 struct list_head node;
112 struct nvme_queue **queues;
113 struct blk_mq_tag_set tagset;
114 struct blk_mq_tag_set admin_tagset;
115 u32 __iomem *dbs;
116 struct device *dev;
117 struct dma_pool *prp_page_pool;
118 struct dma_pool *prp_small_pool;
119 unsigned queue_count;
120 unsigned online_queues;
121 unsigned max_qid;
122 int q_depth;
123 u32 db_stride;
124 u32 ctrl_config;
125 struct msix_entry *entry;
126 void __iomem *bar;
127 struct list_head namespaces;
128 struct kref kref;
129 struct device *device;
130 struct work_struct reset_work;
131 struct work_struct probe_work;
132 struct work_struct scan_work;
133 bool subsystem;
134 u32 max_hw_sectors;
135 u32 stripe_size;
136 u32 page_size;
137 void __iomem *cmb;
138 dma_addr_t cmb_dma_addr;
139 u64 cmb_size;
140 u32 cmbsz;
141
142 struct nvme_ctrl ctrl;
143};
144
145static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
146{
147 return container_of(ctrl, struct nvme_dev, ctrl);
148}
149
150/*
105 * An NVM Express queue. Each device has at least two (one for admin 151 * An NVM Express queue. Each device has at least two (one for admin
106 * commands and one for I/O commands). 152 * commands and one for I/O commands).
107 */ 153 */
@@ -333,7 +379,7 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
333 u16 status = le16_to_cpup(&cqe->status) >> 1; 379 u16 status = le16_to_cpup(&cqe->status) >> 1;
334 380
335 if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) 381 if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
336 ++nvmeq->dev->event_limit; 382 ++nvmeq->dev->ctrl.event_limit;
337 if (status != NVME_SC_SUCCESS) 383 if (status != NVME_SC_SUCCESS)
338 return; 384 return;
339 385
@@ -357,7 +403,7 @@ static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
357 blk_mq_free_request(req); 403 blk_mq_free_request(req);
358 404
359 dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result); 405 dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
360 ++nvmeq->dev->abort_limit; 406 ++nvmeq->dev->ctrl.abort_limit;
361} 407}
362 408
363static void async_completion(struct nvme_queue *nvmeq, void *ctx, 409static void async_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -1051,7 +1097,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
1051 struct nvme_cmd_info *cmd_info; 1097 struct nvme_cmd_info *cmd_info;
1052 struct request *req; 1098 struct request *req;
1053 1099
1054 req = blk_mq_alloc_request(dev->admin_q, WRITE, 1100 req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
1055 BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED); 1101 BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
1056 if (IS_ERR(req)) 1102 if (IS_ERR(req))
1057 return PTR_ERR(req); 1103 return PTR_ERR(req);
@@ -1077,7 +1123,7 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
1077 struct request *req; 1123 struct request *req;
1078 struct nvme_cmd_info *cmd_rq; 1124 struct nvme_cmd_info *cmd_rq;
1079 1125
1080 req = blk_mq_alloc_request(dev->admin_q, WRITE, 0); 1126 req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, 0);
1081 if (IS_ERR(req)) 1127 if (IS_ERR(req))
1082 return PTR_ERR(req); 1128 return PTR_ERR(req);
1083 1129
@@ -1101,7 +1147,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1101 c.delete_queue.opcode = opcode; 1147 c.delete_queue.opcode = opcode;
1102 c.delete_queue.qid = cpu_to_le16(id); 1148 c.delete_queue.qid = cpu_to_le16(id);
1103 1149
1104 return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); 1150 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1105} 1151}
1106 1152
1107static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1153static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
@@ -1122,7 +1168,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1122 c.create_cq.cq_flags = cpu_to_le16(flags); 1168 c.create_cq.cq_flags = cpu_to_le16(flags);
1123 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); 1169 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
1124 1170
1125 return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); 1171 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1126} 1172}
1127 1173
1128static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 1174static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
@@ -1143,7 +1189,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
1143 c.create_sq.sq_flags = cpu_to_le16(flags); 1189 c.create_sq.sq_flags = cpu_to_le16(flags);
1144 c.create_sq.cqid = cpu_to_le16(qid); 1190 c.create_sq.cqid = cpu_to_le16(qid);
1145 1191
1146 return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); 1192 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1147} 1193}
1148 1194
1149static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 1195static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
@@ -1182,10 +1228,10 @@ static void nvme_abort_req(struct request *req)
1182 return; 1228 return;
1183 } 1229 }
1184 1230
1185 if (!dev->abort_limit) 1231 if (!dev->ctrl.abort_limit)
1186 return; 1232 return;
1187 1233
1188 abort_req = blk_mq_alloc_request(dev->admin_q, WRITE, 1234 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
1189 BLK_MQ_REQ_NOWAIT); 1235 BLK_MQ_REQ_NOWAIT);
1190 if (IS_ERR(abort_req)) 1236 if (IS_ERR(abort_req))
1191 return; 1237 return;
@@ -1199,7 +1245,7 @@ static void nvme_abort_req(struct request *req)
1199 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 1245 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1200 cmd.abort.command_id = abort_req->tag; 1246 cmd.abort.command_id = abort_req->tag;
1201 1247
1202 --dev->abort_limit; 1248 --dev->ctrl.abort_limit;
1203 cmd_rq->aborted = 1; 1249 cmd_rq->aborted = 1;
1204 1250
1205 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag, 1251 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
@@ -1294,8 +1340,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1294 nvmeq->cq_vector = -1; 1340 nvmeq->cq_vector = -1;
1295 spin_unlock_irq(&nvmeq->q_lock); 1341 spin_unlock_irq(&nvmeq->q_lock);
1296 1342
1297 if (!nvmeq->qid && nvmeq->dev->admin_q) 1343 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1298 blk_mq_freeze_queue_start(nvmeq->dev->admin_q); 1344 blk_mq_freeze_queue_start(nvmeq->dev->ctrl.admin_q);
1299 1345
1300 irq_set_affinity_hint(vector, NULL); 1346 irq_set_affinity_hint(vector, NULL);
1301 free_irq(vector, nvmeq); 1347 free_irq(vector, nvmeq);
@@ -1391,7 +1437,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1391 nvmeq->q_dmadev = dev->dev; 1437 nvmeq->q_dmadev = dev->dev;
1392 nvmeq->dev = dev; 1438 nvmeq->dev = dev;
1393 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", 1439 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
1394 dev->instance, qid); 1440 dev->ctrl.instance, qid);
1395 spin_lock_init(&nvmeq->q_lock); 1441 spin_lock_init(&nvmeq->q_lock);
1396 nvmeq->cq_head = 0; 1442 nvmeq->cq_head = 0;
1397 nvmeq->cq_phase = 1; 1443 nvmeq->cq_phase = 1;
@@ -1559,15 +1605,15 @@ static struct blk_mq_ops nvme_mq_ops = {
1559 1605
1560static void nvme_dev_remove_admin(struct nvme_dev *dev) 1606static void nvme_dev_remove_admin(struct nvme_dev *dev)
1561{ 1607{
1562 if (dev->admin_q && !blk_queue_dying(dev->admin_q)) { 1608 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1563 blk_cleanup_queue(dev->admin_q); 1609 blk_cleanup_queue(dev->ctrl.admin_q);
1564 blk_mq_free_tag_set(&dev->admin_tagset); 1610 blk_mq_free_tag_set(&dev->admin_tagset);
1565 } 1611 }
1566} 1612}
1567 1613
1568static int nvme_alloc_admin_tags(struct nvme_dev *dev) 1614static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1569{ 1615{
1570 if (!dev->admin_q) { 1616 if (!dev->ctrl.admin_q) {
1571 dev->admin_tagset.ops = &nvme_mq_admin_ops; 1617 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1572 dev->admin_tagset.nr_hw_queues = 1; 1618 dev->admin_tagset.nr_hw_queues = 1;
1573 dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1; 1619 dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
@@ -1580,18 +1626,18 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1580 if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 1626 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
1581 return -ENOMEM; 1627 return -ENOMEM;
1582 1628
1583 dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); 1629 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
1584 if (IS_ERR(dev->admin_q)) { 1630 if (IS_ERR(dev->ctrl.admin_q)) {
1585 blk_mq_free_tag_set(&dev->admin_tagset); 1631 blk_mq_free_tag_set(&dev->admin_tagset);
1586 return -ENOMEM; 1632 return -ENOMEM;
1587 } 1633 }
1588 if (!blk_get_queue(dev->admin_q)) { 1634 if (!blk_get_queue(dev->ctrl.admin_q)) {
1589 nvme_dev_remove_admin(dev); 1635 nvme_dev_remove_admin(dev);
1590 dev->admin_q = NULL; 1636 dev->ctrl.admin_q = NULL;
1591 return -ENODEV; 1637 return -ENODEV;
1592 } 1638 }
1593 } else 1639 } else
1594 blk_mq_unfreeze_queue(dev->admin_q); 1640 blk_mq_unfreeze_queue(dev->ctrl.admin_q);
1595 1641
1596 return 0; 1642 return 0;
1597} 1643}
@@ -1670,7 +1716,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1670 1716
1671static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 1717static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1672{ 1718{
1673 struct nvme_dev *dev = ns->dev; 1719 struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
1674 struct nvme_user_io io; 1720 struct nvme_user_io io;
1675 struct nvme_command c; 1721 struct nvme_command c;
1676 unsigned length, meta_len; 1722 unsigned length, meta_len;
@@ -1745,7 +1791,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1745 return status; 1791 return status;
1746} 1792}
1747 1793
1748static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, 1794static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1749 struct nvme_passthru_cmd __user *ucmd) 1795 struct nvme_passthru_cmd __user *ucmd)
1750{ 1796{
1751 struct nvme_passthru_cmd cmd; 1797 struct nvme_passthru_cmd cmd;
@@ -1774,7 +1820,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
1774 if (cmd.timeout_ms) 1820 if (cmd.timeout_ms)
1775 timeout = msecs_to_jiffies(cmd.timeout_ms); 1821 timeout = msecs_to_jiffies(cmd.timeout_ms);
1776 1822
1777 status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c, 1823 status = __nvme_submit_sync_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1778 NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, 1824 NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
1779 &cmd.result, timeout); 1825 &cmd.result, timeout);
1780 if (status >= 0) { 1826 if (status >= 0) {
@@ -1804,9 +1850,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1804 force_successful_syscall_return(); 1850 force_successful_syscall_return();
1805 return ns->ns_id; 1851 return ns->ns_id;
1806 case NVME_IOCTL_ADMIN_CMD: 1852 case NVME_IOCTL_ADMIN_CMD:
1807 return nvme_user_cmd(ns->dev, NULL, (void __user *)arg); 1853 return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
1808 case NVME_IOCTL_IO_CMD: 1854 case NVME_IOCTL_IO_CMD:
1809 return nvme_user_cmd(ns->dev, ns, (void __user *)arg); 1855 return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
1810 case NVME_IOCTL_SUBMIT_IO: 1856 case NVME_IOCTL_SUBMIT_IO:
1811 return nvme_submit_io(ns, (void __user *)arg); 1857 return nvme_submit_io(ns, (void __user *)arg);
1812 case SG_GET_VERSION_NUM: 1858 case SG_GET_VERSION_NUM:
@@ -1836,6 +1882,7 @@ static void nvme_free_dev(struct kref *kref);
1836static void nvme_free_ns(struct kref *kref) 1882static void nvme_free_ns(struct kref *kref)
1837{ 1883{
1838 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); 1884 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
1885 struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
1839 1886
1840 if (ns->type == NVME_NS_LIGHTNVM) 1887 if (ns->type == NVME_NS_LIGHTNVM)
1841 nvme_nvm_unregister(ns->queue, ns->disk->disk_name); 1888 nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
@@ -1844,7 +1891,7 @@ static void nvme_free_ns(struct kref *kref)
1844 ns->disk->private_data = NULL; 1891 ns->disk->private_data = NULL;
1845 spin_unlock(&dev_list_lock); 1892 spin_unlock(&dev_list_lock);
1846 1893
1847 kref_put(&ns->dev->kref, nvme_free_dev); 1894 kref_put(&dev->kref, nvme_free_dev);
1848 put_disk(ns->disk); 1895 put_disk(ns->disk);
1849 kfree(ns); 1896 kfree(ns);
1850} 1897}
@@ -1893,15 +1940,15 @@ static void nvme_config_discard(struct nvme_ns *ns)
1893static int nvme_revalidate_disk(struct gendisk *disk) 1940static int nvme_revalidate_disk(struct gendisk *disk)
1894{ 1941{
1895 struct nvme_ns *ns = disk->private_data; 1942 struct nvme_ns *ns = disk->private_data;
1896 struct nvme_dev *dev = ns->dev; 1943 struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
1897 struct nvme_id_ns *id; 1944 struct nvme_id_ns *id;
1898 u8 lbaf, pi_type; 1945 u8 lbaf, pi_type;
1899 u16 old_ms; 1946 u16 old_ms;
1900 unsigned short bs; 1947 unsigned short bs;
1901 1948
1902 if (nvme_identify_ns(dev, ns->ns_id, &id)) { 1949 if (nvme_identify_ns(&dev->ctrl, ns->ns_id, &id)) {
1903 dev_warn(dev->dev, "%s: Identify failure nvme%dn%d\n", __func__, 1950 dev_warn(dev->dev, "%s: Identify failure nvme%dn%d\n", __func__,
1904 dev->instance, ns->ns_id); 1951 dev->ctrl.instance, ns->ns_id);
1905 return -ENODEV; 1952 return -ENODEV;
1906 } 1953 }
1907 if (id->ncap == 0) { 1954 if (id->ncap == 0) {
@@ -1957,7 +2004,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
1957 else 2004 else
1958 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); 2005 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1959 2006
1960 if (dev->oncs & NVME_CTRL_ONCS_DSM) 2007 if (dev->ctrl.oncs & NVME_CTRL_ONCS_DSM)
1961 nvme_config_discard(ns); 2008 nvme_config_discard(ns);
1962 blk_mq_unfreeze_queue(disk->queue); 2009 blk_mq_unfreeze_queue(disk->queue);
1963 2010
@@ -2095,10 +2142,10 @@ static int nvme_kthread(void *data)
2095 spin_lock_irq(&nvmeq->q_lock); 2142 spin_lock_irq(&nvmeq->q_lock);
2096 nvme_process_cq(nvmeq); 2143 nvme_process_cq(nvmeq);
2097 2144
2098 while ((i == 0) && (dev->event_limit > 0)) { 2145 while (i == 0 && dev->ctrl.event_limit > 0) {
2099 if (nvme_submit_async_admin_req(dev)) 2146 if (nvme_submit_async_admin_req(dev))
2100 break; 2147 break;
2101 dev->event_limit--; 2148 dev->ctrl.event_limit--;
2102 } 2149 }
2103 spin_unlock_irq(&nvmeq->q_lock); 2150 spin_unlock_irq(&nvmeq->q_lock);
2104 } 2151 }
@@ -2124,7 +2171,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
2124 goto out_free_ns; 2171 goto out_free_ns;
2125 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); 2172 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
2126 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 2173 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
2127 ns->dev = dev; 2174 ns->ctrl = &dev->ctrl;
2128 ns->queue->queuedata = ns; 2175 ns->queue->queuedata = ns;
2129 2176
2130 disk = alloc_disk_node(0, node); 2177 disk = alloc_disk_node(0, node);
@@ -2145,7 +2192,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
2145 } 2192 }
2146 if (dev->stripe_size) 2193 if (dev->stripe_size)
2147 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); 2194 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
2148 if (dev->vwc & NVME_CTRL_VWC_PRESENT) 2195 if (dev->ctrl.vwc & NVME_CTRL_VWC_PRESENT)
2149 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA); 2196 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
2150 blk_queue_virt_boundary(ns->queue, dev->page_size - 1); 2197 blk_queue_virt_boundary(ns->queue, dev->page_size - 1);
2151 2198
@@ -2156,7 +2203,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
2156 disk->queue = ns->queue; 2203 disk->queue = ns->queue;
2157 disk->driverfs_dev = dev->device; 2204 disk->driverfs_dev = dev->device;
2158 disk->flags = GENHD_FL_EXT_DEVT; 2205 disk->flags = GENHD_FL_EXT_DEVT;
2159 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); 2206 sprintf(disk->disk_name, "nvme%dn%d", dev->ctrl.instance, nsid);
2160 2207
2161 /* 2208 /*
2162 * Initialize capacity to 0 until we establish the namespace format and 2209 * Initialize capacity to 0 until we establish the namespace format and
@@ -2221,7 +2268,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
2221 u32 result; 2268 u32 result;
2222 u32 q_count = (count - 1) | ((count - 1) << 16); 2269 u32 q_count = (count - 1) | ((count - 1) << 16);
2223 2270
2224 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, 2271 status = nvme_set_features(&dev->ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
2225 &result); 2272 &result);
2226 if (status < 0) 2273 if (status < 0)
2227 return status; 2274 return status;
@@ -2405,7 +2452,8 @@ static inline bool nvme_io_incapable(struct nvme_dev *dev)
2405 2452
2406static void nvme_ns_remove(struct nvme_ns *ns) 2453static void nvme_ns_remove(struct nvme_ns *ns)
2407{ 2454{
2408 bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); 2455 bool kill = nvme_io_incapable(to_nvme_dev(ns->ctrl)) &&
2456 !blk_queue_dying(ns->queue);
2409 2457
2410 if (kill) 2458 if (kill)
2411 blk_set_queue_dying(ns->queue); 2459 blk_set_queue_dying(ns->queue);
@@ -2462,7 +2510,7 @@ static void nvme_dev_scan(struct work_struct *work)
2462 2510
2463 if (!dev->tagset.tags) 2511 if (!dev->tagset.tags)
2464 return; 2512 return;
2465 if (nvme_identify_ctrl(dev, &ctrl)) 2513 if (nvme_identify_ctrl(&dev->ctrl, &ctrl))
2466 return; 2514 return;
2467 nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); 2515 nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
2468 kfree(ctrl); 2516 kfree(ctrl);
@@ -2482,18 +2530,18 @@ static int nvme_dev_add(struct nvme_dev *dev)
2482 struct nvme_id_ctrl *ctrl; 2530 struct nvme_id_ctrl *ctrl;
2483 int shift = NVME_CAP_MPSMIN(lo_hi_readq(dev->bar + NVME_REG_CAP)) + 12; 2531 int shift = NVME_CAP_MPSMIN(lo_hi_readq(dev->bar + NVME_REG_CAP)) + 12;
2484 2532
2485 res = nvme_identify_ctrl(dev, &ctrl); 2533 res = nvme_identify_ctrl(&dev->ctrl, &ctrl);
2486 if (res) { 2534 if (res) {
2487 dev_err(dev->dev, "Identify Controller failed (%d)\n", res); 2535 dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
2488 return -EIO; 2536 return -EIO;
2489 } 2537 }
2490 2538
2491 dev->oncs = le16_to_cpup(&ctrl->oncs); 2539 dev->ctrl.oncs = le16_to_cpup(&ctrl->oncs);
2492 dev->abort_limit = ctrl->acl + 1; 2540 dev->ctrl.abort_limit = ctrl->acl + 1;
2493 dev->vwc = ctrl->vwc; 2541 dev->ctrl.vwc = ctrl->vwc;
2494 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 2542 memcpy(dev->ctrl.serial, ctrl->sn, sizeof(ctrl->sn));
2495 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 2543 memcpy(dev->ctrl.model, ctrl->mn, sizeof(ctrl->mn));
2496 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 2544 memcpy(dev->ctrl.firmware_rev, ctrl->fr, sizeof(ctrl->fr));
2497 if (ctrl->mdts) 2545 if (ctrl->mdts)
2498 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); 2546 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
2499 else 2547 else
@@ -2728,7 +2776,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
2728 DEFINE_KTHREAD_WORKER_ONSTACK(worker); 2776 DEFINE_KTHREAD_WORKER_ONSTACK(worker);
2729 struct nvme_delq_ctx dq; 2777 struct nvme_delq_ctx dq;
2730 struct task_struct *kworker_task = kthread_run(kthread_worker_fn, 2778 struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
2731 &worker, "nvme%d", dev->instance); 2779 &worker, "nvme%d", dev->ctrl.instance);
2732 2780
2733 if (IS_ERR(kworker_task)) { 2781 if (IS_ERR(kworker_task)) {
2734 dev_err(dev->dev, 2782 dev_err(dev->dev,
@@ -2879,14 +2927,14 @@ static int nvme_set_instance(struct nvme_dev *dev)
2879 if (error) 2927 if (error)
2880 return -ENODEV; 2928 return -ENODEV;
2881 2929
2882 dev->instance = instance; 2930 dev->ctrl.instance = instance;
2883 return 0; 2931 return 0;
2884} 2932}
2885 2933
2886static void nvme_release_instance(struct nvme_dev *dev) 2934static void nvme_release_instance(struct nvme_dev *dev)
2887{ 2935{
2888 spin_lock(&dev_list_lock); 2936 spin_lock(&dev_list_lock);
2889 ida_remove(&nvme_instance_ida, dev->instance); 2937 ida_remove(&nvme_instance_ida, dev->ctrl.instance);
2890 spin_unlock(&dev_list_lock); 2938 spin_unlock(&dev_list_lock);
2891} 2939}
2892 2940
@@ -2899,8 +2947,8 @@ static void nvme_free_dev(struct kref *kref)
2899 nvme_release_instance(dev); 2947 nvme_release_instance(dev);
2900 if (dev->tagset.tags) 2948 if (dev->tagset.tags)
2901 blk_mq_free_tag_set(&dev->tagset); 2949 blk_mq_free_tag_set(&dev->tagset);
2902 if (dev->admin_q) 2950 if (dev->ctrl.admin_q)
2903 blk_put_queue(dev->admin_q); 2951 blk_put_queue(dev->ctrl.admin_q);
2904 kfree(dev->queues); 2952 kfree(dev->queues);
2905 kfree(dev->entry); 2953 kfree(dev->entry);
2906 kfree(dev); 2954 kfree(dev);
@@ -2914,8 +2962,8 @@ static int nvme_dev_open(struct inode *inode, struct file *f)
2914 2962
2915 spin_lock(&dev_list_lock); 2963 spin_lock(&dev_list_lock);
2916 list_for_each_entry(dev, &dev_list, node) { 2964 list_for_each_entry(dev, &dev_list, node) {
2917 if (dev->instance == instance) { 2965 if (dev->ctrl.instance == instance) {
2918 if (!dev->admin_q) { 2966 if (!dev->ctrl.admin_q) {
2919 ret = -EWOULDBLOCK; 2967 ret = -EWOULDBLOCK;
2920 break; 2968 break;
2921 } 2969 }
@@ -2945,12 +2993,12 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
2945 2993
2946 switch (cmd) { 2994 switch (cmd) {
2947 case NVME_IOCTL_ADMIN_CMD: 2995 case NVME_IOCTL_ADMIN_CMD:
2948 return nvme_user_cmd(dev, NULL, (void __user *)arg); 2996 return nvme_user_cmd(&dev->ctrl, NULL, (void __user *)arg);
2949 case NVME_IOCTL_IO_CMD: 2997 case NVME_IOCTL_IO_CMD:
2950 if (list_empty(&dev->namespaces)) 2998 if (list_empty(&dev->namespaces))
2951 return -ENOTTY; 2999 return -ENOTTY;
2952 ns = list_first_entry(&dev->namespaces, struct nvme_ns, list); 3000 ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
2953 return nvme_user_cmd(dev, ns, (void __user *)arg); 3001 return nvme_user_cmd(&dev->ctrl, ns, (void __user *)arg);
2954 case NVME_IOCTL_RESET: 3002 case NVME_IOCTL_RESET:
2955 dev_warn(dev->dev, "resetting controller\n"); 3003 dev_warn(dev->dev, "resetting controller\n");
2956 return nvme_reset(dev); 3004 return nvme_reset(dev);
@@ -3011,7 +3059,7 @@ static void nvme_probe_work(struct work_struct *work)
3011 if (result) 3059 if (result)
3012 goto free_tags; 3060 goto free_tags;
3013 3061
3014 dev->event_limit = 1; 3062 dev->ctrl.event_limit = 1;
3015 3063
3016 /* 3064 /*
3017 * Keep the controller around but remove all namespaces if we don't have 3065 * Keep the controller around but remove all namespaces if we don't have
@@ -3029,8 +3077,8 @@ static void nvme_probe_work(struct work_struct *work)
3029 3077
3030 free_tags: 3078 free_tags:
3031 nvme_dev_remove_admin(dev); 3079 nvme_dev_remove_admin(dev);
3032 blk_put_queue(dev->admin_q); 3080 blk_put_queue(dev->ctrl.admin_q);
3033 dev->admin_q = NULL; 3081 dev->ctrl.admin_q = NULL;
3034 dev->queues[0]->tags = NULL; 3082 dev->queues[0]->tags = NULL;
3035 disable: 3083 disable:
3036 nvme_disable_queue(dev, 0); 3084 nvme_disable_queue(dev, 0);
@@ -3058,7 +3106,7 @@ static void nvme_dead_ctrl(struct nvme_dev *dev)
3058 dev_warn(dev->dev, "Device failed to resume\n"); 3106 dev_warn(dev->dev, "Device failed to resume\n");
3059 kref_get(&dev->kref); 3107 kref_get(&dev->kref);
3060 if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d", 3108 if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
3061 dev->instance))) { 3109 dev->ctrl.instance))) {
3062 dev_err(dev->dev, 3110 dev_err(dev->dev,
3063 "Failed to start controller remove task\n"); 3111 "Failed to start controller remove task\n");
3064 kref_put(&dev->kref, nvme_free_dev); 3112 kref_put(&dev->kref, nvme_free_dev);
@@ -3100,7 +3148,7 @@ static int nvme_reset(struct nvme_dev *dev)
3100{ 3148{
3101 int ret; 3149 int ret;
3102 3150
3103 if (!dev->admin_q || blk_queue_dying(dev->admin_q)) 3151 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
3104 return -ENODEV; 3152 return -ENODEV;
3105 3153
3106 spin_lock(&dev_list_lock); 3154 spin_lock(&dev_list_lock);
@@ -3131,6 +3179,16 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
3131} 3179}
3132static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 3180static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3133 3181
3182static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
3183{
3184 *val = readl(to_nvme_dev(ctrl)->bar + off);
3185 return 0;
3186}
3187
3188static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
3189 .reg_read32 = nvme_pci_reg_read32,
3190};
3191
3134static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3192static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3135{ 3193{
3136 int node, result = -ENOMEM; 3194 int node, result = -ENOMEM;
@@ -3156,6 +3214,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3156 INIT_WORK(&dev->reset_work, nvme_reset_work); 3214 INIT_WORK(&dev->reset_work, nvme_reset_work);
3157 dev->dev = get_device(&pdev->dev); 3215 dev->dev = get_device(&pdev->dev);
3158 pci_set_drvdata(pdev, dev); 3216 pci_set_drvdata(pdev, dev);
3217
3218 dev->ctrl.ops = &nvme_pci_ctrl_ops;
3219 dev->ctrl.dev = dev->dev;
3220
3159 result = nvme_set_instance(dev); 3221 result = nvme_set_instance(dev);
3160 if (result) 3222 if (result)
3161 goto put_pci; 3223 goto put_pci;
@@ -3166,8 +3228,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3166 3228
3167 kref_init(&dev->kref); 3229 kref_init(&dev->kref);
3168 dev->device = device_create(nvme_class, &pdev->dev, 3230 dev->device = device_create(nvme_class, &pdev->dev,
3169 MKDEV(nvme_char_major, dev->instance), 3231 MKDEV(nvme_char_major, dev->ctrl.instance),
3170 dev, "nvme%d", dev->instance); 3232 dev, "nvme%d", dev->ctrl.instance);
3171 if (IS_ERR(dev->device)) { 3233 if (IS_ERR(dev->device)) {
3172 result = PTR_ERR(dev->device); 3234 result = PTR_ERR(dev->device);
3173 goto release_pools; 3235 goto release_pools;
@@ -3186,7 +3248,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3186 return 0; 3248 return 0;
3187 3249
3188 put_dev: 3250 put_dev:
3189 device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); 3251 device_destroy(nvme_class, MKDEV(nvme_char_major, dev->ctrl.instance));
3190 put_device(dev->device); 3252 put_device(dev->device);
3191 release_pools: 3253 release_pools:
3192 nvme_release_prp_pools(dev); 3254 nvme_release_prp_pools(dev);
@@ -3233,7 +3295,7 @@ static void nvme_remove(struct pci_dev *pdev)
3233 nvme_dev_remove(dev); 3295 nvme_dev_remove(dev);
3234 nvme_dev_shutdown(dev); 3296 nvme_dev_shutdown(dev);
3235 nvme_dev_remove_admin(dev); 3297 nvme_dev_remove_admin(dev);
3236 device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); 3298 device_destroy(nvme_class, MKDEV(nvme_char_major, dev->ctrl.instance));
3237 nvme_free_queues(dev, 0); 3299 nvme_free_queues(dev, 0);
3238 nvme_release_cmb(dev); 3300 nvme_release_cmb(dev);
3239 nvme_release_prp_pools(dev); 3301 nvme_release_prp_pools(dev);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index 0bf90b62ec27..bba29553bc94 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -524,7 +524,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
524 struct sg_io_hdr *hdr, u8 *inq_response, 524 struct sg_io_hdr *hdr, u8 *inq_response,
525 int alloc_len) 525 int alloc_len)
526{ 526{
527 struct nvme_dev *dev = ns->dev; 527 struct nvme_ctrl *ctrl = ns->ctrl;
528 struct nvme_id_ns *id_ns; 528 struct nvme_id_ns *id_ns;
529 int res; 529 int res;
530 int nvme_sc; 530 int nvme_sc;
@@ -532,10 +532,10 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
532 u8 resp_data_format = 0x02; 532 u8 resp_data_format = 0x02;
533 u8 protect; 533 u8 protect;
534 u8 cmdque = 0x01 << 1; 534 u8 cmdque = 0x01 << 1;
535 u8 fw_offset = sizeof(dev->firmware_rev); 535 u8 fw_offset = sizeof(ctrl->firmware_rev);
536 536
537 /* nvme ns identify - use DPS value for PROTECT field */ 537 /* nvme ns identify - use DPS value for PROTECT field */
538 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); 538 nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
539 res = nvme_trans_status_code(hdr, nvme_sc); 539 res = nvme_trans_status_code(hdr, nvme_sc);
540 if (res) 540 if (res)
541 return res; 541 return res;
@@ -553,12 +553,12 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
553 inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */ 553 inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
554 inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */ 554 inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
555 strncpy(&inq_response[8], "NVMe ", 8); 555 strncpy(&inq_response[8], "NVMe ", 8);
556 strncpy(&inq_response[16], dev->model, 16); 556 strncpy(&inq_response[16], ctrl->model, 16);
557 557
558 while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4) 558 while (ctrl->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
559 fw_offset--; 559 fw_offset--;
560 fw_offset -= 4; 560 fw_offset -= 4;
561 strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4); 561 strncpy(&inq_response[32], ctrl->firmware_rev + fw_offset, 4);
562 562
563 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 563 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
564 return nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 564 return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -588,27 +588,26 @@ static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
588 struct sg_io_hdr *hdr, u8 *inq_response, 588 struct sg_io_hdr *hdr, u8 *inq_response,
589 int alloc_len) 589 int alloc_len)
590{ 590{
591 struct nvme_dev *dev = ns->dev;
592 int xfer_len; 591 int xfer_len;
593 592
594 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); 593 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
595 inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */ 594 inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
596 inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */ 595 inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
597 strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH); 596 strncpy(&inq_response[4], ns->ctrl->serial, INQ_SERIAL_NUMBER_LENGTH);
598 597
599 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 598 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
600 return nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 599 return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
601} 600}
602 601
603static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, 602static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
604 u8 *inq_response, int alloc_len) 603 u8 *inq_response, int alloc_len, u32 vs)
605{ 604{
606 struct nvme_id_ns *id_ns; 605 struct nvme_id_ns *id_ns;
607 int nvme_sc, res; 606 int nvme_sc, res;
608 size_t len; 607 size_t len;
609 void *eui; 608 void *eui;
610 609
611 nvme_sc = nvme_identify_ns(ns->dev, ns->ns_id, &id_ns); 610 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
612 res = nvme_trans_status_code(hdr, nvme_sc); 611 res = nvme_trans_status_code(hdr, nvme_sc);
613 if (res) 612 if (res)
614 return res; 613 return res;
@@ -616,7 +615,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
616 eui = id_ns->eui64; 615 eui = id_ns->eui64;
617 len = sizeof(id_ns->eui64); 616 len = sizeof(id_ns->eui64);
618 617
619 if (readl(ns->dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) { 618 if (vs >= NVME_VS(1, 2)) {
620 if (bitmap_empty(eui, len * 8)) { 619 if (bitmap_empty(eui, len * 8)) {
621 eui = id_ns->nguid; 620 eui = id_ns->nguid;
622 len = sizeof(id_ns->nguid); 621 len = sizeof(id_ns->nguid);
@@ -648,7 +647,7 @@ out_free_id:
648static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns, 647static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
649 struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len) 648 struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
650{ 649{
651 struct nvme_dev *dev = ns->dev; 650 struct nvme_ctrl *ctrl = ns->ctrl;
652 struct nvme_id_ctrl *id_ctrl; 651 struct nvme_id_ctrl *id_ctrl;
653 int nvme_sc, res; 652 int nvme_sc, res;
654 653
@@ -659,7 +658,7 @@ static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
659 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 658 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
660 } 659 }
661 660
662 nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); 661 nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
663 res = nvme_trans_status_code(hdr, nvme_sc); 662 res = nvme_trans_status_code(hdr, nvme_sc);
664 if (res) 663 if (res)
665 return res; 664 return res;
@@ -675,9 +674,9 @@ static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
675 inq_response[7] = 0x44; /* Designator Length */ 674 inq_response[7] = 0x44; /* Designator Length */
676 675
677 sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid)); 676 sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
678 memcpy(&inq_response[12], dev->model, sizeof(dev->model)); 677 memcpy(&inq_response[12], ctrl->model, sizeof(ctrl->model));
679 sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id)); 678 sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id));
680 memcpy(&inq_response[56], dev->serial, sizeof(dev->serial)); 679 memcpy(&inq_response[56], ctrl->serial, sizeof(ctrl->serial));
681 680
682 res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len); 681 res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
683 kfree(id_ctrl); 682 kfree(id_ctrl);
@@ -688,9 +687,14 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
688 u8 *resp, int alloc_len) 687 u8 *resp, int alloc_len)
689{ 688{
690 int res; 689 int res;
690 u32 vs;
691 691
692 if (readl(ns->dev->bar + NVME_REG_VS) >= NVME_VS(1, 1)) { 692 res = ns->ctrl->ops->reg_read32(ns->ctrl, NVME_REG_VS, &vs);
693 res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); 693 if (res)
694 return res;
695
696 if (vs >= NVME_VS(1, 1)) {
697 res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len, vs);
694 if (res != -EOPNOTSUPP) 698 if (res != -EOPNOTSUPP)
695 return res; 699 return res;
696 } 700 }
@@ -704,7 +708,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
704 u8 *inq_response; 708 u8 *inq_response;
705 int res; 709 int res;
706 int nvme_sc; 710 int nvme_sc;
707 struct nvme_dev *dev = ns->dev; 711 struct nvme_ctrl *ctrl = ns->ctrl;
708 struct nvme_id_ctrl *id_ctrl; 712 struct nvme_id_ctrl *id_ctrl;
709 struct nvme_id_ns *id_ns; 713 struct nvme_id_ns *id_ns;
710 int xfer_len; 714 int xfer_len;
@@ -720,7 +724,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
720 if (inq_response == NULL) 724 if (inq_response == NULL)
721 return -ENOMEM; 725 return -ENOMEM;
722 726
723 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); 727 nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
724 res = nvme_trans_status_code(hdr, nvme_sc); 728 res = nvme_trans_status_code(hdr, nvme_sc);
725 if (res) 729 if (res)
726 goto out_free_inq; 730 goto out_free_inq;
@@ -736,7 +740,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
736 app_chk = protect << 1; 740 app_chk = protect << 1;
737 ref_chk = protect; 741 ref_chk = protect;
738 742
739 nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); 743 nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
740 res = nvme_trans_status_code(hdr, nvme_sc); 744 res = nvme_trans_status_code(hdr, nvme_sc);
741 if (res) 745 if (res)
742 goto out_free_inq; 746 goto out_free_inq;
@@ -847,7 +851,6 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
847 int res; 851 int res;
848 int xfer_len; 852 int xfer_len;
849 u8 *log_response; 853 u8 *log_response;
850 struct nvme_dev *dev = ns->dev;
851 struct nvme_smart_log *smart_log; 854 struct nvme_smart_log *smart_log;
852 u8 temp_c; 855 u8 temp_c;
853 u16 temp_k; 856 u16 temp_k;
@@ -856,7 +859,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
856 if (log_response == NULL) 859 if (log_response == NULL)
857 return -ENOMEM; 860 return -ENOMEM;
858 861
859 res = nvme_get_log_page(dev, &smart_log); 862 res = nvme_get_log_page(ns->ctrl, &smart_log);
860 if (res < 0) 863 if (res < 0)
861 goto out_free_response; 864 goto out_free_response;
862 865
@@ -894,7 +897,6 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
894 int res; 897 int res;
895 int xfer_len; 898 int xfer_len;
896 u8 *log_response; 899 u8 *log_response;
897 struct nvme_dev *dev = ns->dev;
898 struct nvme_smart_log *smart_log; 900 struct nvme_smart_log *smart_log;
899 u32 feature_resp; 901 u32 feature_resp;
900 u8 temp_c_cur, temp_c_thresh; 902 u8 temp_c_cur, temp_c_thresh;
@@ -904,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
904 if (log_response == NULL) 906 if (log_response == NULL)
905 return -ENOMEM; 907 return -ENOMEM;
906 908
907 res = nvme_get_log_page(dev, &smart_log); 909 res = nvme_get_log_page(ns->ctrl, &smart_log);
908 if (res < 0) 910 if (res < 0)
909 goto out_free_response; 911 goto out_free_response;
910 912
@@ -918,7 +920,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
918 kfree(smart_log); 920 kfree(smart_log);
919 921
920 /* Get Features for Temp Threshold */ 922 /* Get Features for Temp Threshold */
921 res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0, 923 res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
922 &feature_resp); 924 &feature_resp);
923 if (res != NVME_SC_SUCCESS) 925 if (res != NVME_SC_SUCCESS)
924 temp_c_thresh = LOG_TEMP_UNKNOWN; 926 temp_c_thresh = LOG_TEMP_UNKNOWN;
@@ -980,7 +982,6 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
980{ 982{
981 int res; 983 int res;
982 int nvme_sc; 984 int nvme_sc;
983 struct nvme_dev *dev = ns->dev;
984 struct nvme_id_ns *id_ns; 985 struct nvme_id_ns *id_ns;
985 u8 flbas; 986 u8 flbas;
986 u32 lba_length; 987 u32 lba_length;
@@ -990,7 +991,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
990 else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN) 991 else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
991 return -EINVAL; 992 return -EINVAL;
992 993
993 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); 994 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
994 res = nvme_trans_status_code(hdr, nvme_sc); 995 res = nvme_trans_status_code(hdr, nvme_sc);
995 if (res) 996 if (res)
996 return res; 997 return res;
@@ -1046,14 +1047,13 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
1046{ 1047{
1047 int res = 0; 1048 int res = 0;
1048 int nvme_sc; 1049 int nvme_sc;
1049 struct nvme_dev *dev = ns->dev;
1050 u32 feature_resp; 1050 u32 feature_resp;
1051 u8 vwc; 1051 u8 vwc;
1052 1052
1053 if (len < MODE_PAGE_CACHING_LEN) 1053 if (len < MODE_PAGE_CACHING_LEN)
1054 return -EINVAL; 1054 return -EINVAL;
1055 1055
1056 nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0, 1056 nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
1057 &feature_resp); 1057 &feature_resp);
1058 res = nvme_trans_status_code(hdr, nvme_sc); 1058 res = nvme_trans_status_code(hdr, nvme_sc);
1059 if (res) 1059 if (res)
@@ -1239,12 +1239,11 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1239{ 1239{
1240 int res; 1240 int res;
1241 int nvme_sc; 1241 int nvme_sc;
1242 struct nvme_dev *dev = ns->dev;
1243 struct nvme_id_ctrl *id_ctrl; 1242 struct nvme_id_ctrl *id_ctrl;
1244 int lowest_pow_st; /* max npss = lowest power consumption */ 1243 int lowest_pow_st; /* max npss = lowest power consumption */
1245 unsigned ps_desired = 0; 1244 unsigned ps_desired = 0;
1246 1245
1247 nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); 1246 nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
1248 res = nvme_trans_status_code(hdr, nvme_sc); 1247 res = nvme_trans_status_code(hdr, nvme_sc);
1249 if (res) 1248 if (res)
1250 return res; 1249 return res;
@@ -1288,7 +1287,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1288 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 1287 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1289 break; 1288 break;
1290 } 1289 }
1291 nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0, 1290 nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_POWER_MGMT, ps_desired, 0,
1292 NULL); 1291 NULL);
1293 return nvme_trans_status_code(hdr, nvme_sc); 1292 return nvme_trans_status_code(hdr, nvme_sc);
1294} 1293}
@@ -1312,7 +1311,6 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
1312 u8 buffer_id) 1311 u8 buffer_id)
1313{ 1312{
1314 int nvme_sc; 1313 int nvme_sc;
1315 struct nvme_dev *dev = ns->dev;
1316 struct nvme_command c; 1314 struct nvme_command c;
1317 1315
1318 if (hdr->iovec_count > 0) { 1316 if (hdr->iovec_count > 0) {
@@ -1329,7 +1327,7 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
1329 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); 1327 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
1330 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); 1328 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
1331 1329
1332 nvme_sc = __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 1330 nvme_sc = __nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL,
1333 hdr->dxferp, tot_len, NULL, 0); 1331 hdr->dxferp, tot_len, NULL, 0);
1334 return nvme_trans_status_code(hdr, nvme_sc); 1332 return nvme_trans_status_code(hdr, nvme_sc);
1335} 1333}
@@ -1396,14 +1394,13 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1396{ 1394{
1397 int res = 0; 1395 int res = 0;
1398 int nvme_sc; 1396 int nvme_sc;
1399 struct nvme_dev *dev = ns->dev;
1400 unsigned dword11; 1397 unsigned dword11;
1401 1398
1402 switch (page_code) { 1399 switch (page_code) {
1403 case MODE_PAGE_CACHING: 1400 case MODE_PAGE_CACHING:
1404 dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0); 1401 dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
1405 nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11, 1402 nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
1406 0, NULL); 1403 dword11, 0, NULL);
1407 res = nvme_trans_status_code(hdr, nvme_sc); 1404 res = nvme_trans_status_code(hdr, nvme_sc);
1408 break; 1405 break;
1409 case MODE_PAGE_CONTROL: 1406 case MODE_PAGE_CONTROL:
@@ -1505,7 +1502,6 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
1505{ 1502{
1506 int res = 0; 1503 int res = 0;
1507 int nvme_sc; 1504 int nvme_sc;
1508 struct nvme_dev *dev = ns->dev;
1509 u8 flbas; 1505 u8 flbas;
1510 1506
1511 /* 1507 /*
@@ -1518,7 +1514,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
1518 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { 1514 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
1519 struct nvme_id_ns *id_ns; 1515 struct nvme_id_ns *id_ns;
1520 1516
1521 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); 1517 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
1522 res = nvme_trans_status_code(hdr, nvme_sc); 1518 res = nvme_trans_status_code(hdr, nvme_sc);
1523 if (res) 1519 if (res)
1524 return res; 1520 return res;
@@ -1602,7 +1598,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1602{ 1598{
1603 int res; 1599 int res;
1604 int nvme_sc; 1600 int nvme_sc;
1605 struct nvme_dev *dev = ns->dev;
1606 struct nvme_id_ns *id_ns; 1601 struct nvme_id_ns *id_ns;
1607 u8 i; 1602 u8 i;
1608 u8 flbas, nlbaf; 1603 u8 flbas, nlbaf;
@@ -1611,7 +1606,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1611 struct nvme_command c; 1606 struct nvme_command c;
1612 1607
1613 /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */ 1608 /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
1614 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); 1609 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
1615 res = nvme_trans_status_code(hdr, nvme_sc); 1610 res = nvme_trans_status_code(hdr, nvme_sc);
1616 if (res) 1611 if (res)
1617 return res; 1612 return res;
@@ -1643,7 +1638,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1643 c.format.nsid = cpu_to_le32(ns->ns_id); 1638 c.format.nsid = cpu_to_le32(ns->ns_id);
1644 c.format.cdw10 = cpu_to_le32(cdw10); 1639 c.format.cdw10 = cpu_to_le32(cdw10);
1645 1640
1646 nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); 1641 nvme_sc = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, 0);
1647 res = nvme_trans_status_code(hdr, nvme_sc); 1642 res = nvme_trans_status_code(hdr, nvme_sc);
1648 1643
1649 kfree(id_ns); 1644 kfree(id_ns);
@@ -2072,7 +2067,6 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2072 u32 alloc_len; 2067 u32 alloc_len;
2073 u32 resp_size; 2068 u32 resp_size;
2074 u32 xfer_len; 2069 u32 xfer_len;
2075 struct nvme_dev *dev = ns->dev;
2076 struct nvme_id_ns *id_ns; 2070 struct nvme_id_ns *id_ns;
2077 u8 *response; 2071 u8 *response;
2078 2072
@@ -2084,7 +2078,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2084 resp_size = READ_CAP_10_RESP_SIZE; 2078 resp_size = READ_CAP_10_RESP_SIZE;
2085 } 2079 }
2086 2080
2087 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); 2081 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
2088 res = nvme_trans_status_code(hdr, nvme_sc); 2082 res = nvme_trans_status_code(hdr, nvme_sc);
2089 if (res) 2083 if (res)
2090 return res; 2084 return res;
@@ -2112,7 +2106,6 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2112 int nvme_sc; 2106 int nvme_sc;
2113 u32 alloc_len, xfer_len, resp_size; 2107 u32 alloc_len, xfer_len, resp_size;
2114 u8 *response; 2108 u8 *response;
2115 struct nvme_dev *dev = ns->dev;
2116 struct nvme_id_ctrl *id_ctrl; 2109 struct nvme_id_ctrl *id_ctrl;
2117 u32 ll_length, lun_id; 2110 u32 ll_length, lun_id;
2118 u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET; 2111 u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
@@ -2126,7 +2119,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2126 case ALL_LUNS_RETURNED: 2119 case ALL_LUNS_RETURNED:
2127 case ALL_WELL_KNOWN_LUNS_RETURNED: 2120 case ALL_WELL_KNOWN_LUNS_RETURNED:
2128 case RESTRICTED_LUNS_RETURNED: 2121 case RESTRICTED_LUNS_RETURNED:
2129 nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); 2122 nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
2130 res = nvme_trans_status_code(hdr, nvme_sc); 2123 res = nvme_trans_status_code(hdr, nvme_sc);
2131 if (res) 2124 if (res)
2132 return res; 2125 return res;
@@ -2327,9 +2320,7 @@ static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
2327 struct sg_io_hdr *hdr, 2320 struct sg_io_hdr *hdr,
2328 u8 *cmd) 2321 u8 *cmd)
2329{ 2322{
2330 struct nvme_dev *dev = ns->dev; 2323 if (nvme_ctrl_ready(ns->ctrl))
2331
2332 if (!(readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_RDY))
2333 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, 2324 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2334 NOT_READY, SCSI_ASC_LUN_NOT_READY, 2325 NOT_READY, SCSI_ASC_LUN_NOT_READY,
2335 SCSI_ASCQ_CAUSE_NOT_REPORTABLE); 2326 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);