Diffstat (limited to 'drivers/nvme/host/nvme.h')
-rw-r--r--	drivers/nvme/host/nvme.h	| 242
1 file changed, 191 insertions, 51 deletions
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 044253dca30a..4fb5bb737868 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -19,58 +19,77 @@
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
+enum {
+	/*
+	 * Driver internal status code for commands that were cancelled due
+	 * to timeouts or controller shutdown. The value is negative so
+	 * that it a) doesn't overlap with the unsigned hardware error codes,
+	 * and b) can easily be tested for.
+	 */
+	NVME_SC_CANCELLED		= -EINTR,
+};
+
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
 
+extern unsigned char admin_timeout;
+#define ADMIN_TIMEOUT		(admin_timeout * HZ)
+
+extern unsigned char shutdown_timeout;
+#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
+
 enum {
 	NVME_NS_LBA		= 0,
 	NVME_NS_LIGHTNVM	= 1,
 };
 
 /*
- * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ * List of workarounds for devices that required behavior not specified in
+ * the standard.
  */
-struct nvme_dev {
-	struct list_head node;
-	struct nvme_queue **queues;
+enum nvme_quirks {
+	/*
+	 * Prefers I/O aligned to a stripe size specified in a vendor
+	 * specific Identify field.
+	 */
+	NVME_QUIRK_STRIPE_SIZE		= (1 << 0),
+
+	/*
+	 * The controller doesn't handle Identify values other than 0 or 1
+	 * correctly.
+	 */
+	NVME_QUIRK_IDENTIFY_CNS		= (1 << 1),
+};
+
+struct nvme_ctrl {
+	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
-	struct blk_mq_tag_set tagset;
-	struct blk_mq_tag_set admin_tagset;
-	u32 __iomem *dbs;
 	struct device *dev;
-	struct dma_pool *prp_page_pool;
-	struct dma_pool *prp_small_pool;
+	struct kref kref;
 	int instance;
-	unsigned queue_count;
-	unsigned online_queues;
-	unsigned max_qid;
-	int q_depth;
-	u32 db_stride;
-	u32 ctrl_config;
-	struct msix_entry *entry;
-	struct nvme_bar __iomem *bar;
+	struct blk_mq_tag_set *tagset;
 	struct list_head namespaces;
-	struct kref kref;
-	struct device *device;
-	struct work_struct reset_work;
-	struct work_struct probe_work;
-	struct work_struct scan_work;
+	struct mutex namespaces_mutex;
+	struct device *device;	/* char device */
+	struct list_head node;
+
 	char name[12];
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
-	bool subsystem;
+
+	u32 ctrl_config;
+
+	u32 page_size;
 	u32 max_hw_sectors;
 	u32 stripe_size;
-	u32 page_size;
-	void __iomem *cmb;
-	dma_addr_t cmb_dma_addr;
-	u64 cmb_size;
-	u32 cmbsz;
 	u16 oncs;
-	u16 abort_limit;
+	atomic_t abort_limit;
 	u8 event_limit;
 	u8 vwc;
+	u32 vs;
+	bool subsystem;
+	unsigned long quirks;
 };
 
 /*
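
The quirks field added above is a plain bitmask of enum nvme_quirks values, so core code can gate behavior on ctrl->quirks instead of re-matching PCI IDs in every transport. A minimal illustrative sketch of how such a bit might be consumed (the helper name below is hypothetical, not part of this patch):

/*
 * Hypothetical example: gate behavior on a quirk bit carried in the
 * transport-independent struct nvme_ctrl.
 */
static bool example_wants_stripe_alignment(struct nvme_ctrl *ctrl)
{
	return (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && ctrl->stripe_size;
}
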
@@ -79,11 +98,14 @@ struct nvme_dev {
 struct nvme_ns {
 	struct list_head list;
 
-	struct nvme_dev *dev;
+	struct nvme_ctrl *ctrl;
 	struct request_queue *queue;
 	struct gendisk *disk;
 	struct kref kref;
 
+	u8 eui[8];
+	u8 uuid[16];
+
 	unsigned ns_id;
 	int lba_shift;
 	u16 ms;
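
With struct nvme_ns now holding a struct nvme_ctrl pointer instead of the PCI-specific struct nvme_dev, per-namespace code stays transport-neutral. An illustrative sketch using the Identify helper declared later in this header (the wrapper itself is hypothetical):

/*
 * Hypothetical example: a namespace reaches its controller only
 * through the generic struct nvme_ctrl.
 */
static int example_ns_identify(struct nvme_ns *ns, struct nvme_id_ns **id)
{
	return nvme_identify_ns(ns->ctrl, ns->ns_id, id);
}
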
@@ -94,41 +116,156 @@ struct nvme_ns {
 	u32 mode_select_block_len;
 };
 
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
-	unsigned long private;	/* For the use of the submitter of the I/O */
-	int npages;		/* In the PRP list. 0 means small pool in use */
-	int offset;		/* Of PRP list */
-	int nents;		/* Used in scatterlist */
-	int length;		/* Of data, in bytes */
-	dma_addr_t first_dma;
-	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
-	struct scatterlist sg[0];
+struct nvme_ctrl_ops {
+	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+	bool (*io_incapable)(struct nvme_ctrl *ctrl);
+	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
+	void (*free_ctrl)(struct nvme_ctrl *ctrl);
 };
 
+static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
+{
+	u32 val = 0;
+
+	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+		return false;
+	return val & NVME_CSTS_RDY;
+}
+
+static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
+{
+	u32 val = 0;
+
+	if (ctrl->ops->io_incapable(ctrl))
+		return true;
+	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+		return true;
+	return val & NVME_CSTS_CFS;
+}
+
+static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
+{
+	if (!ctrl->subsystem)
+		return -ENOTTY;
+	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+}
+
 static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 {
 	return (sector >> (ns->lba_shift - 9));
 }
 
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+		struct nvme_command *cmnd)
+{
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->common.opcode = nvme_cmd_flush;
+	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+}
+
+static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
+		struct nvme_command *cmnd)
+{
+	u16 control = 0;
+	u32 dsmgmt = 0;
+
+	if (req->cmd_flags & REQ_FUA)
+		control |= NVME_RW_FUA;
+	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+		control |= NVME_RW_LR;
+
+	if (req->cmd_flags & REQ_RAHEAD)
+		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+	cmnd->rw.command_id = req->tag;
+	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+	if (ns->ms) {
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			cmnd->rw.reftag = cpu_to_le32(
+					nvme_block_nr(ns, blk_rq_pos(req)));
+			break;
+		}
+		if (!blk_integrity_rq(req))
+			control |= NVME_RW_PRINFO_PRACT;
+	}
+
+	cmnd->rw.control = cpu_to_le16(control);
+	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+}
+
+
+static inline int nvme_error_status(u16 status)
+{
+	switch (status & 0x7ff) {
+	case NVME_SC_SUCCESS:
+		return 0;
+	case NVME_SC_CAP_EXCEEDED:
+		return -ENOSPC;
+	default:
+		return -EIO;
+	}
+}
+
+static inline bool nvme_req_needs_retry(struct request *req, u16 status)
+{
+	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
+		(jiffies - req->start_time) < req->timeout;
+}
+
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+		const struct nvme_ctrl_ops *ops, unsigned long quirks);
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
+void nvme_put_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_identify(struct nvme_ctrl *ctrl);
+
+void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+
+void nvme_stop_queues(struct nvme_ctrl *ctrl);
+void nvme_start_queues(struct nvme_ctrl *ctrl);
+
+struct request *nvme_alloc_request(struct request_queue *q,
+		struct nvme_command *cmd, unsigned int flags);
+void nvme_requeue_req(struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void *buffer, void __user *ubuffer, unsigned bufflen,
+		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void __user *ubuffer, unsigned bufflen, u32 *result,
+		unsigned timeout);
+int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void __user *ubuffer, unsigned bufflen,
+		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
 		u32 *result, unsigned timeout);
-int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
-int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
+int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
 		struct nvme_id_ns **id);
-int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
-int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
+int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 		dma_addr_t dma_addr, u32 *result);
-int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 		dma_addr_t dma_addr, u32 *result);
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+
+extern spinlock_t dev_list_lock;
 
 struct sg_io_hdr;
 
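
The nvme_setup_flush()/nvme_setup_rw() helpers above let any transport build the wire-format command in its ->queue_rq() path. A hedged sketch of that call site (the dispatch function is hypothetical, and REQ_FLUSH reflects the block-layer flags of this kernel generation):

/*
 * Hypothetical example: translate a block request into an NVMe
 * command using the inline helpers from this header. DMA mapping
 * and error handling are omitted.
 */
static void example_build_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	if (req->cmd_flags & REQ_FLUSH)
		nvme_setup_flush(ns, cmnd);
	else
		nvme_setup_rw(ns, req, cmnd);
}
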
@@ -154,4 +291,7 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
 }
 #endif /* CONFIG_NVM */
 
+int __init nvme_core_init(void);
+void nvme_core_exit(void);
+
 #endif /* _NVME_H */
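
Taken together, the new declarations define the controller lifecycle a transport driver is expected to drive. A hedged sketch of a probe path under that assumption (my_ctrl_ops and the surrounding probe context are hypothetical, not part of this patch):

/*
 * Hypothetical example: the init/identify/scan sequence a transport
 * would follow with the new core API. my_ctrl_ops would supply the
 * reg_read32/reg_write32/reg_read64/... callbacks for the transport.
 */
static const struct nvme_ctrl_ops my_ctrl_ops;

static int example_probe(struct device *dev, struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_init_ctrl(ctrl, dev, &my_ctrl_ops, 0 /* quirks */);
	if (ret)
		return ret;

	ret = nvme_init_identify(ctrl);		/* fills serial/model/oncs/vwc */
	if (ret) {
		nvme_uninit_ctrl(ctrl);
		nvme_put_ctrl(ctrl);
		return ret;
	}

	nvme_scan_namespaces(ctrl);		/* populates ctrl->namespaces */
	return 0;
}
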