Diffstat (limited to 'drivers/nvme/host/lightnvm.c')
 -rw-r--r--  drivers/nvme/host/lightnvm.c  187
 1 file changed, 130 insertions(+), 57 deletions(-)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index e0b7b95813bc..15f2acb4d5cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -22,8 +22,6 @@
 
 #include "nvme.h"
 
-#ifdef CONFIG_NVM
-
 #include <linux/nvme.h>
 #include <linux/bitops.h>
 #include <linux/lightnvm.h>
@@ -93,7 +91,7 @@ struct nvme_nvm_l2ptbl {
 	__le16 cdw14[6];
 };
 
-struct nvme_nvm_bbtbl {
+struct nvme_nvm_getbbtbl {
 	__u8 opcode;
 	__u8 flags;
 	__u16 command_id;
@@ -101,10 +99,23 @@ struct nvme_nvm_bbtbl {
 	__u64 rsvd[2];
 	__le64 prp1;
 	__le64 prp2;
-	__le32 prp1_len;
-	__le32 prp2_len;
-	__le32 lbb;
-	__u32 rsvd11[3];
+	__le64 spba;
+	__u32 rsvd4[4];
+};
+
+struct nvme_nvm_setbbtbl {
+	__u8 opcode;
+	__u8 flags;
+	__u16 command_id;
+	__le32 nsid;
+	__le64 rsvd[2];
+	__le64 prp1;
+	__le64 prp2;
+	__le64 spba;
+	__le16 nlb;
+	__u8 value;
+	__u8 rsvd3;
+	__u32 rsvd4[3];
 };
 
 struct nvme_nvm_erase_blk {
@@ -129,8 +140,8 @@ struct nvme_nvm_command {
 		struct nvme_nvm_hb_rw hb_rw;
 		struct nvme_nvm_ph_rw ph_rw;
 		struct nvme_nvm_l2ptbl l2p;
-		struct nvme_nvm_bbtbl get_bb;
-		struct nvme_nvm_bbtbl set_bb;
+		struct nvme_nvm_getbbtbl get_bb;
+		struct nvme_nvm_setbbtbl set_bb;
 		struct nvme_nvm_erase_blk erase;
 	};
 };
@@ -142,11 +153,13 @@ struct nvme_nvm_id_group {
 	__u8 num_ch;
 	__u8 num_lun;
 	__u8 num_pln;
+	__u8 rsvd1;
 	__le16 num_blk;
 	__le16 num_pg;
 	__le16 fpg_sz;
 	__le16 csecs;
 	__le16 sos;
+	__le16 rsvd2;
 	__le32 trdt;
 	__le32 trdm;
 	__le32 tprt;
@@ -154,8 +167,9 @@ struct nvme_nvm_id_group {
 	__le32 tbet;
 	__le32 tbem;
 	__le32 mpos;
+	__le32 mccap;
 	__le16 cpar;
-	__u8 reserved[913];
+	__u8 reserved[906];
 } __packed;
 
 struct nvme_nvm_addr_format {
@@ -178,15 +192,28 @@ struct nvme_nvm_id {
 	__u8 ver_id;
 	__u8 vmnt;
 	__u8 cgrps;
-	__u8 res[5];
+	__u8 res;
 	__le32 cap;
 	__le32 dom;
 	struct nvme_nvm_addr_format ppaf;
-	__u8 ppat;
-	__u8 resv[223];
+	__u8 resv[228];
 	struct nvme_nvm_id_group groups[4];
 } __packed;
 
+struct nvme_nvm_bb_tbl {
+	__u8 tblid[4];
+	__le16 verid;
+	__le16 revid;
+	__le32 rvsd1;
+	__le32 tblks;
+	__le32 tfact;
+	__le32 tgrown;
+	__le32 tdresv;
+	__le32 thresv;
+	__le32 rsvd2[8];
+	__u8 blk[0];
+};
+
 /*
  * Check we didn't inadvertently grow the command struct
  */
@@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
-	BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
 }
 
 static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
@@ -234,6 +263,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 		dst->tbet = le32_to_cpu(src->tbet);
 		dst->tbem = le32_to_cpu(src->tbem);
 		dst->mpos = le32_to_cpu(src->mpos);
+		dst->mccap = le32_to_cpu(src->mccap);
 
 		dst->cpar = le16_to_cpu(src->cpar);
 	}
@@ -241,9 +271,10 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 	return 0;
 }
 
-static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
+static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
 {
-	struct nvme_ns *ns = q->queuedata;
+	struct nvme_ns *ns = nvmdev->q->queuedata;
+	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_id *nvme_nvm_id;
 	struct nvme_nvm_command c = {};
 	int ret;
@@ -256,8 +287,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 	if (!nvme_nvm_id)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id,
-				sizeof(struct nvme_nvm_id));
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+				nvme_nvm_id, sizeof(struct nvme_nvm_id));
 	if (ret) {
 		ret = -EIO;
 		goto out;
@@ -268,6 +299,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 	nvm_id->cgrps = nvme_nvm_id->cgrps;
 	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
 	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
+	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
+					sizeof(struct nvme_nvm_addr_format));
 
 	ret = init_grps(nvm_id, nvme_nvm_id);
 out:
@@ -275,13 +308,13 @@ out:
 	return ret;
 }
 
-static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
+static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
 				nvm_l2p_update_fn *update_l2p, void *priv)
 {
-	struct nvme_ns *ns = q->queuedata;
+	struct nvme_ns *ns = nvmdev->q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	u32 len = queue_max_hw_sectors(q) << 9;
+	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
 	u32 nlb_pr_rq = len / sizeof(u64);
 	u64 cmd_slba = slba;
 	void *entries;
@@ -299,8 +332,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
 		c.l2p.slba = cpu_to_le64(cmd_slba);
 		c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-		ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c,
-				entries, len);
+		ret = nvme_submit_sync_cmd(dev->admin_q,
+				(struct nvme_command *)&c, entries, len);
 		if (ret) {
 			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
 									ret);
@@ -322,43 +355,84 @@ out:
 	return ret;
 }
 
-static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid,
-				unsigned int nr_blocks,
-				nvm_bb_update_fn *update_bbtbl, void *priv)
+static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
+				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+				void *priv)
 {
+	struct request_queue *q = nvmdev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	void *bb_bitmap;
-	u16 bb_bitmap_size;
+	struct nvme_nvm_bb_tbl *bb_tbl;
+	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
 	int ret = 0;
 
 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
 	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
-	c.get_bb.lbb = cpu_to_le32(lunid);
-	bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
-	bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
-	if (!bb_bitmap)
-		return -ENOMEM;
+	c.get_bb.spba = cpu_to_le64(ppa.ppa);
 
-	bitmap_zero(bb_bitmap, nr_blocks);
+	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
+	if (!bb_tbl)
+		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap,
-					bb_bitmap_size);
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								bb_tbl, tblsz);
 	if (ret) {
 		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
 		ret = -EIO;
 		goto out;
 	}
 
-	ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv);
+	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
+		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
+		dev_err(dev->dev, "bbt format mismatch\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (le16_to_cpu(bb_tbl->verid) != 1) {
+		ret = -EINVAL;
+		dev_err(dev->dev, "bbt version not supported\n");
+		goto out;
+	}
+
+	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+		ret = -EINVAL;
+		dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)\n",
+				le32_to_cpu(bb_tbl->tblks), nr_blocks);
+		goto out;
+	}
+
+	ppa = dev_to_generic_addr(nvmdev, ppa);
+	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
 	if (ret) {
 		ret = -EINTR;
 		goto out;
 	}
 
 out:
-	kfree(bb_bitmap);
+	kfree(bb_tbl);
+	return ret;
+}
+
+static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
+								int type)
+{
+	struct nvme_ns *ns = nvmdev->q->queuedata;
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_nvm_command c = {};
+	int ret = 0;
+
+	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
+	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
+	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+	c.set_bb.value = type;
+
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								NULL, 0);
+	if (ret)
+		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
 	return ret;
 }
 
@@ -381,7 +455,7 @@ static void nvme_nvm_end_io(struct request *rq, int error)
 	struct nvm_rq *rqd = rq->end_io_data;
 	struct nvm_dev *dev = rqd->dev;
 
-	if (dev->mt->end_io(rqd, error))
+	if (dev->mt && dev->mt->end_io(rqd, error))
 		pr_err("nvme: err status: %x result: %lx\n",
 				rq->errors, (unsigned long)rq->special);
 
@@ -389,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error)
 	blk_mq_free_request(rq);
 }
 
-static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+	struct request_queue *q = dev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct request *rq;
 	struct bio *bio = rqd->bio;
@@ -428,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
 	return 0;
 }
 
-static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+	struct request_queue *q = dev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_nvm_command c = {};
 
@@ -441,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
 	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
 }
 
-static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
+static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
 {
-	struct nvme_ns *ns = q->queuedata;
+	struct nvme_ns *ns = nvmdev->q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 
 	return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
@@ -456,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool)
 	dma_pool_destroy(dma_pool);
 }
 
-static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
 				gfp_t mem_flags, dma_addr_t *dma_handler)
 {
 	return dma_pool_alloc(pool, mem_flags, dma_handler);
@@ -474,6 +550,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.get_l2p_tbl = nvme_nvm_get_l2p_tbl,
 
 	.get_bb_tbl = nvme_nvm_get_bb_tbl,
+	.set_bb_tbl = nvme_nvm_set_bb_tbl,
 
 	.submit_io = nvme_nvm_submit_io,
 	.erase_block = nvme_nvm_erase_block,
@@ -496,31 +573,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
 	nvm_unregister(disk_name);
 }
 
+/* move to shared place when used in multiple places. */
+#define PCI_VENDOR_ID_CNEX 0x1d1d
+#define PCI_DEVICE_ID_CNEX_WL 0x2807
+#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
+
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	struct nvme_dev *dev = ns->dev;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 &&
+	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
+				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
 							id->vs[0] == 0x1)
 		return 1;
 
 	/* CNEX Labs - PCI ID + Vendor specific bit */
-	if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 &&
+	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
+				pdev->device == PCI_DEVICE_ID_CNEX_WL &&
 							id->vs[0] == 0x1)
 		return 1;
 
 	return 0;
 }
-#else
-int nvme_nvm_register(struct request_queue *q, char *disk_name)
-{
-	return 0;
-}
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
-int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
-{
-	return 0;
-}
-#endif /* CONFIG_NVM */
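
A quick way to sanity-check the 64-byte layouts that _nvme_nvm_check_size() asserts in the hunks above is to mirror them in userspace. The sketch below is illustrative only and not part of the patch: the nvm_* struct names are hypothetical stand-ins, <stdint.h> types replace the kernel's __u8/__le16/__le32/__le64 typedefs, and C11 _Static_assert stands in for BUILD_BUG_ON.

/* Userspace mirror of the fixed-size layouts defined in the diff above.
 * Field math: get command  = 1+1+2+4+16+8+8+8+16        = 64 bytes,
 *             set command  = 1+1+2+4+16+8+8+8+2+1+1+12  = 64 bytes,
 *             bb tbl header = 4+2+2+4+4+4+4+4+4+32      = 64 bytes.
 */
#include <stdint.h>

struct nvm_getbbtbl {            /* mirrors nvme_nvm_getbbtbl */
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t nsid;
	uint64_t rsvd[2];
	uint64_t prp1;
	uint64_t prp2;
	uint64_t spba;
	uint32_t rsvd4[4];
};

struct nvm_setbbtbl {            /* mirrors nvme_nvm_setbbtbl */
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t nsid;
	uint64_t rsvd[2];
	uint64_t prp1;
	uint64_t prp2;
	uint64_t spba;
	uint16_t nlb;
	uint8_t  value;
	uint8_t  rsvd3;
	uint32_t rsvd4[3];
};

struct nvm_bb_tbl_hdr {          /* mirrors nvme_nvm_bb_tbl (header only) */
	uint8_t  tblid[4];       /* "BBLT" magic checked by the driver */
	uint16_t verid;          /* must be 1 per the new get_bb_tbl path */
	uint16_t revid;
	uint32_t rvsd1;
	uint32_t tblks;          /* must match the caller's nr_blocks */
	uint32_t tfact;
	uint32_t tgrown;
	uint32_t tdresv;
	uint32_t thresv;
	uint32_t rsvd2[8];
	/* one status byte per block (blk[0] in the kernel) follows */
};

/* Userspace stand-ins for the kernel's BUILD_BUG_ON() size guards. */
_Static_assert(sizeof(struct nvm_getbbtbl) == 64, "get_bb_tbl command size");
_Static_assert(sizeof(struct nvm_setbbtbl) == 64, "set_bb_tbl command size");
_Static_assert(sizeof(struct nvm_bb_tbl_hdr) == 64, "bb table header size");

int main(void) { return 0; }

Compiling this with cc -std=c11 needs no kernel headers and confirms the arithmetic; the rsvd* fields exist purely to pad each vendor command out to the fixed 64-byte NVMe submission queue entry that nvme_submit_sync_cmd() sends to the device.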