author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-09 19:35:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-09 19:35:00 -0400
commit		2d4fe27850420606155fb1f7d18ab2b40153e67b (patch)
tree		56b0d465e1189babf4ac668a5b048a747bfb9682 /drivers/block
parent		2e99f3a12b20ab3afad0e042cc0bdd0ee855dca0 (diff)
parent		94f370cab6e5ac514b658c6b2b3aa308cefc5c7a (diff)
Merge git://git.infradead.org/users/willy/linux-nvme
Pull NVMe driver update from Matthew Wilcox:
"Lots of exciting new features in the NVM Express driver this time,
including support for emulating SCSI commands, discard support and the
ability to submit per-sector metadata with I/Os.
It's still mostly bugfixes though!"
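
The per-sector metadata feature lands in NVME_IOCTL_SUBMIT_IO: the previously ignored 'metadata' field of struct nvme_user_io is now honoured, and the driver rejects the I/O unless the pointer is non-zero and 4-byte aligned whenever the namespace format carries metadata. A minimal userspace sketch of a one-block read with a separate metadata buffer; the device path, the 4096-byte data size and the 16-byte metadata size are illustrative assumptions, and <linux/nvme.h> is assumed to expose the ioctl and struct to userspace:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

int main(void)
{
	struct nvme_user_io io;
	void *data, *meta;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* assumed device node */

	if (fd < 0 || posix_memalign(&data, 4096, 4096) ||
			posix_memalign(&meta, 4096, 16))
		return 1;

	memset(&io, 0, sizeof(io));
	io.opcode = nvme_cmd_read;		/* opcode 0x02 */
	io.nblocks = 0;				/* zero-based: one block */
	io.slba = 0;
	io.addr = (unsigned long)data;
	/* Only meaningful if the namespace format has a nonzero metadata
	 * size (ns->ms); the driver checks !(io.metadata & 3). */
	io.metadata = (unsigned long)meta;

	if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0)
		perror("NVME_IOCTL_SUBMIT_IO");
	close(fd);
	return 0;
}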
* git://git.infradead.org/users/willy/linux-nvme: (27 commits)
NVMe: Use user defined admin ioctl timeout
NVMe: Simplify Firmware Activate code slightly
NVMe: Only clear the enable bit when disabling controller
NVMe: Wait for device to acknowledge shutdown
NVMe: Schedule timeout for sync commands
NVMe: Meta-data support in NVME_IOCTL_SUBMIT_IO
NVMe: Device specific stripe size handling
NVMe: Split non-mergeable bio requests
NVMe: Remove dead code in nvme_dev_add
NVMe: Check for NULL memory in nvme_dev_add
NVMe: Fix error clean-up on nvme_alloc_queue
NVMe: Free admin queue on request_irq error
NVMe: Add scsi unmap to SG_IO
NVMe: queue usage fixes in nvme-scsi
NVMe: Set TASK_INTERRUPTIBLE before processing queues
NVMe: Add a character device for each nvme device
NVMe: Fix endian-related problems in user I/O submission path
NVMe: Fix I/O cancellation status on big-endian machines
NVMe: Fix sparse warnings in scsi emulation
NVMe: Don't fail initialisation unnecessarily
...
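
The SCSI emulation in the new nvme-scsi.c ("Add scsi unmap to SG_IO" and related commits above) is reached through the standard sg interface on the namespace block device. A hedged sketch probing the translation layer with a 6-byte INQUIRY CDB; the device path and buffer sizes are assumptions, not taken from this series:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char inq[36], sense[32];
	unsigned char cdb[6] = { 0x12, 0, 0, 0, sizeof(inq), 0 };	/* INQUIRY */
	struct sg_io_hdr hdr;
	int fd = open("/dev/nvme0n1", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = inq;
	hdr.dxfer_len = sizeof(inq);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 10000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("vendor: %.8s model: %.16s\n", inq + 8, inq + 16);

	close(fd);
	return 0;
}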
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/Makefile						   1
-rw-r--r--	drivers/block/nvme-core.c (renamed from drivers/block/nvme.c)	 594
-rw-r--r--	drivers/block/nvme-scsi.c					3053
3 files changed, 3484 insertions(+), 164 deletions(-)
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a3b40232c6ab..ca07399a8d99 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,4 +42,5 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX)	+= mtip32xx/
 
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
 
+nvme-y		:= nvme-core.o nvme-scsi.o
 swim_mod-y	:= swim.o swim_asm.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme-core.c
index 9dcefe40380b..8efdfaa44a59 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme-core.c
@@ -39,14 +39,13 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-
+#include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
 #define NVME_Q_DEPTH 1024
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define NVME_MINORS 64
-#define NVME_IO_TIMEOUT	(5 * HZ)
 #define ADMIN_TIMEOUT	(60 * HZ)
 
 static int nvme_major;
@@ -60,43 +59,6 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 
 /*
- * Represents an NVM Express device.  Each nvme_dev is a PCI function.
- */
-struct nvme_dev {
-	struct list_head node;
-	struct nvme_queue **queues;
-	u32 __iomem *dbs;
-	struct pci_dev *pci_dev;
-	struct dma_pool *prp_page_pool;
-	struct dma_pool *prp_small_pool;
-	int instance;
-	int queue_count;
-	int db_stride;
-	u32 ctrl_config;
-	struct msix_entry *entry;
-	struct nvme_bar __iomem *bar;
-	struct list_head namespaces;
-	char serial[20];
-	char model[40];
-	char firmware_rev[8];
-	u32 max_hw_sectors;
-};
-
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
-struct nvme_ns {
-	struct list_head list;
-
-	struct nvme_dev *dev;
-	struct request_queue *queue;
-	struct gendisk *disk;
-
-	int ns_id;
-	int lba_shift;
-};
-
-/*
  * An NVM Express queue.  Each device has at least two (one for admin
  * commands and one for I/O commands).
  */
@@ -131,6 +93,7 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -261,12 +224,12 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
 	return ctx;
 }
 
-static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
 {
 	return dev->queues[get_cpu() + 1];
 }
 
-static void put_nvmeq(struct nvme_queue *nvmeq)
+void put_nvmeq(struct nvme_queue *nvmeq)
 {
 	put_cpu();
 }
@@ -294,22 +257,6 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 	return 0;
 }
 
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries.  You can't see it in this data structure because C doesn't let
- * me express that.  Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
-	void *private;		/* For the use of the submitter of the I/O */
-	int npages;		/* In the PRP list. 0 means small pool in use */
-	int offset;		/* Of PRP list */
-	int nents;		/* Used in scatterlist */
-	int length;		/* Of data, in bytes */
-	dma_addr_t first_dma;
-	struct scatterlist sg[0];
-};
-
 static __le64 **iod_list(struct nvme_iod *iod)
 {
 	return ((void *)iod) + iod->offset;
@@ -343,7 +290,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
 	return iod;
 }
 
-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
 {
 	const int last_prp = PAGE_SIZE / 8 - 1;
 	int i;
@@ -361,16 +308,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
 	kfree(iod);
 }
 
-static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
-{
-	struct nvme_queue *nvmeq = get_nvmeq(dev);
-	if (bio_list_empty(&nvmeq->sq_cong))
-		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-	bio_list_add(&nvmeq->sq_cong, bio);
-	put_nvmeq(nvmeq);
-	wake_up_process(nvme_thread);
-}
-
 static void bio_completion(struct nvme_dev *dev, void *ctx,
 						struct nvme_completion *cqe)
 {
@@ -382,19 +319,15 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
 	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	nvme_free_iod(dev, iod);
-	if (status) {
+	if (status)
 		bio_endio(bio, -EIO);
-	} else if (bio->bi_vcnt > bio->bi_idx) {
-		requeue_bio(dev, bio);
-	} else {
+	else
 		bio_endio(bio, 0);
-	}
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev,
-			struct nvme_common_command *cmd, struct nvme_iod *iod,
-			int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
+			struct nvme_iod *iod, int total_len, gfp_t gfp)
 {
 	struct dma_pool *pool;
 	int length = total_len;
@@ -473,43 +406,193 @@ static int nvme_setup_prps(struct nvme_dev *dev,
 	return total_len;
 }
 
+struct nvme_bio_pair {
+	struct bio b1, b2, *parent;
+	struct bio_vec *bv1, *bv2;
+	int err;
+	atomic_t cnt;
+};
+
+static void nvme_bio_pair_endio(struct bio *bio, int err)
+{
+	struct nvme_bio_pair *bp = bio->bi_private;
+
+	if (err)
+		bp->err = err;
+
+	if (atomic_dec_and_test(&bp->cnt)) {
+		bio_endio(bp->parent, bp->err);
+		if (bp->bv1)
+			kfree(bp->bv1);
+		if (bp->bv2)
+			kfree(bp->bv2);
+		kfree(bp);
+	}
+}
+
+static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
+							int len, int offset)
+{
+	struct nvme_bio_pair *bp;
+
+	BUG_ON(len > bio->bi_size);
+	BUG_ON(idx > bio->bi_vcnt);
+
+	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
+	if (!bp)
+		return NULL;
+	bp->err = 0;
+
+	bp->b1 = *bio;
+	bp->b2 = *bio;
+
+	bp->b1.bi_size = len;
+	bp->b2.bi_size -= len;
+	bp->b1.bi_vcnt = idx;
+	bp->b2.bi_idx = idx;
+	bp->b2.bi_sector += len >> 9;
+
+	if (offset) {
+		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+								GFP_ATOMIC);
+		if (!bp->bv1)
+			goto split_fail_1;
+
+		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+								GFP_ATOMIC);
+		if (!bp->bv2)
+			goto split_fail_2;
+
+		memcpy(bp->bv1, bio->bi_io_vec,
+			bio->bi_max_vecs * sizeof(struct bio_vec));
+		memcpy(bp->bv2, bio->bi_io_vec,
+			bio->bi_max_vecs * sizeof(struct bio_vec));
+
+		bp->b1.bi_io_vec = bp->bv1;
+		bp->b2.bi_io_vec = bp->bv2;
+		bp->b2.bi_io_vec[idx].bv_offset += offset;
+		bp->b2.bi_io_vec[idx].bv_len -= offset;
+		bp->b1.bi_io_vec[idx].bv_len = offset;
+		bp->b1.bi_vcnt++;
+	} else
+		bp->bv1 = bp->bv2 = NULL;
+
+	bp->b1.bi_private = bp;
+	bp->b2.bi_private = bp;
+
+	bp->b1.bi_end_io = nvme_bio_pair_endio;
+	bp->b2.bi_end_io = nvme_bio_pair_endio;
+
+	bp->parent = bio;
+	atomic_set(&bp->cnt, 2);
+
+	return bp;
+
+ split_fail_2:
+	kfree(bp->bv1);
+ split_fail_1:
+	kfree(bp);
+	return NULL;
+}
+
+static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
+						int idx, int len, int offset)
+{
+	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
+	if (!bp)
+		return -ENOMEM;
+
+	if (bio_list_empty(&nvmeq->sq_cong))
+		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+	bio_list_add(&nvmeq->sq_cong, &bp->b1);
+	bio_list_add(&nvmeq->sq_cong, &bp->b2);
+
+	return 0;
+}
+
 /* NVMe scatterlists require no holes in the virtual address */
 #define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
 			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
 
-static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
 	struct bio_vec *bvec, *bvprv = NULL;
 	struct scatterlist *sg = NULL;
-	int i, old_idx, length = 0, nsegs = 0;
+	int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+
+	if (nvmeq->dev->stripe_size)
+		split_len = nvmeq->dev->stripe_size -
+			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
-	old_idx = bio->bi_idx;
 	bio_for_each_segment(bvec, bio, i) {
 		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
 			sg->length += bvec->bv_len;
 		} else {
 			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-				break;
+				return nvme_split_and_submit(bio, nvmeq, i,
+								length, 0);
+
 			sg = sg ? sg + 1 : iod->sg;
 			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
 							bvec->bv_offset);
 			nsegs++;
 		}
+
+		if (split_len - length < bvec->bv_len)
+			return nvme_split_and_submit(bio, nvmeq, i, split_len,
+							split_len - length);
 		length += bvec->bv_len;
 		bvprv = bvec;
 	}
-	bio->bi_idx = i;
 	iod->nents = nsegs;
 	sg_mark_end(sg);
-	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
-		bio->bi_idx = old_idx;
+	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
 		return -ENOMEM;
-	}
+
+	BUG_ON(length != bio->bi_size);
 	return length;
 }
 
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+		struct bio *bio, struct nvme_iod *iod, int cmdid)
+{
+	struct nvme_dsm_range *range;
+	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+							&iod->first_dma);
+	if (!range)
+		return -ENOMEM;
+
+	iod_list(iod)[0] = (__le64 *)range;
+	iod->npages = 0;
+
+	range->cattr = cpu_to_le32(0);
+	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->dsm.opcode = nvme_cmd_dsm;
+	cmnd->dsm.command_id = cmdid;
+	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd->dsm.nr = 0;
+	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+
+	return 0;
+}
+
 static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								int cmdid)
 {
@@ -527,7 +610,7 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	return 0;
 }
 
-static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
 {
 	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
 					special_completion, NVME_IO_TIMEOUT);
@@ -567,6 +650,12 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	if (unlikely(cmdid < 0))
 		goto free_iod;
 
+	if (bio->bi_rw & REQ_DISCARD) {
+		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+		if (result)
+			goto free_cmdid;
+		return result;
+	}
 	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
 		return nvme_submit_flush(nvmeq, ns, cmdid);
 
@@ -591,8 +680,8 @@
 		dma_dir = DMA_FROM_DEVICE;
 	}
 
-	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
-	if (result < 0)
+	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
+	if (result <= 0)
 		goto free_cmdid;
 	length = result;
 
@@ -600,13 +689,11 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
 								GFP_ATOMIC);
-	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
 	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-	bio->bi_sector += length >> 9;
-
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
 	writel(nvmeq->sq_tail, nvmeq->q_db);
@@ -724,8 +811,8 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
-			struct nvme_command *cmd, u32 *result, unsigned timeout)
+int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+						u32 *result, unsigned timeout)
 {
 	int cmdid;
 	struct sync_cmd_info cmdinfo;
@@ -741,7 +828,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
 
 	set_current_state(TASK_KILLABLE);
 	nvme_submit_cmd(nvmeq, cmd);
-	schedule();
+	schedule_timeout(timeout);
 
 	if (cmdinfo.status == -EINTR) {
 		nvme_abort_command(nvmeq, cmdid);
@@ -754,7 +841,7 @@
 	return cmdinfo.status;
 }
 
-static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 								u32 *result)
 {
 	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
@@ -827,7 +914,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 							dma_addr_t dma_addr)
 {
 	struct nvme_command c;
@@ -841,7 +928,7 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 	return nvme_submit_admin_cmd(dev, &c, NULL);
 }
 
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
 					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
@@ -855,8 +942,8 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
-static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
-			unsigned dword11, dma_addr_t dma_addr, u32 *result)
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
 
@@ -885,7 +972,7 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
 	void *ctx;
 	nvme_completion_fn fn;
 	static struct nvme_completion cqe = {
-		.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+		.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
 	};
 
 	if (timeout && !time_after(now, info[cmdid].timeout))
@@ -966,7 +1053,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	return nvmeq;
 
  free_cqdma:
-	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 							nvmeq->cq_dma_addr);
  free_nvmeq:
 	kfree(nvmeq);
@@ -1021,15 +1108,60 @@ static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
 	return ERR_PTR(result);
 }
 
+static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
+{
+	unsigned long timeout;
+	u32 bit = enabled ? NVME_CSTS_RDY : 0;
+
+	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+
+	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
+		msleep(100);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+		if (time_after(jiffies, timeout)) {
+			dev_err(&dev->pci_dev->dev,
+				"Device not ready; aborting initialisation\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * If the device has been passed off to us in an enabled state, just clear
+ * the enabled bit.  The spec says we should set the 'shutdown notification
+ * bits', but doing so may cause the device to complete commands to the
+ * admin queue ... and we don't know what memory that might be pointing at!
+ */
+static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+	u32 cc = readl(&dev->bar->cc);
+
+	if (cc & NVME_CC_ENABLE)
+		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
+	return nvme_wait_ready(dev, cap, false);
+}
+
+static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+	return nvme_wait_ready(dev, cap, true);
+}
+
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
-	int result = 0;
+	int result;
 	u32 aqa;
-	u64 cap;
-	unsigned long timeout;
+	u64 cap = readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
 
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+	dev->db_stride = NVME_CAP_STRIDE(cap);
+
+	result = nvme_disable_ctrl(dev, cap);
+	if (result < 0)
+		return result;
 
 	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
 	if (!nvmeq)
@@ -1043,38 +1175,28 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
 	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 
-	writel(0, &dev->bar->cc);
 	writel(aqa, &dev->bar->aqa);
 	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
 	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
 	writel(dev->ctrl_config, &dev->bar->cc);
 
-	cap = readq(&dev->bar->cap);
-	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
-	dev->db_stride = NVME_CAP_STRIDE(cap);
-
-	while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
-		msleep(100);
-		if (fatal_signal_pending(current))
-			result = -EINTR;
-		if (time_after(jiffies, timeout)) {
-			dev_err(&dev->pci_dev->dev,
-				"Device not ready; aborting initialisation\n");
-			result = -ENODEV;
-		}
-	}
-
-	if (result) {
-		nvme_free_queue_mem(nvmeq);
-		return result;
-	}
+	result = nvme_enable_ctrl(dev, cap);
+	if (result)
+		goto free_q;
 
 	result = queue_request_irq(dev, nvmeq, "nvme admin");
+	if (result)
+		goto free_q;
+
 	dev->queues[0] = nvmeq;
 	return result;
+
+ free_q:
+	nvme_free_queue_mem(nvmeq);
+	return result;
 }
 
-static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 				unsigned long addr, unsigned length)
 {
 	int i, err, count, nents, offset;
@@ -1130,7 +1252,7 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 	return ERR_PTR(err);
 }
 
-static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod)
 {
 	int i;
@@ -1148,13 +1270,19 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	struct nvme_queue *nvmeq;
 	struct nvme_user_io io;
 	struct nvme_command c;
-	unsigned length;
-	int status;
-	struct nvme_iod *iod;
+	unsigned length, meta_len;
+	int status, i;
+	struct nvme_iod *iod, *meta_iod = NULL;
+	dma_addr_t meta_dma_addr;
+	void *meta, *uninitialized_var(meta_mem);
 
 	if (copy_from_user(&io, uio, sizeof(io)))
 		return -EFAULT;
 	length = (io.nblocks + 1) << ns->lba_shift;
+	meta_len = (io.nblocks + 1) * ns->ms;
+
+	if (meta_len && ((io.metadata & 3) || !io.metadata))
+		return -EINVAL;
 
 	switch (io.opcode) {
 	case nvme_cmd_write:
@@ -1176,11 +1304,42 @@
 	c.rw.slba = cpu_to_le64(io.slba);
 	c.rw.length = cpu_to_le16(io.nblocks);
 	c.rw.control = cpu_to_le16(io.control);
-	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
-	c.rw.reftag = io.reftag;
-	c.rw.apptag = io.apptag;
-	c.rw.appmask = io.appmask;
-	/* XXX: metadata */
+	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+	c.rw.reftag = cpu_to_le32(io.reftag);
+	c.rw.apptag = cpu_to_le16(io.apptag);
+	c.rw.appmask = cpu_to_le16(io.appmask);
+
+	if (meta_len) {
+		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+		if (IS_ERR(meta_iod)) {
+			status = PTR_ERR(meta_iod);
+			meta_iod = NULL;
+			goto unmap;
+		}
+
+		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+						&meta_dma_addr, GFP_KERNEL);
+		if (!meta_mem) {
+			status = -ENOMEM;
+			goto unmap;
+		}
+
+		if (io.opcode & 1) {
+			int meta_offset = 0;
+
+			for (i = 0; i < meta_iod->nents; i++) {
+				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+						meta_iod->sg[i].offset;
+				memcpy(meta_mem + meta_offset, meta,
+						meta_iod->sg[i].length);
+				kunmap_atomic(meta);
+				meta_offset += meta_iod->sg[i].length;
+			}
+		}
+
+		c.rw.metadata = cpu_to_le64(meta_dma_addr);
+	}
+
 	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
 
 	nvmeq = get_nvmeq(dev);
@@ -1196,8 +1355,33 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	else
 		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
 
+	if (meta_len) {
+		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
+			int meta_offset = 0;
+
+			for (i = 0; i < meta_iod->nents; i++) {
+				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+						meta_iod->sg[i].offset;
+				memcpy(meta, meta_mem + meta_offset,
+						meta_iod->sg[i].length);
+				kunmap_atomic(meta);
+				meta_offset += meta_iod->sg[i].length;
+			}
+		}
+
+		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
+								meta_dma_addr);
+	}
+
+ unmap:
 	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
 	nvme_free_iod(dev, iod);
+
+	if (meta_iod) {
+		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
+		nvme_free_iod(dev, meta_iod);
+	}
+
 	return status;
 }
 
@@ -1208,6 +1392,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 	struct nvme_command c;
 	int status, length;
 	struct nvme_iod *uninitialized_var(iod);
+	unsigned timeout;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
@@ -1237,10 +1422,13 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 								GFP_KERNEL);
 	}
 
+	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
+								ADMIN_TIMEOUT;
 	if (length != cmd.data_len)
 		status = -ENOMEM;
 	else
-		status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
+		status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
+								timeout);
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1266,6 +1454,10 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
 	case NVME_IOCTL_SUBMIT_IO:
 		return nvme_submit_io(ns, (void __user *)arg);
+	case SG_GET_VERSION_NUM:
+		return nvme_sg_get_version_num((void __user *)arg);
+	case SG_IO:
+		return nvme_sg_io(ns, (void __user *)arg);
 	default:
 		return -ENOTTY;
 	}
@@ -1282,13 +1474,17 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 	while (bio_list_peek(&nvmeq->sq_cong)) {
 		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
 		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+
+		if (bio_list_empty(&nvmeq->sq_cong))
+			remove_wait_queue(&nvmeq->sq_full,
+							&nvmeq->sq_cong_wait);
 		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+			if (bio_list_empty(&nvmeq->sq_cong))
+				add_wait_queue(&nvmeq->sq_full,
+							&nvmeq->sq_cong_wait);
 			bio_list_add_head(&nvmeq->sq_cong, bio);
 			break;
 		}
-		if (bio_list_empty(&nvmeq->sq_cong))
-			remove_wait_queue(&nvmeq->sq_full,
-							&nvmeq->sq_cong_wait);
 	}
 }
 
@@ -1297,7 +1493,7 @@ static int nvme_kthread(void *data)
 	struct nvme_dev *dev;
 
 	while (!kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
 		list_for_each_entry(dev, &dev_list, node) {
 			int i;
@@ -1314,8 +1510,7 @@
 			}
 		}
 		spin_unlock(&dev_list_lock);
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ);
+		schedule_timeout(round_jiffies_relative(HZ));
 	}
 	return 0;
 }
@@ -1347,6 +1542,16 @@ static void nvme_put_ns_idx(int index)
 	spin_unlock(&dev_list_lock);
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	ns->queue->limits.discard_zeroes_data = 0;
+	ns->queue->limits.discard_alignment = logical_block_size;
+	ns->queue->limits.discard_granularity = logical_block_size;
+	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
 static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
 {
@@ -1366,7 +1571,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-	/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
 	blk_queue_make_request(ns->queue, nvme_make_request);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
@@ -1378,6 +1582,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	ns->disk = disk;
 	lbaf = id->flbas & 0xf;
 	ns->lba_shift = id->lbaf[lbaf].ds;
+	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -1392,6 +1597,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
 	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
+	if (dev->oncs & NVME_CTRL_ONCS_DSM)
+		nvme_config_discard(ns);
+
 	return ns;
 
  out_free_queue:
@@ -1496,14 +1704,21 @@ static void nvme_free_queues(struct nvme_dev *dev)
 		nvme_free_queue(dev, i);
 }
 
+/*
+ * Return: error value if an error occurred setting up the queues or calling
+ * Identify Device.  0 if these succeeded, even if adding some of the
+ * namespaces failed.  At the moment, these failures are silent.  TBD which
+ * failures should be reported.
+ */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
 	int res, nn, i;
-	struct nvme_ns *ns, *next;
+	struct nvme_ns *ns;
 	struct nvme_id_ctrl *ctrl;
 	struct nvme_id_ns *id_ns;
 	void *mem;
 	dma_addr_t dma_addr;
+	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
 	res = nvme_setup_io_queues(dev);
 	if (res)
@@ -1511,22 +1726,26 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
 								GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
 
 	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
 		res = -EIO;
-		goto out_free;
+		goto out;
 	}
 
 	ctrl = mem;
 	nn = le32_to_cpup(&ctrl->nn);
+	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
-	if (ctrl->mdts) {
-		int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+	if (ctrl->mdts)
 		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-	}
+	if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
+			(dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+		dev->stripe_size = 1 << (ctrl->vs[3] + shift);
 
 	id_ns = mem;
 	for (i = 1; i <= nn; i++) {
@@ -1548,14 +1767,7 @@
 	}
 	list_for_each_entry(ns, &dev->namespaces, list)
 		add_disk(ns->disk);
-
-	goto out;
-
- out_free:
-	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-		list_del(&ns->list);
-		nvme_ns_free(ns);
-	}
+	res = 0;
 
  out:
 	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
@@ -1634,6 +1846,56 @@ static void nvme_release_instance(struct nvme_dev *dev)
 	spin_unlock(&dev_list_lock);
 }
 
+static void nvme_free_dev(struct kref *kref)
+{
+	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+	nvme_dev_remove(dev);
+	pci_disable_msix(dev->pci_dev);
+	iounmap(dev->bar);
+	nvme_release_instance(dev);
+	nvme_release_prp_pools(dev);
+	pci_disable_device(dev->pci_dev);
+	pci_release_regions(dev->pci_dev);
+	kfree(dev->queues);
+	kfree(dev->entry);
+	kfree(dev);
+}
+
+static int nvme_dev_open(struct inode *inode, struct file *f)
+{
+	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
+								miscdev);
+	kref_get(&dev->kref);
+	f->private_data = dev;
+	return 0;
+}
+
+static int nvme_dev_release(struct inode *inode, struct file *f)
+{
+	struct nvme_dev *dev = f->private_data;
+	kref_put(&dev->kref, nvme_free_dev);
+	return 0;
+}
+
+static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	struct nvme_dev *dev = f->private_data;
+	switch (cmd) {
+	case NVME_IOCTL_ADMIN_CMD:
+		return nvme_user_admin_cmd(dev, (void __user *)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations nvme_dev_fops = {
+	.owner		= THIS_MODULE,
+	.open		= nvme_dev_open,
+	.release	= nvme_dev_release,
+	.unlocked_ioctl	= nvme_dev_ioctl,
+	.compat_ioctl	= nvme_dev_ioctl,
+};
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int bars, result = -ENOMEM;
@@ -1692,8 +1954,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto delete;
 
+	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
+	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+	dev->miscdev.parent = &pdev->dev;
+	dev->miscdev.name = dev->name;
+	dev->miscdev.fops = &nvme_dev_fops;
+	result = misc_register(&dev->miscdev);
+	if (result)
+		goto remove;
+
+	kref_init(&dev->kref);
 	return 0;
 
+ remove:
+	nvme_dev_remove(dev);
  delete:
 	spin_lock(&dev_list_lock);
 	list_del(&dev->node);
@@ -1719,16 +1993,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void nvme_remove(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
-	nvme_dev_remove(dev);
-	pci_disable_msix(pdev);
-	iounmap(dev->bar);
-	nvme_release_instance(dev);
-	nvme_release_prp_pools(dev);
-	pci_disable_device(pdev);
-	pci_release_regions(pdev);
-	kfree(dev->queues);
-	kfree(dev->entry);
-	kfree(dev);
+	misc_deregister(&dev->miscdev);
+	kref_put(&dev->kref, nvme_free_dev);
 }
 
 /* These functions are yet to be implemented */
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c new file mode 100644 index 000000000000..fed54b039893 --- /dev/null +++ b/drivers/block/nvme-scsi.c | |||
@@ -0,0 +1,3053 @@ | |||
1 | /* | ||
2 | * NVM Express device driver | ||
3 | * Copyright (c) 2011, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * Refer to the SCSI-NVMe Translation spec for details on how | ||
21 | * each command is translated. | ||
22 | */ | ||
23 | |||
24 | #include <linux/nvme.h> | ||
25 | #include <linux/bio.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/blkdev.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/fs.h> | ||
31 | #include <linux/genhd.h> | ||
32 | #include <linux/idr.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/io.h> | ||
36 | #include <linux/kdev_t.h> | ||
37 | #include <linux/kthread.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/mm.h> | ||
40 | #include <linux/module.h> | ||
41 | #include <linux/moduleparam.h> | ||
42 | #include <linux/pci.h> | ||
43 | #include <linux/poison.h> | ||
44 | #include <linux/sched.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/types.h> | ||
47 | #include <linux/version.h> | ||
48 | #include <scsi/sg.h> | ||
49 | #include <scsi/scsi.h> | ||
50 | |||
51 | |||
52 | static int sg_version_num = 30534; /* 2 digits for each component */ | ||
53 | |||
54 | #define SNTI_TRANSLATION_SUCCESS 0 | ||
55 | #define SNTI_INTERNAL_ERROR 1 | ||
56 | |||
57 | /* VPD Page Codes */ | ||
58 | #define VPD_SUPPORTED_PAGES 0x00 | ||
59 | #define VPD_SERIAL_NUMBER 0x80 | ||
60 | #define VPD_DEVICE_IDENTIFIERS 0x83 | ||
61 | #define VPD_EXTENDED_INQUIRY 0x86 | ||
62 | #define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1 | ||
63 | |||
64 | /* CDB offsets */ | ||
65 | #define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6 | ||
66 | #define REPORT_LUNS_SR_OFFSET 2 | ||
67 | #define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10 | ||
68 | #define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4 | ||
69 | #define REQUEST_SENSE_DESC_OFFSET 1 | ||
70 | #define REQUEST_SENSE_DESC_MASK 0x01 | ||
71 | #define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1 | ||
72 | #define INQUIRY_EVPD_BYTE_OFFSET 1 | ||
73 | #define INQUIRY_PAGE_CODE_BYTE_OFFSET 2 | ||
74 | #define INQUIRY_EVPD_BIT_MASK 1 | ||
75 | #define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3 | ||
76 | #define START_STOP_UNIT_CDB_IMMED_OFFSET 1 | ||
77 | #define START_STOP_UNIT_CDB_IMMED_MASK 0x1 | ||
78 | #define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3 | ||
79 | #define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF | ||
80 | #define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4 | ||
81 | #define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0 | ||
82 | #define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4 | ||
83 | #define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4 | ||
84 | #define START_STOP_UNIT_CDB_START_OFFSET 4 | ||
85 | #define START_STOP_UNIT_CDB_START_MASK 0x1 | ||
86 | #define WRITE_BUFFER_CDB_MODE_OFFSET 1 | ||
87 | #define WRITE_BUFFER_CDB_MODE_MASK 0x1F | ||
88 | #define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2 | ||
89 | #define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3 | ||
90 | #define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6 | ||
91 | #define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1 | ||
92 | #define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0 | ||
93 | #define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6 | ||
94 | #define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1 | ||
95 | #define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20 | ||
96 | #define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1 | ||
97 | #define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10 | ||
98 | #define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4 | ||
99 | #define FORMAT_UNIT_LONG_PARM_LIST_LEN 8 | ||
100 | #define FORMAT_UNIT_PROT_INT_OFFSET 3 | ||
101 | #define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0 | ||
102 | #define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07 | ||
103 | #define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7 | ||
104 | |||
105 | /* Misc. defines */ | ||
106 | #define NIBBLE_SHIFT 4 | ||
107 | #define FIXED_SENSE_DATA 0x70 | ||
108 | #define DESC_FORMAT_SENSE_DATA 0x72 | ||
109 | #define FIXED_SENSE_DATA_ADD_LENGTH 10 | ||
110 | #define LUN_ENTRY_SIZE 8 | ||
111 | #define LUN_DATA_HEADER_SIZE 8 | ||
112 | #define ALL_LUNS_RETURNED 0x02 | ||
113 | #define ALL_WELL_KNOWN_LUNS_RETURNED 0x01 | ||
114 | #define RESTRICTED_LUNS_RETURNED 0x00 | ||
115 | #define NVME_POWER_STATE_START_VALID 0x00 | ||
116 | #define NVME_POWER_STATE_ACTIVE 0x01 | ||
117 | #define NVME_POWER_STATE_IDLE 0x02 | ||
118 | #define NVME_POWER_STATE_STANDBY 0x03 | ||
119 | #define NVME_POWER_STATE_LU_CONTROL 0x07 | ||
120 | #define POWER_STATE_0 0 | ||
121 | #define POWER_STATE_1 1 | ||
122 | #define POWER_STATE_2 2 | ||
123 | #define POWER_STATE_3 3 | ||
124 | #define DOWNLOAD_SAVE_ACTIVATE 0x05 | ||
125 | #define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E | ||
126 | #define ACTIVATE_DEFERRED_MICROCODE 0x0F | ||
127 | #define FORMAT_UNIT_IMMED_MASK 0x2 | ||
128 | #define FORMAT_UNIT_IMMED_OFFSET 1 | ||
129 | #define KELVIN_TEMP_FACTOR 273 | ||
130 | #define FIXED_FMT_SENSE_DATA_SIZE 18 | ||
131 | #define DESC_FMT_SENSE_DATA_SIZE 8 | ||
132 | |||
133 | /* SCSI/NVMe defines and bit masks */ | ||
134 | #define INQ_STANDARD_INQUIRY_PAGE 0x00 | ||
135 | #define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00 | ||
136 | #define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80 | ||
137 | #define INQ_DEVICE_IDENTIFICATION_PAGE 0x83 | ||
138 | #define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86 | ||
139 | #define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1 | ||
140 | #define INQ_SERIAL_NUMBER_LENGTH 0x14 | ||
141 | #define INQ_NUM_SUPPORTED_VPD_PAGES 5 | ||
142 | #define VERSION_SPC_4 0x06 | ||
143 | #define ACA_UNSUPPORTED 0 | ||
144 | #define STANDARD_INQUIRY_LENGTH 36 | ||
145 | #define ADDITIONAL_STD_INQ_LENGTH 31 | ||
146 | #define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C | ||
147 | #define RESERVED_FIELD 0 | ||
148 | |||
149 | /* SCSI READ/WRITE Defines */ | ||
150 | #define IO_CDB_WP_MASK 0xE0 | ||
151 | #define IO_CDB_WP_SHIFT 5 | ||
152 | #define IO_CDB_FUA_MASK 0x8 | ||
153 | #define IO_6_CDB_LBA_OFFSET 0 | ||
154 | #define IO_6_CDB_LBA_MASK 0x001FFFFF | ||
155 | #define IO_6_CDB_TX_LEN_OFFSET 4 | ||
156 | #define IO_6_DEFAULT_TX_LEN 256 | ||
157 | #define IO_10_CDB_LBA_OFFSET 2 | ||
158 | #define IO_10_CDB_TX_LEN_OFFSET 7 | ||
159 | #define IO_10_CDB_WP_OFFSET 1 | ||
160 | #define IO_10_CDB_FUA_OFFSET 1 | ||
161 | #define IO_12_CDB_LBA_OFFSET 2 | ||
162 | #define IO_12_CDB_TX_LEN_OFFSET 6 | ||
163 | #define IO_12_CDB_WP_OFFSET 1 | ||
164 | #define IO_12_CDB_FUA_OFFSET 1 | ||
165 | #define IO_16_CDB_FUA_OFFSET 1 | ||
166 | #define IO_16_CDB_WP_OFFSET 1 | ||
167 | #define IO_16_CDB_LBA_OFFSET 2 | ||
168 | #define IO_16_CDB_TX_LEN_OFFSET 10 | ||
169 | |||
170 | /* Mode Sense/Select defines */ | ||
171 | #define MODE_PAGE_INFO_EXCEP 0x1C | ||
172 | #define MODE_PAGE_CACHING 0x08 | ||
173 | #define MODE_PAGE_CONTROL 0x0A | ||
174 | #define MODE_PAGE_POWER_CONDITION 0x1A | ||
175 | #define MODE_PAGE_RETURN_ALL 0x3F | ||
176 | #define MODE_PAGE_BLK_DES_LEN 0x08 | ||
177 | #define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10 | ||
178 | #define MODE_PAGE_CACHING_LEN 0x14 | ||
179 | #define MODE_PAGE_CONTROL_LEN 0x0C | ||
180 | #define MODE_PAGE_POW_CND_LEN 0x28 | ||
181 | #define MODE_PAGE_INF_EXC_LEN 0x0C | ||
182 | #define MODE_PAGE_ALL_LEN 0x54 | ||
183 | #define MODE_SENSE6_MPH_SIZE 4 | ||
184 | #define MODE_SENSE6_ALLOC_LEN_OFFSET 4 | ||
185 | #define MODE_SENSE_PAGE_CONTROL_OFFSET 2 | ||
186 | #define MODE_SENSE_PAGE_CONTROL_MASK 0xC0 | ||
187 | #define MODE_SENSE_PAGE_CODE_OFFSET 2 | ||
188 | #define MODE_SENSE_PAGE_CODE_MASK 0x3F | ||
189 | #define MODE_SENSE_LLBAA_OFFSET 1 | ||
190 | #define MODE_SENSE_LLBAA_MASK 0x10 | ||
191 | #define MODE_SENSE_LLBAA_SHIFT 4 | ||
192 | #define MODE_SENSE_DBD_OFFSET 1 | ||
193 | #define MODE_SENSE_DBD_MASK 8 | ||
194 | #define MODE_SENSE_DBD_SHIFT 3 | ||
195 | #define MODE_SENSE10_MPH_SIZE 8 | ||
196 | #define MODE_SENSE10_ALLOC_LEN_OFFSET 7 | ||
197 | #define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1 | ||
198 | #define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1 | ||
199 | #define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4 | ||
200 | #define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7 | ||
201 | #define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10 | ||
202 | #define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1 | ||
203 | #define MODE_SELECT_6_BD_OFFSET 3 | ||
204 | #define MODE_SELECT_10_BD_OFFSET 6 | ||
205 | #define MODE_SELECT_10_LLBAA_OFFSET 4 | ||
206 | #define MODE_SELECT_10_LLBAA_MASK 1 | ||
207 | #define MODE_SELECT_6_MPH_SIZE 4 | ||
208 | #define MODE_SELECT_10_MPH_SIZE 8 | ||
209 | #define CACHING_MODE_PAGE_WCE_MASK 0x04 | ||
210 | #define MODE_SENSE_BLK_DESC_ENABLED 0 | ||
211 | #define MODE_SENSE_BLK_DESC_COUNT 1 | ||
212 | #define MODE_SELECT_PAGE_CODE_MASK 0x3F | ||
213 | #define SHORT_DESC_BLOCK 8 | ||
214 | #define LONG_DESC_BLOCK 16 | ||
215 | #define MODE_PAGE_POW_CND_LEN_FIELD 0x26 | ||
216 | #define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A | ||
217 | #define MODE_PAGE_CACHING_LEN_FIELD 0x12 | ||
218 | #define MODE_PAGE_CONTROL_LEN_FIELD 0x0A | ||
219 | #define MODE_SENSE_PC_CURRENT_VALUES 0 | ||
220 | |||
221 | /* Log Sense defines */ | ||
222 | #define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00 | ||
223 | #define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07 | ||
224 | #define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F | ||
225 | #define LOG_PAGE_TEMPERATURE_PAGE 0x0D | ||
226 | #define LOG_SENSE_CDB_SP_OFFSET 1 | ||
227 | #define LOG_SENSE_CDB_SP_NOT_ENABLED 0 | ||
228 | #define LOG_SENSE_CDB_PC_OFFSET 2 | ||
229 | #define LOG_SENSE_CDB_PC_MASK 0xC0 | ||
230 | #define LOG_SENSE_CDB_PC_SHIFT 6 | ||
231 | #define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1 | ||
232 | #define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F | ||
233 | #define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7 | ||
234 | #define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8 | ||
235 | #define LOG_INFO_EXCP_PAGE_LENGTH 0xC | ||
236 | #define REMAINING_TEMP_PAGE_LENGTH 0xC | ||
237 | #define LOG_TEMP_PAGE_LENGTH 0x10 | ||
238 | #define LOG_TEMP_UNKNOWN 0xFF | ||
239 | #define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3 | ||
240 | |||
241 | /* Read Capacity defines */ | ||
242 | #define READ_CAP_10_RESP_SIZE 8 | ||
243 | #define READ_CAP_16_RESP_SIZE 32 | ||
244 | |||
245 | /* NVMe Namespace and Command Defines */ | ||
246 | #define NVME_GET_SMART_LOG_PAGE 0x02 | ||
247 | #define NVME_GET_FEAT_TEMP_THRESH 0x04 | ||
248 | #define BYTES_TO_DWORDS 4 | ||
249 | #define NVME_MAX_FIRMWARE_SLOT 7 | ||
250 | |||
251 | /* Report LUNs defines */ | ||
252 | #define REPORT_LUNS_FIRST_LUN_OFFSET 8 | ||
253 | |||
254 | /* SCSI ADDITIONAL SENSE Codes */ | ||
255 | |||
256 | #define SCSI_ASC_NO_SENSE 0x00 | ||
257 | #define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03 | ||
258 | #define SCSI_ASC_LUN_NOT_READY 0x04 | ||
259 | #define SCSI_ASC_WARNING 0x0B | ||
260 | #define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10 | ||
261 | #define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10 | ||
262 | #define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10 | ||
263 | #define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11 | ||
264 | #define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D | ||
265 | #define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20 | ||
266 | #define SCSI_ASC_ILLEGAL_COMMAND 0x20 | ||
267 | #define SCSI_ASC_ILLEGAL_BLOCK 0x21 | ||
268 | #define SCSI_ASC_INVALID_CDB 0x24 | ||
269 | #define SCSI_ASC_INVALID_LUN 0x25 | ||
270 | #define SCSI_ASC_INVALID_PARAMETER 0x26 | ||
271 | #define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31 | ||
272 | #define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44 | ||
273 | |||
274 | /* SCSI ADDITIONAL SENSE Code Qualifiers */ | ||
275 | |||
276 | #define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00 | ||
277 | #define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01 | ||
278 | #define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01 | ||
279 | #define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02 | ||
280 | #define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03 | ||
281 | #define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04 | ||
282 | #define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08 | ||
283 | #define SCSI_ASCQ_INVALID_LUN_ID 0x09 | ||
284 | |||
285 | /** | ||
286 | * DEVICE_SPECIFIC_PARAMETER in the mode parameter header (see sbc2r16); | ||
287 | * a value of 0x10 would enable DPOFUA support. | ||
288 | */ | ||
289 | #define DEVICE_SPECIFIC_PARAMETER 0 | ||
290 | #define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR) | ||
291 | |||
292 | /* MACROs to extract information from CDBs */ | ||
293 | |||
294 | #define GET_OPCODE(cdb) cdb[0] | ||
295 | |||
296 | #define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0) | ||
297 | |||
298 | #define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0)) | ||
299 | |||
300 | #define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \ | ||
301 | (cdb[index + 1] << 8) | \ | ||
302 | (cdb[index + 2] << 0)) | ||
303 | |||
304 | #define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \ | ||
305 | (cdb[index + 1] << 16) | \ | ||
306 | (cdb[index + 2] << 8) | \ | ||
307 | (cdb[index + 3] << 0)) | ||
308 | |||
309 | #define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \ | ||
310 | (((u64)cdb[index + 1]) << 48) | \ | ||
311 | (((u64)cdb[index + 2]) << 40) | \ | ||
312 | (((u64)cdb[index + 3]) << 32) | \ | ||
313 | (((u64)cdb[index + 4]) << 24) | \ | ||
314 | (((u64)cdb[index + 5]) << 16) | \ | ||
315 | (((u64)cdb[index + 6]) << 8) | \ | ||
316 | (((u64)cdb[index + 7]) << 0)) | ||
317 | |||
318 | /* Inquiry Helper Macros */ | ||
319 | #define GET_INQ_EVPD_BIT(cdb) \ | ||
320 | ((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \ | ||
321 | INQUIRY_EVPD_BIT_MASK) ? 1 : 0) | ||
322 | |||
323 | #define GET_INQ_PAGE_CODE(cdb) \ | ||
324 | (GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET)) | ||
325 | |||
326 | #define GET_INQ_ALLOC_LENGTH(cdb) \ | ||
327 | (GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET)) | ||
328 | |||
329 | /* Report LUNs Helper Macros */ | ||
330 | #define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \ | ||
331 | (GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET)) | ||
332 | |||
333 | /* Read Capacity Helper Macros */ | ||
334 | #define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \ | ||
335 | (GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET)) | ||
336 | |||
337 | #define IS_READ_CAP_16(cdb) \ | ||
338 | ((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0) | ||
339 | |||
340 | /* Request Sense Helper Macros */ | ||
341 | #define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \ | ||
342 | (GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET)) | ||
343 | |||
344 | /* Mode Sense Helper Macros */ | ||
345 | #define GET_MODE_SENSE_DBD(cdb) \ | ||
346 | ((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \ | ||
347 | MODE_SENSE_DBD_SHIFT) | ||
348 | |||
349 | #define GET_MODE_SENSE_LLBAA(cdb) \ | ||
350 | ((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \ | ||
351 | MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT) | ||
352 | |||
353 | #define GET_MODE_SENSE_MPH_SIZE(cdb10) \ | ||
354 | (cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE) | ||
355 | |||
356 | |||
357 | /* Struct to gather data that needs to be extracted from a SCSI CDB. | ||
358 | Not conforming to any particular CDB variant, but compatible with all. */ | ||
359 | |||
360 | struct nvme_trans_io_cdb { | ||
361 | u8 fua; | ||
362 | u8 prot_info; | ||
363 | u64 lba; | ||
364 | u32 xfer_len; | ||
365 | }; | ||
366 | |||
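/*
 * Illustrative sketch (not part of the original file): filling the struct
 * above from a READ(10) CDB with the extraction macros defined earlier.
 * The function name is hypothetical; the offsets and masks are the IO_10_*
 * and IO_CDB_* defines from this file.
 */
static void example_get_io_cdb10(u8 *cmd, struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = cmd[IO_10_CDB_FUA_OFFSET] & IO_CDB_FUA_MASK;
	cdb_info->prot_info = (cmd[IO_10_CDB_WP_OFFSET] & IO_CDB_WP_MASK) >>
				IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
}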
367 | |||
368 | /* Internal Helper Functions */ | ||
369 | |||
370 | |||
371 | /* Copy data to userspace memory */ | ||
372 | |||
373 | static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from, | ||
374 | unsigned long n) | ||
375 | { | ||
376 | int res = SNTI_TRANSLATION_SUCCESS; | ||
377 | unsigned long not_copied; | ||
378 | int i; | ||
379 | void *index = from; | ||
380 | size_t remaining = n; | ||
381 | size_t xfer_len; | ||
382 | |||
383 | if (hdr->iovec_count > 0) { | ||
384 | struct sg_iovec sgl; | ||
385 | |||
386 | for (i = 0; i < hdr->iovec_count; i++) { | ||
387 | not_copied = copy_from_user(&sgl, hdr->dxferp + | ||
388 | i * sizeof(struct sg_iovec), | ||
389 | sizeof(struct sg_iovec)); | ||
390 | if (not_copied) | ||
391 | return -EFAULT; | ||
392 | xfer_len = min(remaining, sgl.iov_len); | ||
393 | not_copied = copy_to_user(sgl.iov_base, index, | ||
394 | xfer_len); | ||
395 | if (not_copied) { | ||
396 | res = -EFAULT; | ||
397 | break; | ||
398 | } | ||
399 | index += xfer_len; | ||
400 | remaining -= xfer_len; | ||
401 | if (remaining == 0) | ||
402 | break; | ||
403 | } | ||
404 | return res; | ||
405 | } | ||
406 | not_copied = copy_to_user(hdr->dxferp, from, n); | ||
407 | if (not_copied) | ||
408 | res = -EFAULT; | ||
409 | return res; | ||
410 | } | ||
411 | |||
412 | /* Copy data from userspace memory */ | ||
413 | |||
414 | static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to, | ||
415 | unsigned long n) | ||
416 | { | ||
417 | int res = SNTI_TRANSLATION_SUCCESS; | ||
418 | unsigned long not_copied; | ||
419 | int i; | ||
420 | void *index = to; | ||
421 | size_t remaining = n; | ||
422 | size_t xfer_len; | ||
423 | |||
424 | if (hdr->iovec_count > 0) { | ||
425 | struct sg_iovec sgl; | ||
426 | |||
427 | for (i = 0; i < hdr->iovec_count; i++) { | ||
428 | not_copied = copy_from_user(&sgl, hdr->dxferp + | ||
429 | i * sizeof(struct sg_iovec), | ||
430 | sizeof(struct sg_iovec)); | ||
431 | if (not_copied) | ||
432 | return -EFAULT; | ||
433 | xfer_len = min(remaining, sgl.iov_len); | ||
434 | not_copied = copy_from_user(index, sgl.iov_base, | ||
435 | xfer_len); | ||
436 | if (not_copied) { | ||
437 | res = -EFAULT; | ||
438 | break; | ||
439 | } | ||
440 | index += xfer_len; | ||
441 | remaining -= xfer_len; | ||
442 | if (remaining == 0) | ||
443 | break; | ||
444 | } | ||
445 | return res; | ||
446 | } | ||
447 | |||
448 | not_copied = copy_from_user(to, hdr->dxferp, n); | ||
449 | if (not_copied) | ||
450 | res = -EFAULT; | ||
451 | return res; | ||
452 | } | ||
453 | |||
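/*
 * Userspace-side sketch (illustrative, not part of the driver): a plain
 * INQUIRY issued through SG_IO exercises the non-iovec path of the copy
 * helpers above. The device node and buffer sizes are assumptions; error
 * handling is omitted.
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int example_inquiry(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };   /* INQUIRY, 36 bytes */
	unsigned char buf[36], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/nvme0n1", O_RDONLY);           /* hypothetical node */

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_len = sizeof(buf);
	hdr.dxferp = buf;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	return ioctl(fd, SG_IO, &hdr);                     /* 0 on success */
}
#endif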
454 | /* Status/Sense Buffer Writeback */ | ||
455 | |||
456 | static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key, | ||
457 | u8 asc, u8 ascq) | ||
458 | { | ||
459 | int res = SNTI_TRANSLATION_SUCCESS; | ||
460 | u8 xfer_len; | ||
461 | u8 resp[DESC_FMT_SENSE_DATA_SIZE]; | ||
462 | |||
463 | if (scsi_status_is_good(status)) { | ||
464 | hdr->status = SAM_STAT_GOOD; | ||
465 | hdr->masked_status = GOOD; | ||
466 | hdr->host_status = DID_OK; | ||
467 | hdr->driver_status = DRIVER_OK; | ||
468 | hdr->sb_len_wr = 0; | ||
469 | } else { | ||
470 | hdr->status = status; | ||
471 | hdr->masked_status = status >> 1; | ||
472 | hdr->host_status = DID_OK; | ||
473 | hdr->driver_status = DRIVER_OK; | ||
474 | |||
475 | memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE); | ||
476 | resp[0] = DESC_FORMAT_SENSE_DATA; | ||
477 | resp[1] = sense_key; | ||
478 | resp[2] = asc; | ||
479 | resp[3] = ascq; | ||
480 | |||
481 | xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE); | ||
482 | hdr->sb_len_wr = xfer_len; | ||
483 | if (copy_to_user(hdr->sbp, resp, xfer_len)) | ||
484 | res = -EFAULT; | ||
485 | } | ||
486 | |||
487 | return res; | ||
488 | } | ||
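/*
 * Worked example (illustrative): rejecting an invalid CDB via
 * nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, ILLEGAL_REQUEST,
 * SCSI_ASC_INVALID_CDB, SCSI_ASCQ_CAUSE_NOT_REPORTABLE) produces the
 * descriptor-format sense bytes 72 05 24 00 00 00 00 00.
 */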
489 | |||
490 | static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc) | ||
491 | { | ||
492 | u8 status, sense_key, asc, ascq; | ||
493 | int res = SNTI_TRANSLATION_SUCCESS; | ||
494 | |||
495 | /* For non-nvme (Linux) errors, simply return the error code */ | ||
496 | if (nvme_sc < 0) | ||
497 | return nvme_sc; | ||
498 | |||
499 | /* Mask DNR, More, and reserved fields */ | ||
500 | nvme_sc &= 0x7FF; | ||
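	/*
	 * Worked example (illustrative): a completion status of 0x4002 (DNR
	 * bit 14 set, SC = Invalid Field) masks to 0x4002 & 0x7FF = 0x002,
	 * i.e. NVME_SC_INVALID_FIELD, before translation below.
	 */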
501 | |||
502 | switch (nvme_sc) { | ||
503 | /* Generic Command Status */ | ||
504 | case NVME_SC_SUCCESS: | ||
505 | status = SAM_STAT_GOOD; | ||
506 | sense_key = NO_SENSE; | ||
507 | asc = SCSI_ASC_NO_SENSE; | ||
508 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
509 | break; | ||
510 | case NVME_SC_INVALID_OPCODE: | ||
511 | status = SAM_STAT_CHECK_CONDITION; | ||
512 | sense_key = ILLEGAL_REQUEST; | ||
513 | asc = SCSI_ASC_ILLEGAL_COMMAND; | ||
514 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
515 | break; | ||
516 | case NVME_SC_INVALID_FIELD: | ||
517 | status = SAM_STAT_CHECK_CONDITION; | ||
518 | sense_key = ILLEGAL_REQUEST; | ||
519 | asc = SCSI_ASC_INVALID_CDB; | ||
520 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
521 | break; | ||
522 | case NVME_SC_DATA_XFER_ERROR: | ||
523 | status = SAM_STAT_CHECK_CONDITION; | ||
524 | sense_key = MEDIUM_ERROR; | ||
525 | asc = SCSI_ASC_NO_SENSE; | ||
526 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
527 | break; | ||
528 | case NVME_SC_POWER_LOSS: | ||
529 | status = SAM_STAT_TASK_ABORTED; | ||
530 | sense_key = ABORTED_COMMAND; | ||
531 | asc = SCSI_ASC_WARNING; | ||
532 | ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED; | ||
533 | break; | ||
534 | case NVME_SC_INTERNAL: | ||
535 | status = SAM_STAT_CHECK_CONDITION; | ||
536 | sense_key = HARDWARE_ERROR; | ||
537 | asc = SCSI_ASC_INTERNAL_TARGET_FAILURE; | ||
538 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
539 | break; | ||
540 | case NVME_SC_ABORT_REQ: | ||
541 | status = SAM_STAT_TASK_ABORTED; | ||
542 | sense_key = ABORTED_COMMAND; | ||
543 | asc = SCSI_ASC_NO_SENSE; | ||
544 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
545 | break; | ||
546 | case NVME_SC_ABORT_QUEUE: | ||
547 | status = SAM_STAT_TASK_ABORTED; | ||
548 | sense_key = ABORTED_COMMAND; | ||
549 | asc = SCSI_ASC_NO_SENSE; | ||
550 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
551 | break; | ||
552 | case NVME_SC_FUSED_FAIL: | ||
553 | status = SAM_STAT_TASK_ABORTED; | ||
554 | sense_key = ABORTED_COMMAND; | ||
555 | asc = SCSI_ASC_NO_SENSE; | ||
556 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
557 | break; | ||
558 | case NVME_SC_FUSED_MISSING: | ||
559 | status = SAM_STAT_TASK_ABORTED; | ||
560 | sense_key = ABORTED_COMMAND; | ||
561 | asc = SCSI_ASC_NO_SENSE; | ||
562 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
563 | break; | ||
564 | case NVME_SC_INVALID_NS: | ||
565 | status = SAM_STAT_CHECK_CONDITION; | ||
566 | sense_key = ILLEGAL_REQUEST; | ||
567 | asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; | ||
568 | ascq = SCSI_ASCQ_INVALID_LUN_ID; | ||
569 | break; | ||
570 | case NVME_SC_LBA_RANGE: | ||
571 | status = SAM_STAT_CHECK_CONDITION; | ||
572 | sense_key = ILLEGAL_REQUEST; | ||
573 | asc = SCSI_ASC_ILLEGAL_BLOCK; | ||
574 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
575 | break; | ||
576 | case NVME_SC_CAP_EXCEEDED: | ||
577 | status = SAM_STAT_CHECK_CONDITION; | ||
578 | sense_key = MEDIUM_ERROR; | ||
579 | asc = SCSI_ASC_NO_SENSE; | ||
580 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
581 | break; | ||
582 | case NVME_SC_NS_NOT_READY: | ||
583 | status = SAM_STAT_CHECK_CONDITION; | ||
584 | sense_key = NOT_READY; | ||
585 | asc = SCSI_ASC_LUN_NOT_READY; | ||
586 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
587 | break; | ||
588 | |||
589 | /* Command Specific Status */ | ||
590 | case NVME_SC_INVALID_FORMAT: | ||
591 | status = SAM_STAT_CHECK_CONDITION; | ||
592 | sense_key = ILLEGAL_REQUEST; | ||
593 | asc = SCSI_ASC_FORMAT_COMMAND_FAILED; | ||
594 | ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED; | ||
595 | break; | ||
596 | case NVME_SC_BAD_ATTRIBUTES: | ||
597 | status = SAM_STAT_CHECK_CONDITION; | ||
598 | sense_key = ILLEGAL_REQUEST; | ||
599 | asc = SCSI_ASC_INVALID_CDB; | ||
600 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
601 | break; | ||
602 | |||
603 | /* Media Errors */ | ||
604 | case NVME_SC_WRITE_FAULT: | ||
605 | status = SAM_STAT_CHECK_CONDITION; | ||
606 | sense_key = MEDIUM_ERROR; | ||
607 | asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT; | ||
608 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
609 | break; | ||
610 | case NVME_SC_READ_ERROR: | ||
611 | status = SAM_STAT_CHECK_CONDITION; | ||
612 | sense_key = MEDIUM_ERROR; | ||
613 | asc = SCSI_ASC_UNRECOVERED_READ_ERROR; | ||
614 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
615 | break; | ||
616 | case NVME_SC_GUARD_CHECK: | ||
617 | status = SAM_STAT_CHECK_CONDITION; | ||
618 | sense_key = MEDIUM_ERROR; | ||
619 | asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED; | ||
620 | ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED; | ||
621 | break; | ||
622 | case NVME_SC_APPTAG_CHECK: | ||
623 | status = SAM_STAT_CHECK_CONDITION; | ||
624 | sense_key = MEDIUM_ERROR; | ||
625 | asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED; | ||
626 | ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED; | ||
627 | break; | ||
628 | case NVME_SC_REFTAG_CHECK: | ||
629 | status = SAM_STAT_CHECK_CONDITION; | ||
630 | sense_key = MEDIUM_ERROR; | ||
631 | asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED; | ||
632 | ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED; | ||
633 | break; | ||
634 | case NVME_SC_COMPARE_FAILED: | ||
635 | status = SAM_STAT_CHECK_CONDITION; | ||
636 | sense_key = MISCOMPARE; | ||
637 | asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY; | ||
638 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
639 | break; | ||
640 | case NVME_SC_ACCESS_DENIED: | ||
641 | status = SAM_STAT_CHECK_CONDITION; | ||
642 | sense_key = ILLEGAL_REQUEST; | ||
643 | asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; | ||
644 | ascq = SCSI_ASCQ_INVALID_LUN_ID; | ||
645 | break; | ||
646 | |||
647 | /* Unspecified/Default */ | ||
648 | case NVME_SC_CMDID_CONFLICT: | ||
649 | case NVME_SC_CMD_SEQ_ERROR: | ||
650 | case NVME_SC_CQ_INVALID: | ||
651 | case NVME_SC_QID_INVALID: | ||
652 | case NVME_SC_QUEUE_SIZE: | ||
653 | case NVME_SC_ABORT_LIMIT: | ||
654 | case NVME_SC_ABORT_MISSING: | ||
655 | case NVME_SC_ASYNC_LIMIT: | ||
656 | case NVME_SC_FIRMWARE_SLOT: | ||
657 | case NVME_SC_FIRMWARE_IMAGE: | ||
658 | case NVME_SC_INVALID_VECTOR: | ||
659 | case NVME_SC_INVALID_LOG_PAGE: | ||
660 | default: | ||
661 | status = SAM_STAT_CHECK_CONDITION; | ||
662 | sense_key = ILLEGAL_REQUEST; | ||
663 | asc = SCSI_ASC_NO_SENSE; | ||
664 | ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
665 | break; | ||
666 | } | ||
667 | |||
668 | res = nvme_trans_completion(hdr, status, sense_key, asc, ascq); | ||
669 | |||
670 | return res; | ||
671 | } | ||
672 | |||
673 | /* INQUIRY Helper Functions */ | ||
674 | |||
675 | static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, | ||
676 | struct sg_io_hdr *hdr, u8 *inq_response, | ||
677 | int alloc_len) | ||
678 | { | ||
679 | struct nvme_dev *dev = ns->dev; | ||
680 | dma_addr_t dma_addr; | ||
681 | void *mem; | ||
682 | struct nvme_id_ns *id_ns; | ||
683 | int res = SNTI_TRANSLATION_SUCCESS; | ||
684 | int nvme_sc; | ||
685 | int xfer_len; | ||
686 | u8 resp_data_format = 0x02; | ||
687 | u8 protect; | ||
688 | u8 cmdque = 0x01 << 1; | ||
689 | |||
690 | mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), | ||
691 | &dma_addr, GFP_KERNEL); | ||
692 | if (mem == NULL) { | ||
693 | res = -ENOMEM; | ||
694 | goto out_dma; | ||
695 | } | ||
696 | |||
697 | /* nvme ns identify - use DPS value for PROTECT field */ | ||
698 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); | ||
699 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
700 | /* | ||
701 | * If nvme_sc was -ve, res will be -ve here. | ||
702 | * If nvme_sc was +ve, the status would have been translated, and res | ||
703 | * can only be 0 or -ve. | ||
704 | * - If 0 && nvme_sc > 0, then go into the next if, where res gets nvme_sc | ||
705 | * - If -ve, return because it's a Linux error. | ||
706 | */ | ||
707 | if (res) | ||
708 | goto out_free; | ||
709 | if (nvme_sc) { | ||
710 | res = nvme_sc; | ||
711 | goto out_free; | ||
712 | } | ||
713 | id_ns = mem; | ||
714 | protect = id_ns->dps ? 0x01 : 0; | ||
715 | |||
716 | memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); | ||
717 | inq_response[2] = VERSION_SPC_4; | ||
718 | inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */ | ||
719 | inq_response[4] = ADDITIONAL_STD_INQ_LENGTH; | ||
720 | inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */ | ||
721 | inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */ | ||
722 | strncpy(&inq_response[8], "NVMe ", 8); | ||
723 | strncpy(&inq_response[16], dev->model, 16); | ||
724 | strncpy(&inq_response[32], dev->firmware_rev, 4); | ||
725 | |||
726 | xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); | ||
727 | res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); | ||
728 | |||
729 | out_free: | ||
730 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, | ||
731 | dma_addr); | ||
732 | out_dma: | ||
733 | return res; | ||
734 | } | ||
735 | |||
736 | static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns, | ||
737 | struct sg_io_hdr *hdr, u8 *inq_response, | ||
738 | int alloc_len) | ||
739 | { | ||
740 | int res = SNTI_TRANSLATION_SUCCESS; | ||
741 | int xfer_len; | ||
742 | |||
743 | memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); | ||
744 | inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */ | ||
745 | inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */ | ||
746 | inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE; | ||
747 | inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE; | ||
748 | inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE; | ||
749 | inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE; | ||
750 | inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE; | ||
751 | |||
752 | xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); | ||
753 | res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); | ||
754 | |||
755 | return res; | ||
756 | } | ||
757 | |||
758 | static int nvme_trans_unit_serial_page(struct nvme_ns *ns, | ||
759 | struct sg_io_hdr *hdr, u8 *inq_response, | ||
760 | int alloc_len) | ||
761 | { | ||
762 | struct nvme_dev *dev = ns->dev; | ||
763 | int res = SNTI_TRANSLATION_SUCCESS; | ||
764 | int xfer_len; | ||
765 | |||
766 | memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); | ||
767 | inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */ | ||
768 | inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */ | ||
769 | strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH); | ||
770 | |||
771 | xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); | ||
772 | res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); | ||
773 | |||
774 | return res; | ||
775 | } | ||
776 | |||
777 | static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
778 | u8 *inq_response, int alloc_len) | ||
779 | { | ||
780 | struct nvme_dev *dev = ns->dev; | ||
781 | dma_addr_t dma_addr; | ||
782 | void *mem; | ||
783 | struct nvme_id_ctrl *id_ctrl; | ||
784 | int res = SNTI_TRANSLATION_SUCCESS; | ||
785 | int nvme_sc; | ||
786 | u8 ieee[4]; | ||
787 | int xfer_len; | ||
788 | __be32 tmp_id = cpu_to_be32(ns->ns_id); | ||
789 | |||
790 | mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), | ||
791 | &dma_addr, GFP_KERNEL); | ||
792 | if (mem == NULL) { | ||
793 | res = -ENOMEM; | ||
794 | goto out_dma; | ||
795 | } | ||
796 | |||
797 | /* nvme controller identify */ | ||
798 | nvme_sc = nvme_identify(dev, 0, 1, dma_addr); | ||
799 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
800 | if (res) | ||
801 | goto out_free; | ||
802 | if (nvme_sc) { | ||
803 | res = nvme_sc; | ||
804 | goto out_free; | ||
805 | } | ||
806 | id_ctrl = mem; | ||
807 | |||
808 | /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */ | ||
809 | ieee[0] = id_ctrl->ieee[0] << 4; | ||
810 | ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4; | ||
811 | ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4; | ||
812 | ieee[3] = id_ctrl->ieee[2] >> 4; | ||
813 | |||
814 | memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); | ||
815 | inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */ | ||
816 | inq_response[3] = 20; /* Page Length */ | ||
817 | /* Designation Descriptor start */ | ||
818 | inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ | ||
819 | inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */ | ||
820 | inq_response[6] = 0x00; /* Rsvd */ | ||
821 | inq_response[7] = 16; /* Designator Length */ | ||
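	/* Illustrative note: page length 20 above = this 4-byte descriptor
	 * header + the 16-byte NAA designator that follows. */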
822 | /* Designator start */ | ||
823 | inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/ | ||
824 | inq_response[9] = ieee[2]; /* IEEE ID */ | ||
825 | inq_response[10] = ieee[1]; /* IEEE ID */ | ||
826 | inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */ | ||
827 | inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8; | ||
828 | inq_response[13] = (dev->pci_dev->vendor & 0x00FF); | ||
829 | inq_response[14] = dev->serial[0]; | ||
830 | inq_response[15] = dev->serial[1]; | ||
831 | inq_response[16] = dev->model[0]; | ||
832 | inq_response[17] = dev->model[1]; | ||
833 | memcpy(&inq_response[18], &tmp_id, sizeof(u32)); | ||
834 | /* Last 2 bytes are zero */ | ||
835 | |||
836 | xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); | ||
837 | res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); | ||
838 | |||
839 | out_free: | ||
840 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, | ||
841 | dma_addr); | ||
842 | out_dma: | ||
843 | return res; | ||
844 | } | ||
845 | |||
846 | static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
847 | int alloc_len) | ||
848 | { | ||
849 | u8 *inq_response; | ||
850 | int res = SNTI_TRANSLATION_SUCCESS; | ||
851 | int nvme_sc; | ||
852 | struct nvme_dev *dev = ns->dev; | ||
853 | dma_addr_t dma_addr; | ||
854 | void *mem; | ||
855 | struct nvme_id_ctrl *id_ctrl; | ||
856 | struct nvme_id_ns *id_ns; | ||
857 | int xfer_len; | ||
858 | u8 microcode = 0x80; | ||
859 | u8 spt; | ||
860 | u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7}; | ||
861 | u8 grd_chk, app_chk, ref_chk, protect; | ||
862 | u8 uask_sup = 0x20; | ||
863 | u8 v_sup; | ||
864 | u8 luiclr = 0x01; | ||
865 | |||
866 | inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); | ||
867 | if (inq_response == NULL) { | ||
868 | res = -ENOMEM; | ||
869 | goto out_mem; | ||
870 | } | ||
871 | |||
872 | mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), | ||
873 | &dma_addr, GFP_KERNEL); | ||
874 | if (mem == NULL) { | ||
875 | res = -ENOMEM; | ||
876 | goto out_dma; | ||
877 | } | ||
878 | |||
879 | /* nvme ns identify */ | ||
880 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); | ||
881 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
882 | if (res) | ||
883 | goto out_free; | ||
884 | if (nvme_sc) { | ||
885 | res = nvme_sc; | ||
886 | goto out_free; | ||
887 | } | ||
888 | id_ns = mem; | ||
889 | spt = spt_lut[(id_ns->dpc) & 0x07] << 3; | ||
890 | protect = id_ns->dps ? 0x01 : 0; | ||
891 | grd_chk = protect << 2; | ||
892 | app_chk = protect << 1; | ||
893 | ref_chk = protect; | ||
894 | |||
895 | /* nvme controller identify */ | ||
896 | nvme_sc = nvme_identify(dev, 0, 1, dma_addr); | ||
897 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
898 | if (res) | ||
899 | goto out_free; | ||
900 | if (nvme_sc) { | ||
901 | res = nvme_sc; | ||
902 | goto out_free; | ||
903 | } | ||
904 | id_ctrl = mem; | ||
905 | v_sup = id_ctrl->vwc; | ||
906 | |||
907 | memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); | ||
908 | inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */ | ||
909 | inq_response[2] = 0x00; /* Page Length MSB */ | ||
910 | inq_response[3] = 0x3C; /* Page Length LSB */ | ||
911 | inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk; | ||
912 | inq_response[5] = uask_sup; | ||
913 | inq_response[6] = v_sup; | ||
914 | inq_response[7] = luiclr; | ||
915 | inq_response[8] = 0; | ||
916 | inq_response[9] = 0; | ||
917 | |||
918 | xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); | ||
919 | res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); | ||
920 | |||
921 | out_free: | ||
922 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, | ||
923 | dma_addr); | ||
924 | out_dma: | ||
925 | kfree(inq_response); | ||
926 | out_mem: | ||
927 | return res; | ||
928 | } | ||
929 | |||
930 | static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
931 | int alloc_len) | ||
932 | { | ||
933 | u8 *inq_response; | ||
934 | int res = SNTI_TRANSLATION_SUCCESS; | ||
935 | int xfer_len; | ||
936 | |||
937 | inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); | ||
938 | if (inq_response == NULL) { | ||
939 | res = -ENOMEM; | ||
940 | goto out_mem; | ||
941 | } | ||
942 | |||
943 | memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); | ||
944 | inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */ | ||
945 | inq_response[2] = 0x00; /* Page Length MSB */ | ||
946 | inq_response[3] = 0x3C; /* Page Length LSB */ | ||
947 | inq_response[4] = 0x00; /* Medium Rotation Rate MSB */ | ||
948 | inq_response[5] = 0x01; /* Medium Rotation Rate LSB */ | ||
949 | inq_response[6] = 0x00; /* Form Factor */ | ||
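	/* Illustrative note: a rotation rate of 0x0001 is SBC-3 for
	 * "non-rotating medium" (an SSD); form factor 0x00 = not reported. */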
950 | |||
951 | xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); | ||
952 | res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); | ||
953 | |||
954 | kfree(inq_response); | ||
955 | out_mem: | ||
956 | return res; | ||
957 | } | ||
958 | |||
959 | /* LOG SENSE Helper Functions */ | ||
960 | |||
961 | static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
962 | int alloc_len) | ||
963 | { | ||
964 | int res = SNTI_TRANSLATION_SUCCESS; | ||
965 | int xfer_len; | ||
966 | u8 *log_response; | ||
967 | |||
968 | log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL); | ||
969 | if (log_response == NULL) { | ||
970 | res = -ENOMEM; | ||
971 | goto out_mem; | ||
972 | } | ||
973 | memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); | ||
974 | |||
975 | log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; | ||
976 | /* Subpage=0x00, Page Length MSB=0 */ | ||
977 | log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH; | ||
978 | log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; | ||
979 | log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; | ||
980 | log_response[6] = LOG_PAGE_TEMPERATURE_PAGE; | ||
981 | |||
982 | xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); | ||
983 | res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); | ||
984 | |||
985 | kfree(log_response); | ||
986 | out_mem: | ||
987 | return res; | ||
988 | } | ||
989 | |||
990 | static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, | ||
991 | struct sg_io_hdr *hdr, int alloc_len) | ||
992 | { | ||
993 | int res = SNTI_TRANSLATION_SUCCESS; | ||
994 | int xfer_len; | ||
995 | u8 *log_response; | ||
996 | struct nvme_command c; | ||
997 | struct nvme_dev *dev = ns->dev; | ||
998 | struct nvme_smart_log *smart_log; | ||
999 | dma_addr_t dma_addr; | ||
1000 | void *mem; | ||
1001 | u8 temp_c; | ||
1002 | u16 temp_k; | ||
1003 | |||
1004 | log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL); | ||
1005 | if (log_response == NULL) { | ||
1006 | res = -ENOMEM; | ||
1007 | goto out_mem; | ||
1008 | } | ||
1009 | memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH); | ||
1010 | |||
1011 | mem = dma_alloc_coherent(&dev->pci_dev->dev, | ||
1012 | sizeof(struct nvme_smart_log), | ||
1013 | &dma_addr, GFP_KERNEL); | ||
1014 | if (mem == NULL) { | ||
1015 | res = -ENOMEM; | ||
1016 | goto out_dma; | ||
1017 | } | ||
1018 | |||
1019 | /* Get SMART Log Page */ | ||
1020 | memset(&c, 0, sizeof(c)); | ||
1021 | c.common.opcode = nvme_admin_get_log_page; | ||
1022 | c.common.nsid = cpu_to_le32(0xFFFFFFFF); | ||
1023 | c.common.prp1 = cpu_to_le64(dma_addr); | ||
1024 | c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / | ||
1025 | BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); | ||
1026 | res = nvme_submit_admin_cmd(dev, &c, NULL); | ||
1027 | if (res != NVME_SC_SUCCESS) { | ||
1028 | temp_c = LOG_TEMP_UNKNOWN; | ||
1029 | } else { | ||
1030 | smart_log = mem; | ||
1031 | temp_k = (smart_log->temperature[1] << 8) + | ||
1032 | (smart_log->temperature[0]); | ||
1033 | temp_c = temp_k - KELVIN_TEMP_FACTOR; | ||
1034 | } | ||
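	/* Worked example (illustrative): temperature bytes {0x36, 0x01} give
	 * temp_k = 0x0136 = 310 K, so temp_c = 310 - 273 = 37 C. */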
1035 | |||
1036 | log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; | ||
1037 | /* Subpage=0x00, Page Length MSB=0 */ | ||
1038 | log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH; | ||
1039 | /* Informational Exceptions Log Parameter 1 Start */ | ||
1040 | /* Parameter Code=0x0000 bytes 4,5 */ | ||
1041 | log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */ | ||
1042 | log_response[7] = 0x04; /* PARAMETER LENGTH */ | ||
1043 | /* Additional sense code and qualifier = 0x00 each */ | ||
1044 | /* Use Temperature from NVMe Get Log Page, convert to C from K */ | ||
1045 | log_response[10] = temp_c; | ||
1046 | |||
1047 | xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH); | ||
1048 | res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); | ||
1049 | |||
1050 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), | ||
1051 | mem, dma_addr); | ||
1052 | out_dma: | ||
1053 | kfree(log_response); | ||
1054 | out_mem: | ||
1055 | return res; | ||
1056 | } | ||
1057 | |||
1058 | static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1059 | int alloc_len) | ||
1060 | { | ||
1061 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1062 | int xfer_len; | ||
1063 | u8 *log_response; | ||
1064 | struct nvme_command c; | ||
1065 | struct nvme_dev *dev = ns->dev; | ||
1066 | struct nvme_smart_log *smart_log; | ||
1067 | dma_addr_t dma_addr; | ||
1068 | void *mem; | ||
1069 | u32 feature_resp; | ||
1070 | u8 temp_c_cur, temp_c_thresh; | ||
1071 | u16 temp_k; | ||
1072 | |||
1073 | log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL); | ||
1074 | if (log_response == NULL) { | ||
1075 | res = -ENOMEM; | ||
1076 | goto out_mem; | ||
1077 | } | ||
1078 | memset(log_response, 0, LOG_TEMP_PAGE_LENGTH); | ||
1079 | |||
1080 | mem = dma_alloc_coherent(&dev->pci_dev->dev, | ||
1081 | sizeof(struct nvme_smart_log), | ||
1082 | &dma_addr, GFP_KERNEL); | ||
1083 | if (mem == NULL) { | ||
1084 | res = -ENOMEM; | ||
1085 | goto out_dma; | ||
1086 | } | ||
1087 | |||
1088 | /* Get SMART Log Page */ | ||
1089 | memset(&c, 0, sizeof(c)); | ||
1090 | c.common.opcode = nvme_admin_get_log_page; | ||
1091 | c.common.nsid = cpu_to_le32(0xFFFFFFFF); | ||
1092 | c.common.prp1 = cpu_to_le64(dma_addr); | ||
1093 | c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / | ||
1094 | BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); | ||
1095 | res = nvme_submit_admin_cmd(dev, &c, NULL); | ||
1096 | if (res != NVME_SC_SUCCESS) { | ||
1097 | temp_c_cur = LOG_TEMP_UNKNOWN; | ||
1098 | } else { | ||
1099 | smart_log = mem; | ||
1100 | temp_k = (smart_log->temperature[1] << 8) + | ||
1101 | (smart_log->temperature[0]); | ||
1102 | temp_c_cur = temp_k - KELVIN_TEMP_FACTOR; | ||
1103 | } | ||
1104 | |||
1105 | /* Get Features for Temp Threshold */ | ||
1106 | res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0, | ||
1107 | &feature_resp); | ||
1108 | if (res != NVME_SC_SUCCESS) | ||
1109 | temp_c_thresh = LOG_TEMP_UNKNOWN; | ||
1110 | else | ||
1111 | temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR; | ||
1112 | |||
1113 | log_response[0] = LOG_PAGE_TEMPERATURE_PAGE; | ||
1114 | /* Subpage=0x00, Page Length MSB=0 */ | ||
1115 | log_response[3] = REMAINING_TEMP_PAGE_LENGTH; | ||
1116 | /* Temperature Log Parameter 1 (Temperature) Start */ | ||
1117 | /* Parameter Code = 0x0000 */ | ||
1118 | log_response[6] = 0x01; /* Format and Linking = 01b */ | ||
1119 | log_response[7] = 0x02; /* Parameter Length */ | ||
1120 | /* Use Temperature from NVMe Get Log Page, convert to C from K */ | ||
1121 | log_response[9] = temp_c_cur; | ||
1122 | /* Temperature Log Parameter 2 (Reference Temperature) Start */ | ||
1123 | log_response[11] = 0x01; /* Parameter Code = 0x0001 */ | ||
1124 | log_response[12] = 0x01; /* Format and Linking = 01b */ | ||
1125 | log_response[13] = 0x02; /* Parameter Length */ | ||
1126 | /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */ | ||
1127 | log_response[15] = temp_c_thresh; | ||
1128 | |||
1129 | xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH); | ||
1130 | res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); | ||
1131 | |||
1132 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), | ||
1133 | mem, dma_addr); | ||
1134 | out_dma: | ||
1135 | kfree(log_response); | ||
1136 | out_mem: | ||
1137 | return res; | ||
1138 | } | ||
1139 | |||
1140 | /* MODE SENSE Helper Functions */ | ||
1141 | |||
1142 | static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa, | ||
1143 | u16 mode_data_length, u16 blk_desc_len) | ||
1144 | { | ||
1145 | /* Quick check to make sure I don't stomp on my own memory... */ | ||
1146 | if ((cdb10 && len < 8) || (!cdb10 && len < 4)) | ||
1147 | return SNTI_INTERNAL_ERROR; | ||
1148 | |||
1149 | if (cdb10) { | ||
1150 | resp[0] = (mode_data_length & 0xFF00) >> 8; | ||
1151 | resp[1] = (mode_data_length & 0x00FF); | ||
1152 | /* resp[2] and [3] are zero */ | ||
1153 | resp[4] = llbaa; | ||
1154 | resp[5] = RESERVED_FIELD; | ||
1155 | resp[6] = (blk_desc_len & 0xFF00) >> 8; | ||
1156 | resp[7] = (blk_desc_len & 0x00FF); | ||
1157 | } else { | ||
1158 | resp[0] = (mode_data_length & 0x00FF); | ||
1159 | /* resp[1] and [2] are zero */ | ||
1160 | resp[3] = (blk_desc_len & 0x00FF); | ||
1161 | } | ||
1162 | |||
1163 | return SNTI_TRANSLATION_SUCCESS; | ||
1164 | } | ||
1165 | |||
1166 | static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1167 | u8 *resp, int len, u8 llbaa) | ||
1168 | { | ||
1169 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1170 | int nvme_sc; | ||
1171 | struct nvme_dev *dev = ns->dev; | ||
1172 | dma_addr_t dma_addr; | ||
1173 | void *mem; | ||
1174 | struct nvme_id_ns *id_ns; | ||
1175 | u8 flbas; | ||
1176 | u32 lba_length; | ||
1177 | |||
1178 | if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN) | ||
1179 | return SNTI_INTERNAL_ERROR; | ||
1180 | else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN) | ||
1181 | return SNTI_INTERNAL_ERROR; | ||
1182 | |||
1183 | mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), | ||
1184 | &dma_addr, GFP_KERNEL); | ||
1185 | if (mem == NULL) { | ||
1186 | res = -ENOMEM; | ||
1187 | goto out; | ||
1188 | } | ||
1189 | |||
1190 | /* nvme ns identify */ | ||
1191 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); | ||
1192 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1193 | if (res) | ||
1194 | goto out_dma; | ||
1195 | if (nvme_sc) { | ||
1196 | res = nvme_sc; | ||
1197 | goto out_dma; | ||
1198 | } | ||
1199 | id_ns = mem; | ||
1200 | flbas = (id_ns->flbas) & 0x0F; | ||
1201 | lba_length = (1 << (id_ns->lbaf[flbas].ds)); | ||
1202 | |||
1203 | if (llbaa == 0) { | ||
1204 | __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap)); | ||
1205 | /* Byte 4 is reserved */ | ||
1206 | __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF); | ||
1207 | |||
1208 | memcpy(resp, &tmp_cap, sizeof(u32)); | ||
1209 | memcpy(&resp[4], &tmp_len, sizeof(u32)); | ||
1210 | } else { | ||
1211 | __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap)); | ||
1212 | __be32 tmp_len = cpu_to_be32(lba_length); | ||
1213 | |||
1214 | memcpy(resp, &tmp_cap, sizeof(u64)); | ||
1215 | /* Bytes 8, 9, 10, 11 are reserved */ | ||
1216 | memcpy(&resp[12], &tmp_len, sizeof(u32)); | ||
1217 | } | ||
1218 | |||
1219 | out_dma: | ||
1220 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, | ||
1221 | dma_addr); | ||
1222 | out: | ||
1223 | return res; | ||
1224 | } | ||
1225 | |||
1226 | static int nvme_trans_fill_control_page(struct nvme_ns *ns, | ||
1227 | struct sg_io_hdr *hdr, u8 *resp, | ||
1228 | int len) | ||
1229 | { | ||
1230 | if (len < MODE_PAGE_CONTROL_LEN) | ||
1231 | return SNTI_INTERNAL_ERROR; | ||
1232 | |||
1233 | resp[0] = MODE_PAGE_CONTROL; | ||
1234 | resp[1] = MODE_PAGE_CONTROL_LEN_FIELD; | ||
1235 | resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1, | ||
1236 | * D_SENSE=1, GLTSD=1, RLEC=0 */ | ||
1237 | resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */ | ||
1238 | /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */ | ||
1239 | resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */ | ||
1240 | /* resp[6] and [7] are obsolete, thus zero */ | ||
1241 | resp[8] = 0xFF; /* Busy timeout period = 0xffff */ | ||
1242 | resp[9] = 0xFF; | ||
1243 | /* Bytes 10,11: Extended selftest completion time = 0x0000 */ | ||
1244 | |||
1245 | return SNTI_TRANSLATION_SUCCESS; | ||
1246 | } | ||
1247 | |||
1248 | static int nvme_trans_fill_caching_page(struct nvme_ns *ns, | ||
1249 | struct sg_io_hdr *hdr, | ||
1250 | u8 *resp, int len) | ||
1251 | { | ||
1252 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1253 | int nvme_sc; | ||
1254 | struct nvme_dev *dev = ns->dev; | ||
1255 | u32 feature_resp; | ||
1256 | u8 vwc; | ||
1257 | |||
1258 | if (len < MODE_PAGE_CACHING_LEN) | ||
1259 | return SNTI_INTERNAL_ERROR; | ||
1260 | |||
1261 | nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0, | ||
1262 | &feature_resp); | ||
1263 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1264 | if (res) | ||
1265 | goto out; | ||
1266 | if (nvme_sc) { | ||
1267 | res = nvme_sc; | ||
1268 | goto out; | ||
1269 | } | ||
1270 | vwc = feature_resp & 0x00000001; | ||
1271 | |||
1272 | resp[0] = MODE_PAGE_CACHING; | ||
1273 | resp[1] = MODE_PAGE_CACHING_LEN_FIELD; | ||
1274 | resp[2] = vwc << 2; | ||
1275 | |||
1276 | out: | ||
1277 | return res; | ||
1278 | } | ||
1279 | |||
1280 | static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns, | ||
1281 | struct sg_io_hdr *hdr, u8 *resp, | ||
1282 | int len) | ||
1283 | { | ||
1284 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1285 | |||
1286 | if (len < MODE_PAGE_POW_CND_LEN) | ||
1287 | return SNTI_INTERNAL_ERROR; | ||
1288 | |||
1289 | resp[0] = MODE_PAGE_POWER_CONDITION; | ||
1290 | resp[1] = MODE_PAGE_POW_CND_LEN_FIELD; | ||
1291 | /* All other bytes are zero */ | ||
1292 | |||
1293 | return res; | ||
1294 | } | ||
1295 | |||
1296 | static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns, | ||
1297 | struct sg_io_hdr *hdr, u8 *resp, | ||
1298 | int len) | ||
1299 | { | ||
1300 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1301 | |||
1302 | if (len < MODE_PAGE_INF_EXC_LEN) | ||
1303 | return SNTI_INTERNAL_ERROR; | ||
1304 | |||
1305 | resp[0] = MODE_PAGE_INFO_EXCEP; | ||
1306 | resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD; | ||
1307 | resp[2] = 0x88; | ||
1308 | /* All other bytes are zero */ | ||
1309 | |||
1310 | return res; | ||
1311 | } | ||
1312 | |||
1313 | static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1314 | u8 *resp, int len) | ||
1315 | { | ||
1316 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1317 | u16 mode_pages_offset_1 = 0; | ||
1318 | u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4; | ||
1319 | |||
1320 | mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN; | ||
1321 | mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN; | ||
1322 | mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN; | ||
1323 | |||
1324 | res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1], | ||
1325 | MODE_PAGE_CACHING_LEN); | ||
1326 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1327 | goto out; | ||
1328 | res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2], | ||
1329 | MODE_PAGE_CONTROL_LEN); | ||
1330 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1331 | goto out; | ||
1332 | res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3], | ||
1333 | MODE_PAGE_POW_CND_LEN); | ||
1334 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1335 | goto out; | ||
1336 | res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4], | ||
1337 | MODE_PAGE_INF_EXC_LEN); | ||
1338 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1339 | goto out; | ||
1340 | |||
1341 | out: | ||
1342 | return res; | ||
1343 | } | ||
1344 | |||
1345 | static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa) | ||
1346 | { | ||
1347 | if (dbd == MODE_SENSE_BLK_DESC_ENABLED) { | ||
1348 | /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */ | ||
1349 | return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT; | ||
1350 | } else { | ||
1351 | return 0; | ||
1352 | } | ||
1353 | } | ||
1354 | |||
1355 | static int nvme_trans_mode_page_create(struct nvme_ns *ns, | ||
1356 | struct sg_io_hdr *hdr, u8 *cmd, | ||
1357 | u16 alloc_len, u8 cdb10, | ||
1358 | int (*mode_page_fill_func) | ||
1359 | (struct nvme_ns *, | ||
1360 | struct sg_io_hdr *hdr, u8 *, int), | ||
1361 | u16 mode_pages_tot_len) | ||
1362 | { | ||
1363 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1364 | int xfer_len; | ||
1365 | u8 *response; | ||
1366 | u8 dbd, llbaa; | ||
1367 | u16 resp_size; | ||
1368 | int mph_size; | ||
1369 | u16 mode_pages_offset_1; | ||
1370 | u16 blk_desc_len, blk_desc_offset, mode_data_length; | ||
1371 | |||
1372 | dbd = GET_MODE_SENSE_DBD(cmd); | ||
1373 | llbaa = GET_MODE_SENSE_LLBAA(cmd); | ||
1374 | mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10); | ||
1375 | blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa); | ||
1376 | |||
1377 | resp_size = mph_size + blk_desc_len + mode_pages_tot_len; | ||
1378 | /* Refer spc4r34 Table 440 for calculation of Mode data Length field */ | ||
1379 | mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len; | ||
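	/*
	 * Worked example (illustrative): MODE SENSE(10) with DBD=0, LLBAA=0
	 * and MODE_PAGE_RETURN_ALL gives resp_size = 8 + 8 + 0x54 = 100 and
	 * mode_data_length = 3 + 3 + 8 + 0x54 = 98, i.e. resp_size minus the
	 * two-byte MODE DATA LENGTH field itself.
	 */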
1380 | |||
1381 | blk_desc_offset = mph_size; | ||
1382 | mode_pages_offset_1 = blk_desc_offset + blk_desc_len; | ||
1383 | |||
1384 | response = kmalloc(resp_size, GFP_KERNEL); | ||
1385 | if (response == NULL) { | ||
1386 | res = -ENOMEM; | ||
1387 | goto out_mem; | ||
1388 | } | ||
1389 | memset(response, 0, resp_size); | ||
1390 | |||
1391 | res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10, | ||
1392 | llbaa, mode_data_length, blk_desc_len); | ||
1393 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1394 | goto out_free; | ||
1395 | if (blk_desc_len > 0) { | ||
1396 | res = nvme_trans_fill_blk_desc(ns, hdr, | ||
1397 | &response[blk_desc_offset], | ||
1398 | blk_desc_len, llbaa); | ||
1399 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1400 | goto out_free; | ||
1401 | } | ||
1402 | res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1], | ||
1403 | mode_pages_tot_len); | ||
1404 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1405 | goto out_free; | ||
1406 | |||
1407 | xfer_len = min(alloc_len, resp_size); | ||
1408 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); | ||
1409 | |||
1410 | out_free: | ||
1411 | kfree(response); | ||
1412 | out_mem: | ||
1413 | return res; | ||
1414 | } | ||
1415 | |||
1416 | /* Read Capacity Helper Functions */ | ||
1417 | |||
1418 | static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns, | ||
1419 | u8 cdb16) | ||
1420 | { | ||
1421 | u8 flbas; | ||
1422 | u32 lba_length; | ||
1423 | u64 rlba; | ||
1424 | u8 prot_en; | ||
1425 | u8 p_type_lut[4] = {0, 0, 1, 2}; | ||
1426 | __be64 tmp_rlba; | ||
1427 | __be32 tmp_rlba_32; | ||
1428 | __be32 tmp_len; | ||
1429 | |||
1430 | flbas = (id_ns->flbas) & 0x0F; | ||
1431 | lba_length = (1 << (id_ns->lbaf[flbas].ds)); | ||
1432 | rlba = le64_to_cpup(&id_ns->nsze) - 1; | ||
1433 | prot_en = id_ns->dps ? 0x01 : 0; | ||
1434 | |||
1435 | if (!cdb16) { | ||
1436 | if (rlba > 0xFFFFFFFF) | ||
1437 | rlba = 0xFFFFFFFF; | ||
1438 | tmp_rlba_32 = cpu_to_be32(rlba); | ||
1439 | tmp_len = cpu_to_be32(lba_length); | ||
1440 | memcpy(response, &tmp_rlba_32, sizeof(u32)); | ||
1441 | memcpy(&response[4], &tmp_len, sizeof(u32)); | ||
1442 | } else { | ||
1443 | tmp_rlba = cpu_to_be64(rlba); | ||
1444 | tmp_len = cpu_to_be32(lba_length); | ||
1445 | memcpy(response, &tmp_rlba, sizeof(u64)); | ||
1446 | memcpy(&response[8], &tmp_len, sizeof(u32)); | ||
1447 | response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en; | ||
1448 | /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */ | ||
1449 | /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */ | ||
1450 | /* Bytes 16-31 - Reserved */ | ||
1451 | } | ||
1452 | } | ||
1453 | |||
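/*
 * Worked example (illustrative): a namespace with nsze = 0x200000 and
 * lbaf.ds = 9 yields lba_length = 1 << 9 = 512 and rlba = 0x1FFFFF, both
 * returned big-endian by the routine above.
 */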
1454 | /* Start Stop Unit Helper Functions */ | ||
1455 | |||
1456 | static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1457 | u8 pc, u8 pcmod, u8 start) | ||
1458 | { | ||
1459 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1460 | int nvme_sc; | ||
1461 | struct nvme_dev *dev = ns->dev; | ||
1462 | dma_addr_t dma_addr; | ||
1463 | void *mem; | ||
1464 | struct nvme_id_ctrl *id_ctrl; | ||
1465 | int lowest_pow_st; /* max npss = lowest power consumption */ | ||
1466 | unsigned ps_desired = 0; | ||
1467 | |||
1468 | /* NVMe Controller Identify */ | ||
1469 | mem = dma_alloc_coherent(&dev->pci_dev->dev, | ||
1470 | sizeof(struct nvme_id_ctrl), | ||
1471 | &dma_addr, GFP_KERNEL); | ||
1472 | if (mem == NULL) { | ||
1473 | res = -ENOMEM; | ||
1474 | goto out; | ||
1475 | } | ||
1476 | nvme_sc = nvme_identify(dev, 0, 1, dma_addr); | ||
1477 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1478 | if (res) | ||
1479 | goto out_dma; | ||
1480 | if (nvme_sc) { | ||
1481 | res = nvme_sc; | ||
1482 | goto out_dma; | ||
1483 | } | ||
1484 | id_ctrl = mem; | ||
1485 | lowest_pow_st = id_ctrl->npss - 1; | ||
1486 | |||
1487 | switch (pc) { | ||
1488 | case NVME_POWER_STATE_START_VALID: | ||
1489 | /* Action unspecified if POWER CONDITION MODIFIER != 0 */ | ||
1490 | if (pcmod == 0 && start == 0x1) | ||
1491 | ps_desired = POWER_STATE_0; | ||
1492 | if (pcmod == 0 && start == 0x0) | ||
1493 | ps_desired = lowest_pow_st; | ||
1494 | break; | ||
1495 | case NVME_POWER_STATE_ACTIVE: | ||
1496 | /* Action unspecified if POWER CONDITION MODIFIER != 0 */ | ||
1497 | if (pcmod == 0) | ||
1498 | ps_desired = POWER_STATE_0; | ||
1499 | break; | ||
1500 | case NVME_POWER_STATE_IDLE: | ||
1501 | /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */ | ||
1502 | /* min of desired state and (lps-1) because lps is STOP */ | ||
1503 | if (pcmod == 0x0) | ||
1504 | ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1)); | ||
1505 | else if (pcmod == 0x1) | ||
1506 | ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1)); | ||
1507 | else if (pcmod == 0x2) | ||
1508 | ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1)); | ||
1509 | break; | ||
1510 | case NVME_POWER_STATE_STANDBY: | ||
1511 | /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */ | ||
1512 | if (pcmod == 0x0) | ||
1513 | ps_desired = max(0, (lowest_pow_st - 2)); | ||
1514 | else if (pcmod == 0x1) | ||
1515 | ps_desired = max(0, (lowest_pow_st - 1)); | ||
1516 | break; | ||
1517 | case NVME_POWER_STATE_LU_CONTROL: | ||
1518 | default: | ||
1519 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
1520 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
1521 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1522 | break; | ||
1523 | } | ||
1524 | nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0, | ||
1525 | NULL); | ||
1526 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1527 | if (res) | ||
1528 | goto out_dma; | ||
1529 | if (nvme_sc) | ||
1530 | res = nvme_sc; | ||
1531 | out_dma: | ||
1532 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem, | ||
1533 | dma_addr); | ||
1534 | out: | ||
1535 | return res; | ||
1536 | } | ||
1537 | |||
1538 | /* Write Buffer Helper Functions */ | ||
1539 | /* Also used for Format Unit, with hdr passed as NULL and buffer_id = 0 */ | ||
1540 | |||
1541 | static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1542 | u8 opcode, u32 tot_len, u32 offset, | ||
1543 | u8 buffer_id) | ||
1544 | { | ||
1545 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1546 | int nvme_sc; | ||
1547 | struct nvme_dev *dev = ns->dev; | ||
1548 | struct nvme_command c; | ||
1549 | struct nvme_iod *iod = NULL; | ||
1550 | unsigned length; | ||
1551 | |||
1552 | memset(&c, 0, sizeof(c)); | ||
1553 | c.common.opcode = opcode; | ||
1554 | if (opcode == nvme_admin_download_fw) { | ||
1555 | if (hdr->iovec_count > 0) { | ||
1556 | /* Assuming SGL is not allowed for this command */ | ||
1557 | res = nvme_trans_completion(hdr, | ||
1558 | SAM_STAT_CHECK_CONDITION, | ||
1559 | ILLEGAL_REQUEST, | ||
1560 | SCSI_ASC_INVALID_CDB, | ||
1561 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1562 | goto out; | ||
1563 | } | ||
1564 | iod = nvme_map_user_pages(dev, DMA_TO_DEVICE, | ||
1565 | (unsigned long)hdr->dxferp, tot_len); | ||
1566 | if (IS_ERR(iod)) { | ||
1567 | res = PTR_ERR(iod); | ||
1568 | goto out; | ||
1569 | } | ||
1570 | length = nvme_setup_prps(dev, &c.common, iod, tot_len, | ||
1571 | GFP_KERNEL); | ||
1572 | if (length != tot_len) { | ||
1573 | res = -ENOMEM; | ||
1574 | goto out_unmap; | ||
1575 | } | ||
1576 | |||
1577 | c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); | ||
1578 | c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); | ||
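		/* Worked example (illustrative): an 8192-byte image gives
		 * numd = 8192/4 - 1 = 2047, since NUMD is a 0's-based
		 * dword count. */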
1579 | } else if (opcode == nvme_admin_activate_fw) { | ||
1580 | u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV; | ||
1581 | c.common.cdw10[0] = cpu_to_le32(cdw10); | ||
1582 | } | ||
1583 | |||
1584 | nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL); | ||
1585 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1586 | if (res) | ||
1587 | goto out_unmap; | ||
1588 | if (nvme_sc) | ||
1589 | res = nvme_sc; | ||
1590 | |||
1591 | out_unmap: | ||
1592 | if (opcode == nvme_admin_download_fw) { | ||
1593 | nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod); | ||
1594 | nvme_free_iod(dev, iod); | ||
1595 | } | ||
1596 | out: | ||
1597 | return res; | ||
1598 | } | ||
1599 | |||
1600 | /* Mode Select Helper Functions */ | ||
1601 | |||
1602 | static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10, | ||
1603 | u16 *bd_len, u8 *llbaa) | ||
1604 | { | ||
1605 | if (cdb10) { | ||
1606 | /* 10 Byte CDB */ | ||
1607 | *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + | ||
1608 | parm_list[MODE_SELECT_10_BD_OFFSET + 1]; | ||
1609 | *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] & | ||
1610 | MODE_SELECT_10_LLBAA_MASK; | ||
1611 | } else { | ||
1612 | /* 6 Byte CDB */ | ||
1613 | *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET]; | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, | ||
1618 | u16 idx, u16 bd_len, u8 llbaa) | ||
1619 | { | ||
1620 | u16 bd_num; | ||
1621 | |||
1622 | bd_num = bd_len / ((llbaa == 0) ? | ||
1623 | SHORT_DESC_BLOCK : LONG_DESC_BLOCK); | ||
1624 | /* Store block descriptor info if a FORMAT UNIT comes later */ | ||
1625 | /* TODO: only the 1st BD is saved; what to do if multiple BDs are received? */ | ||
1626 | if (llbaa == 0) { | ||
1627 | /* Standard Block Descriptor - spc4r34 7.5.5.1 */ | ||
1628 | ns->mode_select_num_blocks = | ||
1629 | (parm_list[idx + 1] << 16) + | ||
1630 | (parm_list[idx + 2] << 8) + | ||
1631 | (parm_list[idx + 3]); | ||
1632 | |||
1633 | ns->mode_select_block_len = | ||
1634 | (parm_list[idx + 5] << 16) + | ||
1635 | (parm_list[idx + 6] << 8) + | ||
1636 | (parm_list[idx + 7]); | ||
1637 | } else { | ||
1638 | /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */ | ||
1639 | ns->mode_select_num_blocks = | ||
1640 | (((u64)parm_list[idx + 0]) << 56) + | ||
1641 | (((u64)parm_list[idx + 1]) << 48) + | ||
1642 | (((u64)parm_list[idx + 2]) << 40) + | ||
1643 | (((u64)parm_list[idx + 3]) << 32) + | ||
1644 | (((u64)parm_list[idx + 4]) << 24) + | ||
1645 | (((u64)parm_list[idx + 5]) << 16) + | ||
1646 | (((u64)parm_list[idx + 6]) << 8) + | ||
1647 | ((u64)parm_list[idx + 7]); | ||
1648 | |||
1649 | ns->mode_select_block_len = | ||
1650 | (parm_list[idx + 12] << 24) + | ||
1651 | (parm_list[idx + 13] << 16) + | ||
1652 | (parm_list[idx + 14] << 8) + | ||
1653 | (parm_list[idx + 15]); | ||
1654 | } | ||
1655 | } | ||
1656 | |||
1657 | static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1658 | u8 *mode_page, u8 page_code) | ||
1659 | { | ||
1660 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1661 | int nvme_sc; | ||
1662 | struct nvme_dev *dev = ns->dev; | ||
1663 | unsigned dword11; | ||
1664 | |||
1665 | switch (page_code) { | ||
1666 | case MODE_PAGE_CACHING: | ||
1667 | dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0); | ||
1668 | nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11, | ||
1669 | 0, NULL); | ||
1670 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1671 | if (res) | ||
1672 | break; | ||
1673 | if (nvme_sc) { | ||
1674 | res = nvme_sc; | ||
1675 | break; | ||
1676 | } | ||
1677 | break; | ||
1678 | case MODE_PAGE_CONTROL: | ||
1679 | break; | ||
1680 | case MODE_PAGE_POWER_CONDITION: | ||
1681 | /* Verify the OS is not trying to set timers */ | ||
1682 | if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) { | ||
1683 | res = nvme_trans_completion(hdr, | ||
1684 | SAM_STAT_CHECK_CONDITION, | ||
1685 | ILLEGAL_REQUEST, | ||
1686 | SCSI_ASC_INVALID_PARAMETER, | ||
1687 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1688 | if (!res) | ||
1689 | res = SNTI_INTERNAL_ERROR; | ||
1690 | break; | ||
1691 | } | ||
1692 | break; | ||
1693 | default: | ||
1694 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
1695 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
1696 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1697 | if (!res) | ||
1698 | res = SNTI_INTERNAL_ERROR; | ||
1699 | break; | ||
1700 | } | ||
1701 | |||
1702 | return res; | ||
1703 | } | ||
1704 | |||
1705 | static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1706 | u8 *cmd, u16 parm_list_len, u8 pf, | ||
1707 | u8 sp, u8 cdb10) | ||
1708 | { | ||
1709 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1710 | u8 *parm_list; | ||
1711 | u16 bd_len; | ||
1712 | u8 llbaa = 0; | ||
1713 | u16 index, saved_index; | ||
1714 | u8 page_code; | ||
1715 | u16 mp_size; | ||
1716 | |||
1717 | /* Get parm list from data-in/out buffer */ | ||
1718 | parm_list = kmalloc(parm_list_len, GFP_KERNEL); | ||
1719 | if (parm_list == NULL) { | ||
1720 | res = -ENOMEM; | ||
1721 | goto out; | ||
1722 | } | ||
1723 | |||
1724 | res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len); | ||
1725 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1726 | goto out_mem; | ||
1727 | |||
1728 | nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa); | ||
1729 | index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE); | ||
1730 | |||
1731 | if (bd_len != 0) { | ||
1732 | /* Block Descriptors present, parse */ | ||
1733 | nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa); | ||
1734 | index += bd_len; | ||
1735 | } | ||
1736 | saved_index = index; | ||
1737 | |||
1738 | /* Multiple mode pages may be present; iterate through all */ | ||
1739 | /* In 1st Iteration, don't do NVME Command, only check for CDB errors */ | ||
1740 | do { | ||
1741 | page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; | ||
1742 | mp_size = parm_list[index + 1] + 2; | ||
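| /* +2: the page length byte does not count the two-byte page header */ | ||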
1743 | if ((page_code != MODE_PAGE_CACHING) && | ||
1744 | (page_code != MODE_PAGE_CONTROL) && | ||
1745 | (page_code != MODE_PAGE_POWER_CONDITION)) { | ||
1746 | res = nvme_trans_completion(hdr, | ||
1747 | SAM_STAT_CHECK_CONDITION, | ||
1748 | ILLEGAL_REQUEST, | ||
1749 | SCSI_ASC_INVALID_CDB, | ||
1750 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1751 | goto out_mem; | ||
1752 | } | ||
1753 | index += mp_size; | ||
1754 | } while (index < parm_list_len); | ||
1755 | |||
1756 | /* In 2nd Iteration, do the NVME Commands */ | ||
1757 | index = saved_index; | ||
1758 | do { | ||
1759 | page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; | ||
1760 | mp_size = parm_list[index + 1] + 2; | ||
1761 | res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index], | ||
1762 | page_code); | ||
1763 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1764 | break; | ||
1765 | index += mp_size; | ||
1766 | } while (index < parm_list_len); | ||
1767 | |||
1768 | out_mem: | ||
1769 | kfree(parm_list); | ||
1770 | out: | ||
1771 | return res; | ||
1772 | } | ||
1773 | |||
1774 | /* Format Unit Helper Functions */ | ||
1775 | |||
1776 | static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, | ||
1777 | struct sg_io_hdr *hdr) | ||
1778 | { | ||
1779 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1780 | int nvme_sc; | ||
1781 | struct nvme_dev *dev = ns->dev; | ||
1782 | dma_addr_t dma_addr; | ||
1783 | void *mem; | ||
1784 | struct nvme_id_ns *id_ns; | ||
1785 | u8 flbas; | ||
1786 | |||
1787 | /* | ||
1788 | * SCSI expects a MODE SELECT to have been issued prior to a | ||
1789 | * FORMAT UNIT, with the block size and count taken from the | ||
1790 | * block descriptor in it. If no MODE SELECT has been issued, | ||
1791 | * FORMAT shall use the current values for both. | ||
1792 | */ | ||
1793 | |||
1794 | if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { | ||
1795 | mem = dma_alloc_coherent(&dev->pci_dev->dev, | ||
1796 | sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL); | ||
1797 | if (mem == NULL) { | ||
1798 | res = -ENOMEM; | ||
1799 | goto out; | ||
1800 | } | ||
1801 | /* nvme ns identify */ | ||
1802 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); | ||
1803 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1804 | if (res) | ||
1805 | goto out_dma; | ||
1806 | if (nvme_sc) { | ||
1807 | res = nvme_sc; | ||
1808 | goto out_dma; | ||
1809 | } | ||
1810 | id_ns = mem; | ||
1811 | |||
1812 | if (ns->mode_select_num_blocks == 0) | ||
1813 | ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap); | ||
1814 | if (ns->mode_select_block_len == 0) { | ||
1815 | flbas = (id_ns->flbas) & 0x0F; | ||
1816 | ns->mode_select_block_len = | ||
1817 | (1 << (id_ns->lbaf[flbas].ds)); | ||
1818 | } | ||
1819 | out_dma: | ||
1820 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), | ||
1821 | mem, dma_addr); | ||
1822 | } | ||
1823 | out: | ||
1824 | return res; | ||
1825 | } | ||
1826 | |||
1827 | static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len, | ||
1828 | u8 format_prot_info, u8 *nvme_pf_code) | ||
1829 | { | ||
1830 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1831 | u8 *parm_list; | ||
1832 | u8 pf_usage, pf_code; | ||
1833 | |||
1834 | parm_list = kmalloc(len, GFP_KERNEL); | ||
1835 | if (parm_list == NULL) { | ||
1836 | res = -ENOMEM; | ||
1837 | goto out; | ||
1838 | } | ||
1839 | res = nvme_trans_copy_from_user(hdr, parm_list, len); | ||
1840 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
1841 | goto out_mem; | ||
1842 | |||
1843 | if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] & | ||
1844 | FORMAT_UNIT_IMMED_MASK) != 0) { | ||
1845 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
1846 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
1847 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1848 | goto out_mem; | ||
1849 | } | ||
1850 | |||
1851 | if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN && | ||
1852 | (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) { | ||
1853 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
1854 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
1855 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1856 | goto out_mem; | ||
1857 | } | ||
1858 | pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] & | ||
1859 | FORMAT_UNIT_PROT_FIELD_USAGE_MASK; | ||
1860 | pf_code = (pf_usage << 2) | format_prot_info; | ||
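| /* | ||
| * pf_code carries PROTECTION FIELD USAGE in bits 4:2 and FMTPINFO | ||
| * in bits 1:0; the valid combinations below map to NVMe protection | ||
| * information types 0-3. | ||
| */ | ||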
1861 | switch (pf_code) { | ||
1862 | case 0: | ||
1863 | *nvme_pf_code = 0; | ||
1864 | break; | ||
1865 | case 2: | ||
1866 | *nvme_pf_code = 1; | ||
1867 | break; | ||
1868 | case 3: | ||
1869 | *nvme_pf_code = 2; | ||
1870 | break; | ||
1871 | case 7: | ||
1872 | *nvme_pf_code = 3; | ||
1873 | break; | ||
1874 | default: | ||
1875 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
1876 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
1877 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1878 | break; | ||
1879 | } | ||
1880 | |||
1881 | out_mem: | ||
1882 | kfree(parm_list); | ||
1883 | out: | ||
1884 | return res; | ||
1885 | } | ||
1886 | |||
1887 | static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
1888 | u8 prot_info) | ||
1889 | { | ||
1890 | int res = SNTI_TRANSLATION_SUCCESS; | ||
1891 | int nvme_sc; | ||
1892 | struct nvme_dev *dev = ns->dev; | ||
1893 | dma_addr_t dma_addr; | ||
1894 | void *mem; | ||
1895 | struct nvme_id_ns *id_ns; | ||
1896 | u8 i; | ||
1897 | u8 flbas, nlbaf; | ||
1898 | u8 selected_lbaf = 0xFF; | ||
1899 | u32 cdw10 = 0; | ||
1900 | struct nvme_command c; | ||
1901 | |||
1902 | /* Loop through the LBAFs in id_ns to find the required LBAF, put it in cdw10 */ | ||
1903 | mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), | ||
1904 | &dma_addr, GFP_KERNEL); | ||
1905 | if (mem == NULL) { | ||
1906 | res = -ENOMEM; | ||
1907 | goto out; | ||
1908 | } | ||
1909 | /* nvme ns identify */ | ||
1910 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); | ||
1911 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1912 | if (res) | ||
1913 | goto out_dma; | ||
1914 | if (nvme_sc) { | ||
1915 | res = nvme_sc; | ||
1916 | goto out_dma; | ||
1917 | } | ||
1918 | id_ns = mem; | ||
1919 | flbas = (id_ns->flbas) & 0x0F; | ||
1920 | nlbaf = id_ns->nlbaf; | ||
1921 | |||
1922 | for (i = 0; i < nlbaf; i++) { | ||
1923 | if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) { | ||
1924 | selected_lbaf = i; | ||
1925 | break; | ||
1926 | } | ||
1927 | } | ||
1928 | if (selected_lbaf > 0x0F) { | ||
1929 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
1930 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, | ||
1931 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1932 | } | ||
1933 | if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) { | ||
1934 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
1935 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, | ||
1936 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
1937 | } | ||
1938 | |||
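| /* Format NVM cdw10: LBA format in bits 3:0, PI type in bits 7:5 */ | ||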
1939 | cdw10 |= prot_info << 5; | ||
1940 | cdw10 |= selected_lbaf & 0x0F; | ||
1941 | memset(&c, 0, sizeof(c)); | ||
1942 | c.format.opcode = nvme_admin_format_nvm; | ||
1943 | c.format.nsid = cpu_to_le32(ns->ns_id); | ||
1944 | c.format.cdw10 = cpu_to_le32(cdw10); | ||
1945 | |||
1946 | nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL); | ||
1947 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
1948 | if (res) | ||
1949 | goto out_dma; | ||
1950 | if (nvme_sc) | ||
1951 | res = nvme_sc; | ||
1952 | |||
1953 | out_dma: | ||
1954 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, | ||
1955 | dma_addr); | ||
1956 | out: | ||
1957 | return res; | ||
1958 | } | ||
1959 | |||
1960 | /* Read/Write Helper Functions */ | ||
1961 | |||
1962 | static inline void nvme_trans_get_io_cdb6(u8 *cmd, | ||
1963 | struct nvme_trans_io_cdb *cdb_info) | ||
1964 | { | ||
1965 | cdb_info->fua = 0; | ||
1966 | cdb_info->prot_info = 0; | ||
1967 | cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) & | ||
1968 | IO_6_CDB_LBA_MASK; | ||
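| /* 6-byte CDBs carry only a 21-bit LBA, hence the mask */ | ||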
1969 | cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET); | ||
1970 | |||
1971 | /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */ | ||
1972 | if (cdb_info->xfer_len == 0) | ||
1973 | cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN; | ||
1974 | } | ||
1975 | |||
1976 | static inline void nvme_trans_get_io_cdb10(u8 *cmd, | ||
1977 | struct nvme_trans_io_cdb *cdb_info) | ||
1978 | { | ||
1979 | cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) & | ||
1980 | IO_CDB_FUA_MASK; | ||
1981 | cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) & | ||
1982 | IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT; | ||
1983 | cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET); | ||
1984 | cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET); | ||
1985 | } | ||
1986 | |||
1987 | static inline void nvme_trans_get_io_cdb12(u8 *cmd, | ||
1988 | struct nvme_trans_io_cdb *cdb_info) | ||
1989 | { | ||
1990 | cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) & | ||
1991 | IO_CDB_FUA_MASK; | ||
1992 | cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) & | ||
1993 | IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT; | ||
1994 | cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET); | ||
1995 | cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET); | ||
1996 | } | ||
1997 | |||
1998 | static inline void nvme_trans_get_io_cdb16(u8 *cmd, | ||
1999 | struct nvme_trans_io_cdb *cdb_info) | ||
2000 | { | ||
2001 | cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) & | ||
2002 | IO_CDB_FUA_MASK; | ||
2003 | cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) & | ||
2004 | IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT; | ||
2005 | cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET); | ||
2006 | cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET); | ||
2007 | } | ||
2008 | |||
2009 | static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr, | ||
2010 | struct nvme_trans_io_cdb *cdb_info, | ||
2011 | u32 max_blocks) | ||
2012 | { | ||
2013 | /* If using iovecs, send one nvme command per vector */ | ||
2014 | if (hdr->iovec_count > 0) | ||
2015 | return hdr->iovec_count; | ||
2016 | else if (cdb_info->xfer_len > max_blocks) | ||
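| /* split with ceiling division, e.g. 150 blocks at max 64 -> 3 cmds */ | ||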
2017 | return ((cdb_info->xfer_len - 1) / max_blocks) + 1; | ||
2018 | else | ||
2019 | return 1; | ||
2020 | } | ||
2021 | |||
2022 | static u16 nvme_trans_io_get_control(struct nvme_ns *ns, | ||
2023 | struct nvme_trans_io_cdb *cdb_info) | ||
2024 | { | ||
2025 | u16 control = 0; | ||
2026 | |||
2027 | /* When Protection information support is added, implement here */ | ||
2028 | |||
2029 | if (cdb_info->fua > 0) | ||
2030 | control |= NVME_RW_FUA; | ||
2031 | |||
2032 | return control; | ||
2033 | } | ||
2034 | |||
2035 | static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2036 | struct nvme_trans_io_cdb *cdb_info, u8 is_write) | ||
2037 | { | ||
2038 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2039 | int nvme_sc; | ||
2040 | struct nvme_dev *dev = ns->dev; | ||
2041 | struct nvme_queue *nvmeq; | ||
2042 | u32 num_cmds; | ||
2043 | struct nvme_iod *iod; | ||
2044 | u64 unit_len; | ||
2045 | u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */ | ||
2046 | u32 retcode; | ||
2047 | u32 i = 0; | ||
2048 | u64 nvme_offset = 0; | ||
2049 | void __user *next_mapping_addr; | ||
2050 | struct nvme_command c; | ||
2051 | u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read); | ||
2052 | u16 control; | ||
2053 | u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors); | ||
2054 | |||
2055 | num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); | ||
2056 | |||
2057 | /* | ||
2058 | * This loop handles two cases. | ||
2059 | * First, when an SGL is used in the form of an iovec list: | ||
2060 | * - Use iov_base as the next mapping address for the nvme command_id | ||
2061 | * - Use iov_len as the data transfer length for the command. | ||
2062 | * Second, when we have a single buffer: | ||
2063 | * - If larger than max_blocks, split into chunks, offset | ||
2064 | * each nvme command accordingly. | ||
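| * For example, a single-buffer request of 150 blocks with a | ||
| * 64-block maximum is sent as three commands covering blocks | ||
| * 0-63, 64-127 and 128-149 (hypothetical sizes). | ||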
2065 | */ | ||
2066 | for (i = 0; i < num_cmds; i++) { | ||
2067 | memset(&c, 0, sizeof(c)); | ||
2068 | if (hdr->iovec_count > 0) { | ||
2069 | struct sg_iovec sgl; | ||
2070 | |||
2071 | retcode = copy_from_user(&sgl, hdr->dxferp + | ||
2072 | i * sizeof(struct sg_iovec), | ||
2073 | sizeof(struct sg_iovec)); | ||
2074 | if (retcode) | ||
2075 | return -EFAULT; | ||
2076 | unit_len = sgl.iov_len; | ||
2077 | unit_num_blocks = unit_len >> ns->lba_shift; | ||
2078 | next_mapping_addr = sgl.iov_base; | ||
2079 | } else { | ||
2080 | unit_num_blocks = min((u64)max_blocks, | ||
2081 | (cdb_info->xfer_len - nvme_offset)); | ||
2082 | unit_len = unit_num_blocks << ns->lba_shift; | ||
2083 | next_mapping_addr = hdr->dxferp + | ||
2084 | ((1 << ns->lba_shift) * nvme_offset); | ||
2085 | } | ||
2086 | |||
2087 | c.rw.opcode = opcode; | ||
2088 | c.rw.nsid = cpu_to_le32(ns->ns_id); | ||
2089 | c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset); | ||
2090 | c.rw.length = cpu_to_le16(unit_num_blocks - 1); | ||
2091 | control = nvme_trans_io_get_control(ns, cdb_info); | ||
2092 | c.rw.control = cpu_to_le16(control); | ||
2093 | |||
2094 | iod = nvme_map_user_pages(dev, | ||
2095 | (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, | ||
2096 | (unsigned long)next_mapping_addr, unit_len); | ||
2097 | if (IS_ERR(iod)) { | ||
2098 | res = PTR_ERR(iod); | ||
2099 | goto out; | ||
2100 | } | ||
2101 | retcode = nvme_setup_prps(dev, &c.common, iod, unit_len, | ||
2102 | GFP_KERNEL); | ||
2103 | if (retcode != unit_len) { | ||
2104 | nvme_unmap_user_pages(dev, | ||
2105 | (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, | ||
2106 | iod); | ||
2107 | nvme_free_iod(dev, iod); | ||
2108 | res = -ENOMEM; | ||
2109 | goto out; | ||
2110 | } | ||
2111 | |||
2112 | nvme_offset += unit_num_blocks; | ||
2113 | |||
2114 | nvmeq = get_nvmeq(dev); | ||
2115 | /* | ||
2116 | * Since nvme_submit_sync_cmd sleeps, we can't keep | ||
2117 | * preemption disabled. We may be preempted at any | ||
2118 | * point, and be rescheduled to a different CPU. That | ||
2119 | * will cause cacheline bouncing, but no additional | ||
2120 | * races since q_lock already protects against other | ||
2121 | * CPUs. | ||
2122 | */ | ||
2123 | put_nvmeq(nvmeq); | ||
2124 | nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, | ||
2125 | NVME_IO_TIMEOUT); | ||
2126 | if (nvme_sc != NVME_SC_SUCCESS) { | ||
2127 | nvme_unmap_user_pages(dev, | ||
2128 | (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, | ||
2129 | iod); | ||
2130 | nvme_free_iod(dev, iod); | ||
2131 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
2132 | goto out; | ||
2133 | } | ||
2134 | nvme_unmap_user_pages(dev, | ||
2135 | (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, | ||
2136 | iod); | ||
2137 | nvme_free_iod(dev, iod); | ||
2138 | } | ||
2139 | res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS); | ||
2140 | |||
2141 | out: | ||
2142 | return res; | ||
2143 | } | ||
2144 | |||
2145 | |||
2146 | /* SCSI Command Translation Functions */ | ||
2147 | |||
2148 | static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write, | ||
2149 | u8 *cmd) | ||
2150 | { | ||
2151 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2152 | struct nvme_trans_io_cdb cdb_info; | ||
2153 | u8 opcode = cmd[0]; | ||
2154 | u64 xfer_bytes; | ||
2155 | u64 sum_iov_len = 0; | ||
2156 | struct sg_iovec sgl; | ||
2157 | int i; | ||
2158 | size_t not_copied; | ||
2159 | |||
2160 | /* Extract Fields from CDB */ | ||
2161 | switch (opcode) { | ||
2162 | case WRITE_6: | ||
2163 | case READ_6: | ||
2164 | nvme_trans_get_io_cdb6(cmd, &cdb_info); | ||
2165 | break; | ||
2166 | case WRITE_10: | ||
2167 | case READ_10: | ||
2168 | nvme_trans_get_io_cdb10(cmd, &cdb_info); | ||
2169 | break; | ||
2170 | case WRITE_12: | ||
2171 | case READ_12: | ||
2172 | nvme_trans_get_io_cdb12(cmd, &cdb_info); | ||
2173 | break; | ||
2174 | case WRITE_16: | ||
2175 | case READ_16: | ||
2176 | nvme_trans_get_io_cdb16(cmd, &cdb_info); | ||
2177 | break; | ||
2178 | default: | ||
2179 | /* Should never be reached; only R/W opcodes are dispatched here */ | ||
2180 | res = SNTI_INTERNAL_ERROR; | ||
2181 | goto out; | ||
2182 | } | ||
2183 | |||
2184 | /* Calculate total length of transfer (in bytes) */ | ||
2185 | if (hdr->iovec_count > 0) { | ||
2186 | for (i = 0; i < hdr->iovec_count; i++) { | ||
2187 | not_copied = copy_from_user(&sgl, hdr->dxferp + | ||
2188 | i * sizeof(struct sg_iovec), | ||
2189 | sizeof(struct sg_iovec)); | ||
2190 | if (not_copied) | ||
2191 | return -EFAULT; | ||
2192 | sum_iov_len += sgl.iov_len; | ||
2193 | /* IO vector sizes should be multiples of block size */ | ||
2194 | if (sgl.iov_len % (1 << ns->lba_shift) != 0) { | ||
2195 | res = nvme_trans_completion(hdr, | ||
2196 | SAM_STAT_CHECK_CONDITION, | ||
2197 | ILLEGAL_REQUEST, | ||
2198 | SCSI_ASC_INVALID_PARAMETER, | ||
2199 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2200 | goto out; | ||
2201 | } | ||
2202 | } | ||
2203 | } else { | ||
2204 | sum_iov_len = hdr->dxfer_len; | ||
2205 | } | ||
2206 | |||
2207 | /* As per the sg ioctl howto, if the lengths differ, use the lower one */ | ||
2208 | xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len); | ||
2209 | |||
2210 | /* If block count and actual data buffer size don't match, error out */ | ||
2211 | if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) { | ||
2212 | res = -EINVAL; | ||
2213 | goto out; | ||
2214 | } | ||
2215 | |||
2216 | /* Check for 0 length transfer - it is not illegal */ | ||
2217 | if (cdb_info.xfer_len == 0) | ||
2218 | goto out; | ||
2219 | |||
2220 | /* Send NVMe IO Command(s) */ | ||
2221 | res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write); | ||
2222 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
2223 | goto out; | ||
2224 | |||
2225 | out: | ||
2226 | return res; | ||
2227 | } | ||
2228 | |||
2229 | static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2230 | u8 *cmd) | ||
2231 | { | ||
2232 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2233 | u8 evpd; | ||
2234 | u8 page_code; | ||
2235 | int alloc_len; | ||
2236 | u8 *inq_response; | ||
2237 | |||
2238 | evpd = GET_INQ_EVPD_BIT(cmd); | ||
2239 | page_code = GET_INQ_PAGE_CODE(cmd); | ||
2240 | alloc_len = GET_INQ_ALLOC_LENGTH(cmd); | ||
2241 | |||
2242 | inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL); | ||
2243 | if (inq_response == NULL) { | ||
2244 | res = -ENOMEM; | ||
2245 | goto out_mem; | ||
2246 | } | ||
2247 | |||
2248 | if (evpd == 0) { | ||
2249 | if (page_code == INQ_STANDARD_INQUIRY_PAGE) { | ||
2250 | res = nvme_trans_standard_inquiry_page(ns, hdr, | ||
2251 | inq_response, alloc_len); | ||
2252 | } else { | ||
2253 | res = nvme_trans_completion(hdr, | ||
2254 | SAM_STAT_CHECK_CONDITION, | ||
2255 | ILLEGAL_REQUEST, | ||
2256 | SCSI_ASC_INVALID_CDB, | ||
2257 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2258 | } | ||
2259 | } else { | ||
2260 | switch (page_code) { | ||
2261 | case VPD_SUPPORTED_PAGES: | ||
2262 | res = nvme_trans_supported_vpd_pages(ns, hdr, | ||
2263 | inq_response, alloc_len); | ||
2264 | break; | ||
2265 | case VPD_SERIAL_NUMBER: | ||
2266 | res = nvme_trans_unit_serial_page(ns, hdr, inq_response, | ||
2267 | alloc_len); | ||
2268 | break; | ||
2269 | case VPD_DEVICE_IDENTIFIERS: | ||
2270 | res = nvme_trans_device_id_page(ns, hdr, inq_response, | ||
2271 | alloc_len); | ||
2272 | break; | ||
2273 | case VPD_EXTENDED_INQUIRY: | ||
2274 | res = nvme_trans_ext_inq_page(ns, hdr, alloc_len); | ||
2275 | break; | ||
2276 | case VPD_BLOCK_DEV_CHARACTERISTICS: | ||
2277 | res = nvme_trans_bdev_char_page(ns, hdr, alloc_len); | ||
2278 | break; | ||
2279 | default: | ||
2280 | res = nvme_trans_completion(hdr, | ||
2281 | SAM_STAT_CHECK_CONDITION, | ||
2282 | ILLEGAL_REQUEST, | ||
2283 | SCSI_ASC_INVALID_CDB, | ||
2284 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2285 | break; | ||
2286 | } | ||
2287 | } | ||
2288 | kfree(inq_response); | ||
2289 | out_mem: | ||
2290 | return res; | ||
2291 | } | ||
2292 | |||
2293 | static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2294 | u8 *cmd) | ||
2295 | { | ||
2296 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2297 | u16 alloc_len; | ||
2298 | u8 sp; | ||
2299 | u8 pc; | ||
2300 | u8 page_code; | ||
2301 | |||
2302 | sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET); | ||
2303 | if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) { | ||
2304 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2305 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2306 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2307 | goto out; | ||
2308 | } | ||
2309 | pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET); | ||
2310 | page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK; | ||
2311 | pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT; | ||
2312 | if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) { | ||
2313 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2314 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2315 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2316 | goto out; | ||
2317 | } | ||
2318 | alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET); | ||
2319 | switch (page_code) { | ||
2320 | case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE: | ||
2321 | res = nvme_trans_log_supp_pages(ns, hdr, alloc_len); | ||
2322 | break; | ||
2323 | case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE: | ||
2324 | res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len); | ||
2325 | break; | ||
2326 | case LOG_PAGE_TEMPERATURE_PAGE: | ||
2327 | res = nvme_trans_log_temperature(ns, hdr, alloc_len); | ||
2328 | break; | ||
2329 | default: | ||
2330 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2331 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2332 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2333 | break; | ||
2334 | } | ||
2335 | |||
2336 | out: | ||
2337 | return res; | ||
2338 | } | ||
2339 | |||
2340 | static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2341 | u8 *cmd) | ||
2342 | { | ||
2343 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2344 | u8 cdb10 = 0; | ||
2345 | u16 parm_list_len; | ||
2346 | u8 page_format; | ||
2347 | u8 save_pages; | ||
2348 | |||
2349 | page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET); | ||
2350 | page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK; | ||
2351 | |||
2352 | save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET); | ||
2353 | save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK; | ||
2354 | |||
2355 | if (GET_OPCODE(cmd) == MODE_SELECT) { | ||
2356 | parm_list_len = GET_U8_FROM_CDB(cmd, | ||
2357 | MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET); | ||
2358 | } else { | ||
2359 | parm_list_len = GET_U16_FROM_CDB(cmd, | ||
2360 | MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET); | ||
2361 | cdb10 = 1; | ||
2362 | } | ||
2363 | |||
2364 | if (parm_list_len != 0) { | ||
2365 | /* | ||
2366 | * According to SPC-4 r24, a parameter list length field of 0 | ||
2367 | * shall not be considered an error | ||
2368 | */ | ||
2369 | res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len, | ||
2370 | page_format, save_pages, cdb10); | ||
2371 | } | ||
2372 | |||
2373 | return res; | ||
2374 | } | ||
2375 | |||
2376 | static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2377 | u8 *cmd) | ||
2378 | { | ||
2379 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2380 | u16 alloc_len; | ||
2381 | u8 cdb10 = 0; | ||
2382 | u8 page_code; | ||
2383 | u8 pc; | ||
2384 | |||
2385 | if (GET_OPCODE(cmd) == MODE_SENSE) { | ||
2386 | alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET); | ||
2387 | } else { | ||
2388 | alloc_len = GET_U16_FROM_CDB(cmd, | ||
2389 | MODE_SENSE10_ALLOC_LEN_OFFSET); | ||
2390 | cdb10 = 1; | ||
2391 | } | ||
2392 | |||
2393 | pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) & | ||
2394 | MODE_SENSE_PAGE_CONTROL_MASK; | ||
2395 | if (pc != MODE_SENSE_PC_CURRENT_VALUES) { | ||
2396 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2397 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2398 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2399 | goto out; | ||
2400 | } | ||
2401 | |||
2402 | page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) & | ||
2403 | MODE_SENSE_PAGE_CODE_MASK; | ||
2404 | switch (page_code) { | ||
2405 | case MODE_PAGE_CACHING: | ||
2406 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, | ||
2407 | cdb10, | ||
2408 | &nvme_trans_fill_caching_page, | ||
2409 | MODE_PAGE_CACHING_LEN); | ||
2410 | break; | ||
2411 | case MODE_PAGE_CONTROL: | ||
2412 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, | ||
2413 | cdb10, | ||
2414 | &nvme_trans_fill_control_page, | ||
2415 | MODE_PAGE_CONTROL_LEN); | ||
2416 | break; | ||
2417 | case MODE_PAGE_POWER_CONDITION: | ||
2418 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, | ||
2419 | cdb10, | ||
2420 | &nvme_trans_fill_pow_cnd_page, | ||
2421 | MODE_PAGE_POW_CND_LEN); | ||
2422 | break; | ||
2423 | case MODE_PAGE_INFO_EXCEP: | ||
2424 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, | ||
2425 | cdb10, | ||
2426 | &nvme_trans_fill_inf_exc_page, | ||
2427 | MODE_PAGE_INF_EXC_LEN); | ||
2428 | break; | ||
2429 | case MODE_PAGE_RETURN_ALL: | ||
2430 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, | ||
2431 | cdb10, | ||
2432 | &nvme_trans_fill_all_pages, | ||
2433 | MODE_PAGE_ALL_LEN); | ||
2434 | break; | ||
2435 | default: | ||
2436 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2437 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2438 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2439 | break; | ||
2440 | } | ||
2441 | |||
2442 | out: | ||
2443 | return res; | ||
2444 | } | ||
2445 | |||
2446 | static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2447 | u8 *cmd) | ||
2448 | { | ||
2449 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2450 | int nvme_sc; | ||
2451 | u32 alloc_len = READ_CAP_10_RESP_SIZE; | ||
2452 | u32 resp_size = READ_CAP_10_RESP_SIZE; | ||
2453 | u32 xfer_len; | ||
2454 | u8 cdb16; | ||
2455 | struct nvme_dev *dev = ns->dev; | ||
2456 | dma_addr_t dma_addr; | ||
2457 | void *mem; | ||
2458 | struct nvme_id_ns *id_ns; | ||
2459 | u8 *response; | ||
2460 | |||
2461 | cdb16 = IS_READ_CAP_16(cmd); | ||
2462 | if (cdb16) { | ||
2463 | alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd); | ||
2464 | resp_size = READ_CAP_16_RESP_SIZE; | ||
2465 | } | ||
2466 | |||
2467 | mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), | ||
2468 | &dma_addr, GFP_KERNEL); | ||
2469 | if (mem == NULL) { | ||
2470 | res = -ENOMEM; | ||
2471 | goto out; | ||
2472 | } | ||
2473 | /* nvme ns identify */ | ||
2474 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); | ||
2475 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
2476 | if (res) | ||
2477 | goto out_dma; | ||
2478 | if (nvme_sc) { | ||
2479 | res = nvme_sc; | ||
2480 | goto out_dma; | ||
2481 | } | ||
2482 | id_ns = mem; | ||
2483 | |||
2484 | response = kmalloc(resp_size, GFP_KERNEL); | ||
2485 | if (response == NULL) { | ||
2486 | res = -ENOMEM; | ||
2487 | goto out_dma; | ||
2488 | } | ||
2489 | memset(response, 0, resp_size); | ||
2490 | nvme_trans_fill_read_cap(response, id_ns, cdb16); | ||
2491 | |||
2492 | xfer_len = min(alloc_len, resp_size); | ||
2493 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); | ||
2494 | |||
2495 | kfree(response); | ||
2496 | out_dma: | ||
2497 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, | ||
2498 | dma_addr); | ||
2499 | out: | ||
2500 | return res; | ||
2501 | } | ||
2502 | |||
2503 | static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2504 | u8 *cmd) | ||
2505 | { | ||
2506 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2507 | int nvme_sc; | ||
2508 | u32 alloc_len, xfer_len, resp_size; | ||
2509 | u8 select_report; | ||
2510 | u8 *response; | ||
2511 | struct nvme_dev *dev = ns->dev; | ||
2512 | dma_addr_t dma_addr; | ||
2513 | void *mem; | ||
2514 | struct nvme_id_ctrl *id_ctrl; | ||
2515 | u32 ll_length, lun_id; | ||
2516 | u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET; | ||
2517 | __be32 tmp_len; | ||
2518 | |||
2519 | alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd); | ||
2520 | select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET); | ||
2521 | |||
2522 | if ((select_report != ALL_LUNS_RETURNED) && | ||
2523 | (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) && | ||
2524 | (select_report != RESTRICTED_LUNS_RETURNED)) { | ||
2525 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2526 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2527 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2528 | goto out; | ||
2529 | } else { | ||
2530 | /* NVMe Controller Identify */ | ||
2531 | mem = dma_alloc_coherent(&dev->pci_dev->dev, | ||
2532 | sizeof(struct nvme_id_ctrl), | ||
2533 | &dma_addr, GFP_KERNEL); | ||
2534 | if (mem == NULL) { | ||
2535 | res = -ENOMEM; | ||
2536 | goto out; | ||
2537 | } | ||
2538 | nvme_sc = nvme_identify(dev, 0, 1, dma_addr); | ||
2539 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
2540 | if (res) | ||
2541 | goto out_dma; | ||
2542 | if (nvme_sc) { | ||
2543 | res = nvme_sc; | ||
2544 | goto out_dma; | ||
2545 | } | ||
2546 | id_ctrl = mem; | ||
2547 | ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE; | ||
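| /* one 8-byte LUN entry per namespace; id_ctrl->nn is the NS count */ | ||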
2548 | resp_size = ll_length + LUN_DATA_HEADER_SIZE; | ||
2549 | |||
2550 | if (alloc_len < resp_size) { | ||
2551 | res = nvme_trans_completion(hdr, | ||
2552 | SAM_STAT_CHECK_CONDITION, | ||
2553 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2554 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2555 | goto out_dma; | ||
2556 | } | ||
2557 | |||
2558 | response = kmalloc(resp_size, GFP_KERNEL); | ||
2559 | if (response == NULL) { | ||
2560 | res = -ENOMEM; | ||
2561 | goto out_dma; | ||
2562 | } | ||
2563 | memset(response, 0, resp_size); | ||
2564 | |||
2565 | /* The first LUN ID will always be 0 per the SAM spec */ | ||
2566 | for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) { | ||
2567 | /* | ||
2568 | * Set the LUN Id and then increment to the next LUN | ||
2569 | * location in the parameter data. | ||
2570 | */ | ||
2571 | __be64 tmp_id = cpu_to_be64(lun_id); | ||
2572 | memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64)); | ||
2573 | lun_id_offset += LUN_ENTRY_SIZE; | ||
2574 | } | ||
2575 | tmp_len = cpu_to_be32(ll_length); | ||
2576 | memcpy(response, &tmp_len, sizeof(u32)); | ||
2577 | } | ||
2578 | |||
2579 | xfer_len = min(alloc_len, resp_size); | ||
2580 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); | ||
2581 | |||
2582 | kfree(response); | ||
2583 | out_dma: | ||
2584 | dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem, | ||
2585 | dma_addr); | ||
2586 | out: | ||
2587 | return res; | ||
2588 | } | ||
2589 | |||
2590 | static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2591 | u8 *cmd) | ||
2592 | { | ||
2593 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2594 | u8 alloc_len, xfer_len, resp_size; | ||
2595 | u8 desc_format; | ||
2596 | u8 *response; | ||
2597 | |||
2598 | alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd); | ||
2599 | desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET); | ||
2600 | desc_format &= REQUEST_SENSE_DESC_MASK; | ||
2601 | |||
2602 | resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) : | ||
2603 | (FIXED_FMT_SENSE_DATA_SIZE)); | ||
2604 | response = kmalloc(resp_size, GFP_KERNEL); | ||
2605 | if (response == NULL) { | ||
2606 | res = -ENOMEM; | ||
2607 | goto out; | ||
2608 | } | ||
2609 | memset(response, 0, resp_size); | ||
2610 | |||
2611 | if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) { | ||
2612 | /* Descriptor Format Sense Data */ | ||
2613 | response[0] = DESC_FORMAT_SENSE_DATA; | ||
2614 | response[1] = NO_SENSE; | ||
2615 | /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */ | ||
2616 | response[2] = SCSI_ASC_NO_SENSE; | ||
2617 | response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
2618 | /* SDAT_OVFL = 0 | Additional Sense Length = 0 */ | ||
2619 | } else { | ||
2620 | /* Fixed Format Sense Data */ | ||
2621 | response[0] = FIXED_SENSE_DATA; | ||
2622 | /* Byte 1 = Obsolete */ | ||
2623 | response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */ | ||
2624 | /* Bytes 3-6 - Information - set to zero */ | ||
2625 | response[7] = FIXED_SENSE_DATA_ADD_LENGTH; | ||
2626 | /* Bytes 8-11 - Cmd Specific Information - set to zero */ | ||
2627 | response[12] = SCSI_ASC_NO_SENSE; | ||
2628 | response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; | ||
2629 | /* Byte 14 = Field Replaceable Unit Code = 0 */ | ||
2630 | /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */ | ||
2631 | } | ||
2632 | |||
2633 | xfer_len = min(alloc_len, resp_size); | ||
2634 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); | ||
2635 | |||
2636 | kfree(response); | ||
2637 | out: | ||
2638 | return res; | ||
2639 | } | ||
2640 | |||
2641 | static int nvme_trans_security_protocol(struct nvme_ns *ns, | ||
2642 | struct sg_io_hdr *hdr, | ||
2643 | u8 *cmd) | ||
2644 | { | ||
2645 | return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2646 | ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, | ||
2647 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2648 | } | ||
2649 | |||
2650 | static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2651 | u8 *cmd) | ||
2652 | { | ||
2653 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2654 | int nvme_sc; | ||
2655 | struct nvme_queue *nvmeq; | ||
2656 | struct nvme_command c; | ||
2657 | u8 immed, pcmod, pc, no_flush, start; | ||
2658 | |||
2659 | immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET); | ||
2660 | pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET); | ||
2661 | pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET); | ||
2662 | no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET); | ||
2663 | start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET); | ||
2664 | |||
2665 | immed &= START_STOP_UNIT_CDB_IMMED_MASK; | ||
2666 | pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK; | ||
2667 | pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT; | ||
2668 | no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK; | ||
2669 | start &= START_STOP_UNIT_CDB_START_MASK; | ||
2670 | |||
2671 | if (immed != 0) { | ||
2672 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2673 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2674 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2675 | } else { | ||
2676 | if (no_flush == 0) { | ||
2677 | /* Issue NVME FLUSH command prior to START STOP UNIT */ | ||
2678 | memset(&c, 0, sizeof(c)); | ||
2679 | c.common.opcode = nvme_cmd_flush; | ||
2680 | c.common.nsid = cpu_to_le32(ns->ns_id); | ||
2681 | |||
2682 | nvmeq = get_nvmeq(ns->dev); | ||
2683 | put_nvmeq(nvmeq); | ||
2684 | nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); | ||
2685 | |||
2686 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
2687 | if (res) | ||
2688 | goto out; | ||
2689 | if (nvme_sc) { | ||
2690 | res = nvme_sc; | ||
2691 | goto out; | ||
2692 | } | ||
2693 | } | ||
2694 | /* Setup the expected power state transition */ | ||
2695 | res = nvme_trans_power_state(ns, hdr, pc, pcmod, start); | ||
2696 | } | ||
2697 | |||
2698 | out: | ||
2699 | return res; | ||
2700 | } | ||
2701 | |||
2702 | static int nvme_trans_synchronize_cache(struct nvme_ns *ns, | ||
2703 | struct sg_io_hdr *hdr, u8 *cmd) | ||
2704 | { | ||
2705 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2706 | int nvme_sc; | ||
2707 | struct nvme_command c; | ||
2708 | struct nvme_queue *nvmeq; | ||
2709 | |||
2710 | memset(&c, 0, sizeof(c)); | ||
2711 | c.common.opcode = nvme_cmd_flush; | ||
2712 | c.common.nsid = cpu_to_le32(ns->ns_id); | ||
2713 | |||
2714 | nvmeq = get_nvmeq(ns->dev); | ||
2715 | put_nvmeq(nvmeq); | ||
2716 | nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); | ||
2717 | |||
2718 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
2719 | if (res) | ||
2720 | goto out; | ||
2721 | if (nvme_sc) | ||
2722 | res = nvme_sc; | ||
2723 | |||
2724 | out: | ||
2725 | return res; | ||
2726 | } | ||
2727 | |||
2728 | static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2729 | u8 *cmd) | ||
2730 | { | ||
2731 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2732 | u8 parm_hdr_len = 0; | ||
2733 | u8 nvme_pf_code = 0; | ||
2734 | u8 format_prot_info, long_list, format_data; | ||
2735 | |||
2736 | format_prot_info = GET_U8_FROM_CDB(cmd, | ||
2737 | FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET); | ||
2738 | long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET); | ||
2739 | format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET); | ||
2740 | |||
2741 | format_prot_info = (format_prot_info & | ||
2742 | FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >> | ||
2743 | FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT; | ||
2744 | long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK; | ||
2745 | format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK; | ||
2746 | |||
2747 | if (format_data != 0) { | ||
2748 | if (format_prot_info != 0) { | ||
2749 | if (long_list == 0) | ||
2750 | parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN; | ||
2751 | else | ||
2752 | parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN; | ||
2753 | } | ||
2754 | } else if (format_data == 0 && format_prot_info != 0) { | ||
2755 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2756 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2757 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2758 | goto out; | ||
2759 | } | ||
2760 | |||
2761 | /* Get parm header from data-in/out buffer */ | ||
2762 | /* | ||
2763 | * According to the translation spec, the only fields in the parameter | ||
2764 | * list we are concerned with are in the header. So allocate only that. | ||
2765 | */ | ||
2766 | if (parm_hdr_len > 0) { | ||
2767 | res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len, | ||
2768 | format_prot_info, &nvme_pf_code); | ||
2769 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
2770 | goto out; | ||
2771 | } | ||
2772 | |||
2773 | /* Attempt to activate any previously downloaded firmware image */ | ||
2774 | res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0); | ||
2775 | |||
2776 | /* Determine Block size and count and send format command */ | ||
2777 | res = nvme_trans_fmt_set_blk_size_count(ns, hdr); | ||
2778 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
2779 | goto out; | ||
2780 | |||
2781 | res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code); | ||
2782 | |||
2783 | out: | ||
2784 | return res; | ||
2785 | } | ||
2786 | |||
2787 | static int nvme_trans_test_unit_ready(struct nvme_ns *ns, | ||
2788 | struct sg_io_hdr *hdr, | ||
2789 | u8 *cmd) | ||
2790 | { | ||
2791 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2792 | struct nvme_dev *dev = ns->dev; | ||
2793 | |||
2794 | if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) | ||
2795 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2796 | NOT_READY, SCSI_ASC_LUN_NOT_READY, | ||
2797 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2798 | else | ||
2799 | res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0); | ||
2800 | |||
2801 | return res; | ||
2802 | } | ||
2803 | |||
2804 | static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2805 | u8 *cmd) | ||
2806 | { | ||
2807 | int res = SNTI_TRANSLATION_SUCCESS; | ||
2808 | u32 buffer_offset, parm_list_length; | ||
2809 | u8 buffer_id, mode; | ||
2810 | |||
2811 | parm_list_length = | ||
2812 | GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET); | ||
2813 | if (parm_list_length % BYTES_TO_DWORDS != 0) { | ||
2814 | /* NVMe expects the firmware image to be a whole number of dwords */ | ||
2815 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2816 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2817 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2818 | goto out; | ||
2819 | } | ||
2820 | buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET); | ||
2821 | if (buffer_id > NVME_MAX_FIRMWARE_SLOT) { | ||
2822 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2823 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2824 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2825 | goto out; | ||
2826 | } | ||
2827 | mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) & | ||
2828 | WRITE_BUFFER_CDB_MODE_MASK; | ||
2829 | buffer_offset = | ||
2830 | GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET); | ||
2831 | |||
2832 | switch (mode) { | ||
2833 | case DOWNLOAD_SAVE_ACTIVATE: | ||
2834 | res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw, | ||
2835 | parm_list_length, buffer_offset, | ||
2836 | buffer_id); | ||
2837 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
2838 | goto out; | ||
2839 | res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, | ||
2840 | parm_list_length, buffer_offset, | ||
2841 | buffer_id); | ||
2842 | break; | ||
2843 | case DOWNLOAD_SAVE_DEFER_ACTIVATE: | ||
2844 | res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw, | ||
2845 | parm_list_length, buffer_offset, | ||
2846 | buffer_id); | ||
2847 | break; | ||
2848 | case ACTIVATE_DEFERRED_MICROCODE: | ||
2849 | res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, | ||
2850 | parm_list_length, buffer_offset, | ||
2851 | buffer_id); | ||
2852 | break; | ||
2853 | default: | ||
2854 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2855 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, | ||
2856 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2857 | break; | ||
2858 | } | ||
2859 | |||
2860 | out: | ||
2861 | return res; | ||
2862 | } | ||
2863 | |||
2864 | struct scsi_unmap_blk_desc { | ||
2865 | __be64 slba; | ||
2866 | __be32 nlb; | ||
2867 | u32 resv; | ||
2868 | }; | ||
2869 | |||
2870 | struct scsi_unmap_parm_list { | ||
2871 | __be16 unmap_data_len; | ||
2872 | __be16 unmap_blk_desc_data_len; | ||
2873 | u32 resv; | ||
2874 | struct scsi_unmap_blk_desc desc[0]; | ||
2875 | }; | ||
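| /* | ||
| * UNMAP parameter list (sbc3): an 8-byte header followed by 16-byte | ||
| * block descriptors; unmap_blk_desc_data_len counts descriptor bytes | ||
| * only. | ||
| */ | ||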
2876 | |||
2877 | static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr, | ||
2878 | u8 *cmd) | ||
2879 | { | ||
2880 | struct nvme_dev *dev = ns->dev; | ||
2881 | struct scsi_unmap_parm_list *plist; | ||
2882 | struct nvme_dsm_range *range; | ||
2883 | struct nvme_queue *nvmeq; | ||
2884 | struct nvme_command c; | ||
2885 | int i, nvme_sc, res = -ENOMEM; | ||
2886 | u16 ndesc, list_len; | ||
2887 | dma_addr_t dma_addr; | ||
2888 | |||
2889 | list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET); | ||
2890 | if (!list_len) | ||
2891 | return -EINVAL; | ||
2892 | |||
2893 | plist = kmalloc(list_len, GFP_KERNEL); | ||
2894 | if (!plist) | ||
2895 | return -ENOMEM; | ||
2896 | |||
2897 | res = nvme_trans_copy_from_user(hdr, plist, list_len); | ||
2898 | if (res != SNTI_TRANSLATION_SUCCESS) | ||
2899 | goto out; | ||
2900 | |||
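| /* each block descriptor is 16 bytes, hence the shift by 4 */ | ||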
2901 | ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4; | ||
2902 | if (!ndesc || ndesc > 256) { | ||
2903 | res = -EINVAL; | ||
2904 | goto out; | ||
2905 | } | ||
2906 | |||
2907 | range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range), | ||
2908 | &dma_addr, GFP_KERNEL); | ||
2909 | if (!range) { | ||
| res = -ENOMEM; | ||
2910 | goto out; | ||
| } | ||
2911 | |||
2912 | for (i = 0; i < ndesc; i++) { | ||
2913 | range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb)); | ||
2914 | range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba)); | ||
2915 | range[i].cattr = 0; | ||
2916 | } | ||
2917 | |||
2918 | memset(&c, 0, sizeof(c)); | ||
2919 | c.dsm.opcode = nvme_cmd_dsm; | ||
2920 | c.dsm.nsid = cpu_to_le32(ns->ns_id); | ||
2921 | c.dsm.prp1 = cpu_to_le64(dma_addr); | ||
2922 | c.dsm.nr = cpu_to_le32(ndesc - 1); | ||
2923 | c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); | ||
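| /* AD (deallocate) makes this DSM the NVMe equivalent of discard */ | ||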
2924 | |||
2925 | nvmeq = get_nvmeq(dev); | ||
2926 | put_nvmeq(nvmeq); | ||
2927 | |||
2928 | nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); | ||
2929 | res = nvme_trans_status_code(hdr, nvme_sc); | ||
2930 | |||
2931 | dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range), | ||
2932 | range, dma_addr); | ||
2933 | out: | ||
2934 | kfree(plist); | ||
2935 | return res; | ||
2936 | } | ||
2937 | |||
2938 | static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) | ||
2939 | { | ||
2940 | u8 cmd[BLK_MAX_CDB]; | ||
2941 | int retcode; | ||
2942 | unsigned int opcode; | ||
2943 | |||
2944 | if (hdr->cmdp == NULL) | ||
2945 | return -EMSGSIZE; | ||
2946 | if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len)) | ||
2947 | return -EFAULT; | ||
2948 | |||
2949 | opcode = cmd[0]; | ||
2950 | |||
2951 | switch (opcode) { | ||
2952 | case READ_6: | ||
2953 | case READ_10: | ||
2954 | case READ_12: | ||
2955 | case READ_16: | ||
2956 | retcode = nvme_trans_io(ns, hdr, 0, cmd); | ||
2957 | break; | ||
2958 | case WRITE_6: | ||
2959 | case WRITE_10: | ||
2960 | case WRITE_12: | ||
2961 | case WRITE_16: | ||
2962 | retcode = nvme_trans_io(ns, hdr, 1, cmd); | ||
2963 | break; | ||
2964 | case INQUIRY: | ||
2965 | retcode = nvme_trans_inquiry(ns, hdr, cmd); | ||
2966 | break; | ||
2967 | case LOG_SENSE: | ||
2968 | retcode = nvme_trans_log_sense(ns, hdr, cmd); | ||
2969 | break; | ||
2970 | case MODE_SELECT: | ||
2971 | case MODE_SELECT_10: | ||
2972 | retcode = nvme_trans_mode_select(ns, hdr, cmd); | ||
2973 | break; | ||
2974 | case MODE_SENSE: | ||
2975 | case MODE_SENSE_10: | ||
2976 | retcode = nvme_trans_mode_sense(ns, hdr, cmd); | ||
2977 | break; | ||
2978 | case READ_CAPACITY: | ||
2979 | retcode = nvme_trans_read_capacity(ns, hdr, cmd); | ||
2980 | break; | ||
2981 | case SERVICE_ACTION_IN: | ||
2982 | if (IS_READ_CAP_16(cmd)) | ||
2983 | retcode = nvme_trans_read_capacity(ns, hdr, cmd); | ||
2984 | else | ||
2985 | goto out; | ||
2986 | break; | ||
2987 | case REPORT_LUNS: | ||
2988 | retcode = nvme_trans_report_luns(ns, hdr, cmd); | ||
2989 | break; | ||
2990 | case REQUEST_SENSE: | ||
2991 | retcode = nvme_trans_request_sense(ns, hdr, cmd); | ||
2992 | break; | ||
2993 | case SECURITY_PROTOCOL_IN: | ||
2994 | case SECURITY_PROTOCOL_OUT: | ||
2995 | retcode = nvme_trans_security_protocol(ns, hdr, cmd); | ||
2996 | break; | ||
2997 | case START_STOP: | ||
2998 | retcode = nvme_trans_start_stop(ns, hdr, cmd); | ||
2999 | break; | ||
3000 | case SYNCHRONIZE_CACHE: | ||
3001 | retcode = nvme_trans_synchronize_cache(ns, hdr, cmd); | ||
3002 | break; | ||
3003 | case FORMAT_UNIT: | ||
3004 | retcode = nvme_trans_format_unit(ns, hdr, cmd); | ||
3005 | break; | ||
3006 | case TEST_UNIT_READY: | ||
3007 | retcode = nvme_trans_test_unit_ready(ns, hdr, cmd); | ||
3008 | break; | ||
3009 | case WRITE_BUFFER: | ||
3010 | retcode = nvme_trans_write_buffer(ns, hdr, cmd); | ||
3011 | break; | ||
3012 | case UNMAP: | ||
3013 | retcode = nvme_trans_unmap(ns, hdr, cmd); | ||
3014 | break; | ||
3015 | default: | ||
3016 | out: | ||
3017 | retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
3018 | ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, | ||
3019 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
3020 | break; | ||
3021 | } | ||
3022 | return retcode; | ||
3023 | } | ||
3024 | |||
3025 | int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr) | ||
3026 | { | ||
3027 | struct sg_io_hdr hdr; | ||
3028 | int retcode; | ||
3029 | |||
3030 | if (!capable(CAP_SYS_ADMIN)) | ||
3031 | return -EACCES; | ||
3032 | if (copy_from_user(&hdr, u_hdr, sizeof(hdr))) | ||
3033 | return -EFAULT; | ||
3034 | if (hdr.interface_id != 'S') | ||
3035 | return -EINVAL; | ||
3036 | if (hdr.cmd_len > BLK_MAX_CDB) | ||
3037 | return -EINVAL; | ||
3038 | |||
3039 | retcode = nvme_scsi_translate(ns, &hdr); | ||
3040 | if (retcode < 0) | ||
3041 | return retcode; | ||
3042 | if (retcode > 0) | ||
3043 | retcode = SNTI_TRANSLATION_SUCCESS; | ||
3044 | if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0) | ||
3045 | return -EFAULT; | ||
3046 | |||
3047 | return retcode; | ||
3048 | } | ||
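| /* | ||
| * Usage sketch (hypothetical, not built with this file): userspace | ||
| * reaches nvme_sg_io() through the SG_IO ioctl on an nvme block | ||
| * device; the caller needs CAP_SYS_ADMIN (see the capable() check | ||
| * above), <scsi/sg.h>, and an fd from open("/dev/nvme0n1", O_RDWR). | ||
| * E.g. to issue a TEST UNIT READY, whose CDB is six zero bytes: | ||
| * | ||
| *	unsigned char cdb[6] = { 0 }; | ||
| *	unsigned char sense[32]; | ||
| *	struct sg_io_hdr io = { 0 }; | ||
| * | ||
| *	io.interface_id = 'S'; | ||
| *	io.cmd_len = sizeof(cdb); | ||
| *	io.cmdp = cdb; | ||
| *	io.mx_sb_len = sizeof(sense); | ||
| *	io.sbp = sense; | ||
| *	io.dxfer_direction = SG_DXFER_NONE; | ||
| * | ||
| *	if (ioctl(fd, SG_IO, &io) < 0) | ||
| *		perror("SG_IO"); | ||
| */ | ||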
3049 | |||
3050 | int nvme_sg_get_version_num(int __user *ip) | ||
3051 | { | ||
3052 | return put_user(sg_version_num, ip); | ||
3053 | } | ||