 drivers/block/nvme-core.c | 298 ++++++++++++++++++++++++++++++++++++---------
 include/linux/nvme.h      |   2 +
 include/uapi/linux/nvme.h |  16 ++++
 3 files changed, 249 insertions(+), 67 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cbdfbbf98392..3ffa57a932ea 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -37,6 +37,7 @@
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
@@ -482,6 +483,62 @@ static int nvme_error_status(u16 status)
 	}
 }
 
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == v)
+		pi->ref_tag = cpu_to_be32(p);
+}
+
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == p)
+		pi->ref_tag = cpu_to_be32(v);
+}
+
+/**
+ * nvme_dif_remap - remaps ref tags to bip seed and physical lba
+ *
+ * The virtual start sector is the one that was originally submitted by the
+ * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
+ * start sector may be different. Remap protection information to match the
+ * physical LBA on writes, and back to the original seed on reads.
+ *
+ * Type 0 and 3 do not have a ref tag, so no remapping required.
+ */
+static void nvme_dif_remap(struct request *req,
+			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+	struct nvme_ns *ns = req->rq_disk->private_data;
+	struct bio_integrity_payload *bip;
+	struct t10_pi_tuple *pi;
+	void *p, *pmap;
+	u32 i, nlb, ts, phys, virt;
+
+	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
+		return;
+
+	bip = bio_integrity(req->bio);
+	if (!bip)
+		return;
+
+	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+	if (!pmap)
+		return;
+
+	p = pmap;
+	virt = bip_get_seed(bip);
+	phys = nvme_block_nr(ns, blk_rq_pos(req));
+	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
+	ts = ns->disk->integrity->tuple_size;
+
+	for (i = 0; i < nlb; i++, virt++, phys++) {
+		pi = (struct t10_pi_tuple *)p;
+		dif_swap(phys, virt, pi);
+		p += ts;
+	}
+	kunmap_atomic(pmap);
+}
+
 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
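The remapping above can be modeled in plain userspace C. The sketch below is not driver code: values are hypothetical (a partition starting at physical sector 2048, bip seed at virtual sector 0, four blocks), and htonl/ntohl stand in for cpu_to_be32/be32_to_cpu.

        /* Userspace model of nvme_dif_remap(); not the driver function. */
        #include <stdint.h>
        #include <stdio.h>
        #include <arpa/inet.h>

        struct pi_tuple {
                uint16_t guard;
                uint16_t app;
                uint32_t ref;   /* big-endian, like t10_pi_tuple's ref_tag */
        };

        /* Same check-then-swap as nvme_dif_prep()/nvme_dif_complete(). */
        static void remap(struct pi_tuple *pi, uint32_t from, uint32_t to)
        {
                if (ntohl(pi->ref) == from)
                        pi->ref = htonl(to);
        }

        int main(void)
        {
                struct pi_tuple pi[4];
                uint32_t virt = 0, phys = 2048, i;

                for (i = 0; i < 4; i++)
                        pi[i].ref = htonl(virt + i);    /* seeded with virtual LBAs */
                for (i = 0; i < 4; i++)
                        remap(&pi[i], virt + i, phys + i); /* prep: virtual -> physical */
                for (i = 0; i < 4; i++)                 /* prints 2048..2051 */
                        printf("tuple %u: ref_tag %u\n", i, ntohl(pi[i].ref));
                return 0;
        }

On completion of a read, the same swap runs with the arguments inverted, restoring the seed the block layer expects to verify against.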
@@ -512,9 +569,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			"completing aborted command with status:%04x\n",
 			status);
 
-	if (iod->nents)
+	if (iod->nents) {
 		dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
 			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (blk_integrity_rq(req)) {
+			if (!rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_complete);
+			dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+				rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		}
+	}
 	nvme_free_iod(nvmeq->dev, iod);
 
 	blk_mq_complete_request(req);
@@ -670,6 +734,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+	if (blk_integrity_rq(req)) {
+		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			cmnd->rw.reftag = cpu_to_le32(
+					nvme_block_nr(ns, blk_rq_pos(req)));
+			break;
+		}
+	} else if (ns->ms)
+		control |= NVME_RW_PRINFO_PRACT;
+
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
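Restated as a standalone helper, the PRINFO policy above reads as follows. This is a sketch, not a driver function; it uses only the constants this patch adds to include/uapi/linux/nvme.h.

        /* Sketch of the nvme_submit_iod() control-field policy above. */
        static u16 nvme_rw_prinfo(int pi_type, int ms, bool have_integrity_buf)
        {
                u16 control = 0;

                if (have_integrity_buf) {
                        control |= NVME_RW_PRINFO_PRCHK_GUARD;  /* check CRC guard */
                        if (pi_type == NVME_NS_DPS_PI_TYPE1 ||
                            pi_type == NVME_NS_DPS_PI_TYPE2)
                                control |= NVME_RW_PRINFO_PRCHK_REF; /* check ref tag too */
                } else if (ms) {
                        /* no buffer from the block layer: PRACT=1 makes the
                         * controller generate PI on writes and strip it on reads */
                        control |= NVME_RW_PRINFO_PRACT;
                }
                return control;
        }

Type 3 formats get only the guard check because their ref tag carries no LBA meaning, matching the remap exemption in nvme_dif_remap().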
@@ -690,6 +772,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_iod *iod;
 	enum dma_data_direction dma_dir;
 
+	/*
+	 * If formatted with metadata, require that the block layer provide a
+	 * buffer unless this namespace is formatted such that the metadata
+	 * can be stripped/generated by the controller with PRACT=1.
+	 */
+	if (ns->ms && !blk_integrity_rq(req)) {
+		if (!(ns->pi_type && ns->ms == 8)) {
+			req->errors = -EFAULT;
+			blk_mq_complete_request(req);
+			return BLK_MQ_RQ_QUEUE_OK;
+		}
+	}
+
 	iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
 	if (!iod)
 		return BLK_MQ_RQ_QUEUE_BUSY;
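The gate is easier to see as a predicate. The helper name below is hypothetical; the logic mirrors the check above, where 8 is sizeof(struct t10_pi_tuple).

        /* Hypothetical restatement of the admission check in nvme_queue_rq():
         *   no metadata              -> accept
         *   integrity buffer given   -> accept (mapped later in the function)
         *   PI format with ms == 8   -> accept; PRACT=1 lets the controller
         *                               generate/strip the tuple itself
         *   anything else            -> -EFAULT; the format cannot be satisfied
         */
        static bool nvme_rq_admissible(struct nvme_ns *ns, struct request *req)
        {
                if (!ns->ms || blk_integrity_rq(req))
                        return true;
                return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
        }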
@@ -725,6 +820,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 					iod->nents, dma_dir);
 			goto retry_cmd;
 		}
+		if (blk_integrity_rq(req)) {
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+				goto error_cmd;
+
+			sg_init_table(iod->meta_sg, 1);
+			if (blk_rq_map_integrity_sg(
+					req->q, req->bio, iod->meta_sg) != 1)
+				goto error_cmd;
+
+			if (rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_prep);
+
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+				goto error_cmd;
+		}
 	}
 
 	nvme_set_info(cmd, iod, req_completion);
@@ -1875,13 +1985,61 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
 	return 0;
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	ns->queue->limits.discard_zeroes_data = 0;
+	ns->queue->limits.discard_alignment = logical_block_size;
+	ns->queue->limits.discard_granularity = logical_block_size;
+	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+	.name			= "NVME_META_NOOP",
+	.generate_fn		= nvme_noop_generate,
+	.verify_fn		= nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+	struct blk_integrity integrity;
+
+	switch (ns->pi_type) {
+	case NVME_NS_DPS_PI_TYPE3:
+		integrity = t10_pi_type3_crc;
+		break;
+	case NVME_NS_DPS_PI_TYPE1:
+	case NVME_NS_DPS_PI_TYPE2:
+		integrity = t10_pi_type1_crc;
+		break;
+	default:
+		integrity = nvme_meta_noop;
+		break;
+	}
+	integrity.tuple_size = ns->ms;
+	blk_integrity_register(ns->disk, &integrity);
+	blk_queue_max_integrity_segments(ns->queue, 1);
+}
+
 static int nvme_revalidate_disk(struct gendisk *disk)
 {
 	struct nvme_ns *ns = disk->private_data;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_id_ns *id;
 	dma_addr_t dma_addr;
-	int lbaf;
+	int lbaf, pi_type, old_ms;
+	unsigned short bs;
 
 	id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
 								GFP_KERNEL);
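For reference, the 8-byte tuple that the t10_pi_type1_crc/t10_pi_type3_crc profiles generate and verify per logical block is laid out as follows (from <linux/t10-pi.h>):

        struct t10_pi_tuple {
                __be16 guard_tag;       /* CRC16 of the data block */
                __be16 app_tag;         /* application tag; this driver never checks it */
                __be32 ref_tag;         /* low 32 bits of the target LBA (Type 1/2) */
        };

This fixed size is why the revalidate path below only claims a PI type when ms equals sizeof(struct t10_pi_tuple), and why nvme_meta_noop passes the buffer through untouched for metadata formats the driver cannot interpret.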
@@ -1890,16 +2048,50 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 								__func__);
 		return 0;
 	}
+	if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
+		dev_warn(&dev->pci_dev->dev,
+			"identify failed ns:%d, setting capacity to 0\n",
+			ns->ns_id);
+		memset(id, 0, sizeof(*id));
+	}
 
-	if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
-		goto free;
-
-	lbaf = id->flbas & 0xf;
+	old_ms = ns->ms;
+	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
 	ns->lba_shift = id->lbaf[lbaf].ds;
+	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+
+	/*
+	 * If identify namespace failed, use a default 512 byte block size so
+	 * the block layer can use it before failing reads/writes for 0 capacity.
+	 */
+	if (ns->lba_shift == 0)
+		ns->lba_shift = 9;
+	bs = 1 << ns->lba_shift;
+
+	/* XXX: PI implementation requires metadata size equal to t10 pi tuple size */
+	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
+					id->dps & NVME_NS_DPS_PI_MASK : 0;
+
+	if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms ||
+				bs != queue_logical_block_size(disk->queue) ||
+				(ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+		blk_integrity_unregister(disk);
+
+	ns->pi_type = pi_type;
+	blk_queue_logical_block_size(ns->queue, bs);
+
+	if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) &&
+				!(id->flbas & NVME_NS_FLBAS_META_EXT))
+		nvme_init_integrity(ns);
+
+	if (id->ncap == 0 || (ns->ms && !disk->integrity))
+		set_capacity(disk, 0);
+	else
+		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+	if (dev->oncs & NVME_CTRL_ONCS_DSM)
+		nvme_config_discard(ns);
 
-	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-free:
 	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
 	return 0;
 }
@@ -1956,30 +2148,16 @@ static int nvme_kthread(void *data)
 	return 0;
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
-{
-	u32 logical_block_size = queue_logical_block_size(ns->queue);
-	ns->queue->limits.discard_zeroes_data = 0;
-	ns->queue->limits.discard_alignment = logical_block_size;
-	ns->queue->limits.discard_granularity = logical_block_size;
-	ns->queue->limits.max_discard_sectors = 0xffffffff;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
-			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
 	struct nvme_ns *ns;
 	struct gendisk *disk;
 	int node = dev_to_node(&dev->pci_dev->dev);
-	int lbaf;
-
-	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
-		return NULL;
 
 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
 	if (!ns)
-		return NULL;
+		return;
+
 	ns->queue = blk_mq_init_queue(&dev->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_free_ns;
@@ -1995,9 +2173,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 
 	ns->ns_id = nsid;
 	ns->disk = disk;
-	lbaf = id->flbas & 0xf;
-	ns->lba_shift = id->lbaf[lbaf].ds;
-	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+	ns->lba_shift = 9; /* default to 512 bytes until the disk is validated */
+	list_add_tail(&ns->list, &dev->namespaces);
+
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2014,18 +2192,23 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 	disk->driverfs_dev = &dev->pci_dev->dev;
 	disk->flags = GENHD_FL_EXT_DEVT;
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
-	if (dev->oncs & NVME_CTRL_ONCS_DSM)
-		nvme_config_discard(ns);
-
-	return ns;
 
+	/*
+	 * Initialize capacity to 0 until we establish the namespace format and
+	 * set up integrity extensions if necessary. The revalidate_disk after
+	 * add_disk allows the driver to register with integrity if the format
+	 * requires it.
+	 */
+	set_capacity(disk, 0);
+	nvme_revalidate_disk(ns->disk);
+	add_disk(ns->disk);
+	if (ns->ms)
+		revalidate_disk(ns->disk);
+	return;
 out_free_queue:
 	blk_cleanup_queue(ns->queue);
 out_free_ns:
 	kfree(ns);
-	return NULL;
 }
 
 static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2333,20 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	struct pci_dev *pdev = dev->pci_dev;
 	int res;
 	unsigned nn, i;
-	struct nvme_ns *ns;
 	struct nvme_id_ctrl *ctrl;
-	struct nvme_id_ns *id_ns;
 	void *mem;
 	dma_addr_t dma_addr;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-	mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
+	mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
 		dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-		res = -EIO;
-		goto out;
+		dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+		return -EIO;
 	}
 
 	ctrl = mem;
@@ -2191,6 +2372,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		} else
 			dev->max_hw_sectors = max_hw_sectors;
 	}
+	dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
 
 	dev->tagset.ops = &nvme_mq_ops;
 	dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2385,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	dev->tagset.driver_data = dev;
 
 	if (blk_mq_alloc_tag_set(&dev->tagset))
-		goto out;
-
-	id_ns = mem;
-	for (i = 1; i <= nn; i++) {
-		res = nvme_identify(dev, i, 0, dma_addr);
-		if (res)
-			continue;
-
-		if (id_ns->ncap == 0)
-			continue;
-
-		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-							dma_addr + 4096, NULL);
-		if (res)
-			memset(mem + 4096, 0, 4096);
+		return 0;
 
-		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
-		if (ns)
-			list_add_tail(&ns->list, &dev->namespaces);
-	}
-	list_for_each_entry(ns, &dev->namespaces, list)
-		add_disk(ns->disk);
-	res = 0;
+	for (i = 1; i <= nn; i++)
+		nvme_alloc_ns(dev, i);
 
-out:
-	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
-	return res;
+	return 0;
 }
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -2528,8 +2689,11 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 	struct nvme_ns *ns;
 
 	list_for_each_entry(ns, &dev->namespaces, list) {
-		if (ns->disk->flags & GENHD_FL_UP)
+		if (ns->disk->flags & GENHD_FL_UP) {
+			if (ns->disk->integrity)
+				blk_integrity_unregister(ns->disk);
 			del_gendisk(ns->disk);
+		}
 		if (!blk_queue_dying(ns->queue)) {
 			blk_mq_abort_requeue_list(ns->queue);
 			blk_cleanup_queue(ns->queue);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 19a5d4b23209..cca264db2478 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -121,6 +121,7 @@ struct nvme_ns {
 	unsigned ns_id;
 	int lba_shift;
 	int ms;
+	int pi_type;
 	u64 mode_select_num_blocks;
 	u32 mode_select_block_len;
 };
@@ -138,6 +139,7 @@ struct nvme_iod {
 	int nents;		/* Used in scatterlist */
 	int length;		/* Of data, in bytes */
 	dma_addr_t first_dma;
+	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
 	struct scatterlist sg[0];
 };
 
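A note on the placement of meta_sg: sg[0] is a zero-length (flexible) array, so the per-request allocation sizes the iod for the data scatterlist at its tail, and meta_sg must sit before it. A minimal sketch of the pattern follows; nvme_alloc_iod's real sizing also accounts for PRP lists, elided here.

        /* Sketch of the flexible-array allocation pattern behind nvme_iod. */
        struct nvme_iod *iod = kmalloc(sizeof(*iod) +
                        nseg * sizeof(struct scatterlist), GFP_ATOMIC);
        /* iod->meta_sg lives inside the struct proper;
         * iod->sg[0..nseg-1] occupies the allocation's tail. */

The single-entry scatterlist reflects the command format: cmnd->rw.metadata is one 64-bit physical address, so the integrity buffer must be physically contiguous. That is why blk_rq_count_integrity_sg() != 1 fails the request and why nvme_init_integrity() caps the queue at one integrity segment.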
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 26386cf3db44..406bfc95652c 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -124,10 +124,22 @@ struct nvme_id_ns {
 
 enum {
 	NVME_NS_FEAT_THIN	= 1 << 0,
+	NVME_NS_FLBAS_LBA_MASK	= 0xf,
+	NVME_NS_FLBAS_META_EXT	= 0x10,
 	NVME_LBAF_RP_BEST	= 0,
 	NVME_LBAF_RP_BETTER	= 1,
 	NVME_LBAF_RP_GOOD	= 2,
 	NVME_LBAF_RP_DEGRADED	= 3,
+	NVME_NS_DPC_PI_LAST	= 1 << 4,
+	NVME_NS_DPC_PI_FIRST	= 1 << 3,
+	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
+	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
+	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
+	NVME_NS_DPS_PI_FIRST	= 1 << 3,
+	NVME_NS_DPS_PI_MASK	= 0x7,
+	NVME_NS_DPS_PI_TYPE1	= 1,
+	NVME_NS_DPS_PI_TYPE2	= 2,
+	NVME_NS_DPS_PI_TYPE3	= 3,
 };
 
 struct nvme_smart_log {
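A sketch of how the driver consumes these Identify Namespace fields; the helper name is hypothetical and mirrors the nvme_revalidate_disk() logic above.

        /* Hypothetical decode of the new FLBAS/DPS masks. */
        static int nvme_ns_pi_type(const struct nvme_id_ns *id, unsigned ms)
        {
                if (ms != sizeof(struct t10_pi_tuple)) /* PI needs 8-byte metadata */
                        return 0;
                return id->dps & NVME_NS_DPS_PI_MASK;  /* 0 = none, 1..3 = Type 1..3 */
        }

NVME_NS_DPS_PI_FIRST (bit 3) reports that PI is transferred as the first eight bytes of the metadata region; this patch defines it but does not act on it, and namespaces with extended LBAs (NVME_NS_FLBAS_META_EXT) are excluded from integrity registration entirely.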
@@ -261,6 +273,10 @@ enum {
 	NVME_RW_DSM_LATENCY_LOW	= 3 << 4,
 	NVME_RW_DSM_SEQ_REQ	= 1 << 6,
 	NVME_RW_DSM_COMPRESSED	= 1 << 7,
+	NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
+	NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
+	NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
+	NVME_RW_PRINFO_PRACT	= 1 << 13,
 };
 
 struct nvme_dsm_cmd {