author    Jens Axboe <axboe@fb.com>  2015-02-21 01:12:02 -0500
committer Jens Axboe <axboe@fb.com>  2015-02-21 01:12:02 -0500
commit    decf6d79de84e427d409ff74156af2e77ffadd84 (patch)
tree      45801481c596ecd0b05c719025e29fa028aaceee /drivers/block
parent    045c47ca306acf30c740c285a77a4b4bda6be7c5 (diff)
parent    0c0f9b95c8b710b74772edd9693fe7ab5419a75a (diff)
Merge branch 'for-3.20' of git://git.infradead.org/users/kbusch/linux-nvme into for-linus
Merge 3.20 NVMe changes from Keith.
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/nvme-core.c | 496
-rw-r--r--  drivers/block/nvme-scsi.c |  96
2 files changed, 385 insertions(+), 207 deletions(-)
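Most of the nvme-core.c changes merged below wire up end-to-end data protection (T10 PI/DIF metadata), an asynchronous probe path, and a per-controller character device in place of the old misc device. As a rough standalone sketch of the ref-tag remapping idea behind the new nvme_dif_remap()/nvme_dif_prep()/nvme_dif_complete() helpers (not part of the commit; the names pi_tuple and remap_ref_tags are hypothetical, and htonl()/ntohl() stand in for the kernel's cpu_to_be32()/be32_to_cpu()):

/* Illustrative sketch only, assuming Type 1/2 protection information. */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>	/* htonl()/ntohl() as stand-ins for cpu_to_be32()/be32_to_cpu() */

struct pi_tuple {		/* hypothetical stand-in for struct t10_pi_tuple */
	uint16_t guard_tag;
	uint16_t app_tag;
	uint32_t ref_tag;	/* stored big-endian */
};

/*
 * Rewrite each block's ref tag from the submitter's virtual seed to the
 * physical LBA before a write reaches the device; swapping the last two
 * arguments gives the reverse mapping used when a read completes.
 */
static void remap_ref_tags(struct pi_tuple *pi, size_t nlb,
			   uint32_t virt, uint32_t phys)
{
	size_t i;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		/* Only tuples still carrying the expected seed are rewritten. */
		if (ntohl(pi[i].ref_tag) == virt)
			pi[i].ref_tag = htonl(phys);
	}
}

In the driver itself the tuples live in the request's bio integrity payload and are walked in place under kmap_atomic(); nvme_dif_prep() applies the virtual-to-physical direction on writes and nvme_dif_complete() restores the original seed on reads.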
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cbdfbbf98392..b64bccbb78c9 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -37,17 +37,18 @@
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#define NVME_MINORS		(1U << MINORBITS)
 #define NVME_Q_DEPTH		1024
 #define NVME_AQ_DEPTH		64
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT		(admin_timeout * HZ)
 #define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
-#define IOD_TIMEOUT		(retry_time * HZ)
 
 static unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -57,10 +58,6 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char retry_time = 30;
-module_param(retry_time, byte, 0644);
-MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
-
 static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@@ -68,6 +65,9 @@ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown")
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int nvme_char_major;
+module_param(nvme_char_major, int, 0);
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -76,7 +76,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
+
+static struct class *nvme_class;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -95,7 +96,6 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-	struct llist_node node;
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
 	char irqname[24];	/* nvme4294967295-65535\0 */
@@ -482,6 +482,62 @@ static int nvme_error_status(u16 status)
 	}
 }
 
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == v)
+		pi->ref_tag = cpu_to_be32(p);
+}
+
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == p)
+		pi->ref_tag = cpu_to_be32(v);
+}
+
+/**
+ * nvme_dif_remap - remaps ref tags to bip seed and physical lba
+ *
+ * The virtual start sector is the one that was originally submitted by the
+ * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
+ * start sector may be different. Remap protection information to match the
+ * physical LBA on writes, and back to the original seed on reads.
+ *
+ * Type 0 and 3 do not have a ref tag, so no remapping required.
+ */
+static void nvme_dif_remap(struct request *req,
+			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+	struct nvme_ns *ns = req->rq_disk->private_data;
+	struct bio_integrity_payload *bip;
+	struct t10_pi_tuple *pi;
+	void *p, *pmap;
+	u32 i, nlb, ts, phys, virt;
+
+	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
+		return;
+
+	bip = bio_integrity(req->bio);
+	if (!bip)
+		return;
+
+	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+	if (!pmap)
+		return;
+
+	p = pmap;
+	virt = bip_get_seed(bip);
+	phys = nvme_block_nr(ns, blk_rq_pos(req));
+	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
+	ts = ns->disk->integrity->tuple_size;
+
+	for (i = 0; i < nlb; i++, virt++, phys++) {
+		pi = (struct t10_pi_tuple *)p;
+		dif_swap(phys, virt, pi);
+		p += ts;
+	}
+	kunmap_atomic(pmap);
+}
+
 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
@@ -512,9 +568,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			"completing aborted command with status:%04x\n",
 			status);
 
-	if (iod->nents)
+	if (iod->nents) {
 		dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
 			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (blk_integrity_rq(req)) {
+			if (!rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_complete);
+			dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+				rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		}
+	}
 	nvme_free_iod(nvmeq->dev, iod);
 
 	blk_mq_complete_request(req);
@@ -670,6 +733,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+	if (blk_integrity_rq(req)) {
+		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			cmnd->rw.reftag = cpu_to_le32(
+					nvme_block_nr(ns, blk_rq_pos(req)));
+			break;
+		}
+	} else if (ns->ms)
+		control |= NVME_RW_PRINFO_PRACT;
+
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
@@ -690,6 +771,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_iod *iod;
 	enum dma_data_direction dma_dir;
 
+	/*
+	 * If formated with metadata, require the block layer provide a buffer
+	 * unless this namespace is formated such that the metadata can be
+	 * stripped/generated by the controller with PRACT=1.
+	 */
+	if (ns->ms && !blk_integrity_rq(req)) {
+		if (!(ns->pi_type && ns->ms == 8)) {
+			req->errors = -EFAULT;
+			blk_mq_complete_request(req);
+			return BLK_MQ_RQ_QUEUE_OK;
+		}
+	}
+
 	iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
 	if (!iod)
 		return BLK_MQ_RQ_QUEUE_BUSY;
@@ -725,6 +819,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 					iod->nents, dma_dir);
 			goto retry_cmd;
 		}
+		if (blk_integrity_rq(req)) {
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+				goto error_cmd;
+
+			sg_init_table(iod->meta_sg, 1);
+			if (blk_rq_map_integrity_sg(
+					req->q, req->bio, iod->meta_sg) != 1)
+				goto error_cmd;
+
+			if (rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_prep);
+
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+				goto error_cmd;
+		}
 	}
 
 	nvme_set_info(cmd, iod, req_completion);
@@ -817,14 +926,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
-								cmd_info)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	cancel_cmd_info(cmd_info, NULL);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 struct sync_cmd_info {
 	struct task_struct *task;
 	u32 result;
@@ -847,7 +948,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 						u32 *result, unsigned timeout)
 {
-	int ret;
 	struct sync_cmd_info cmdinfo;
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -859,29 +959,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 
 	nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
 
-	set_current_state(TASK_KILLABLE);
-	ret = nvme_submit_cmd(nvmeq, cmd);
-	if (ret) {
-		nvme_finish_cmd(nvmeq, req->tag, NULL);
-		set_current_state(TASK_RUNNING);
-	}
-	ret = schedule_timeout(timeout);
-
-	/*
-	 * Ensure that sync_completion has either run, or that it will
-	 * never run.
-	 */
-	nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
-
-	/*
-	 * We never got the completion
-	 */
-	if (cmdinfo.status == -EINTR)
-		return -EINTR;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	nvme_submit_cmd(nvmeq, cmd);
+	schedule();
 
 	if (result)
 		*result = cmdinfo.result;
-
 	return cmdinfo.status;
 }
 
@@ -1158,29 +1241,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd->nvmeq;
 
-	/*
-	 * The aborted req will be completed on receiving the abort req.
-	 * We enable the timer again. If hit twice, it'll cause a device reset,
-	 * as the device then is in a faulty state.
-	 */
-	int ret = BLK_EH_RESET_TIMER;
-
 	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
 							nvmeq->qid);
-
 	spin_lock_irq(&nvmeq->q_lock);
-	if (!nvmeq->dev->initialized) {
-		/*
-		 * Force cancelled command frees the request, which requires we
-		 * return BLK_EH_NOT_HANDLED.
-		 */
-		nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
-		ret = BLK_EH_NOT_HANDLED;
-	} else
-		nvme_abort_req(req);
+	nvme_abort_req(req);
 	spin_unlock_irq(&nvmeq->q_lock);
 
-	return ret;
+	/*
+	 * The aborted req will be completed on receiving the abort req.
+	 * We enable the timer again. If hit twice, it'll cause a device reset,
+	 * as the device then is in a faulty state.
+	 */
+	return BLK_EH_RESET_TIMER;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1233,7 +1305,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
 	struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
 
 	spin_lock_irq(&nvmeq->q_lock);
-	nvme_process_cq(nvmeq);
 	if (hctx && hctx->tags)
 		blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
 	spin_unlock_irq(&nvmeq->q_lock);
@@ -1256,7 +1327,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 	}
 	if (!qid && dev->admin_q)
 		blk_mq_freeze_queue_start(dev->admin_q);
-	nvme_clear_queue(nvmeq);
+
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_process_cq(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -1875,13 +1949,61 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
 	return 0;
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	ns->queue->limits.discard_zeroes_data = 0;
+	ns->queue->limits.discard_alignment = logical_block_size;
+	ns->queue->limits.discard_granularity = logical_block_size;
+	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+	.name			= "NVME_META_NOOP",
+	.generate_fn		= nvme_noop_generate,
+	.verify_fn		= nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+	struct blk_integrity integrity;
+
+	switch (ns->pi_type) {
+	case NVME_NS_DPS_PI_TYPE3:
+		integrity = t10_pi_type3_crc;
+		break;
+	case NVME_NS_DPS_PI_TYPE1:
+	case NVME_NS_DPS_PI_TYPE2:
+		integrity = t10_pi_type1_crc;
+		break;
+	default:
+		integrity = nvme_meta_noop;
+		break;
+	}
+	integrity.tuple_size = ns->ms;
+	blk_integrity_register(ns->disk, &integrity);
+	blk_queue_max_integrity_segments(ns->queue, 1);
+}
+
 static int nvme_revalidate_disk(struct gendisk *disk)
 {
 	struct nvme_ns *ns = disk->private_data;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_id_ns *id;
 	dma_addr_t dma_addr;
-	int lbaf;
+	int lbaf, pi_type, old_ms;
+	unsigned short bs;
 
 	id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
 								GFP_KERNEL);
@@ -1890,16 +2012,50 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 								__func__);
 		return 0;
 	}
+	if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
+		dev_warn(&dev->pci_dev->dev,
+			"identify failed ns:%d, setting capacity to 0\n",
+			ns->ns_id);
+		memset(id, 0, sizeof(*id));
+	}
 
-	if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
-		goto free;
-
-	lbaf = id->flbas & 0xf;
+	old_ms = ns->ms;
+	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
 	ns->lba_shift = id->lbaf[lbaf].ds;
+	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+
+	/*
+	 * If identify namespace failed, use default 512 byte block size so
+	 * block layer can use before failing read/write for 0 capacity.
+	 */
+	if (ns->lba_shift == 0)
+		ns->lba_shift = 9;
+	bs = 1 << ns->lba_shift;
+
+	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
+	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
+					id->dps & NVME_NS_DPS_PI_MASK : 0;
+
+	if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms ||
+				bs != queue_logical_block_size(disk->queue) ||
+				(ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+		blk_integrity_unregister(disk);
+
+	ns->pi_type = pi_type;
+	blk_queue_logical_block_size(ns->queue, bs);
+
+	if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) &&
+				!(id->flbas & NVME_NS_FLBAS_META_EXT))
+		nvme_init_integrity(ns);
+
+	if (id->ncap == 0 || (ns->ms && !disk->integrity))
+		set_capacity(disk, 0);
+	else
+		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+	if (dev->oncs & NVME_CTRL_ONCS_DSM)
+		nvme_config_discard(ns);
 
-	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
- free:
 	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
 	return 0;
 }
@@ -1923,8 +2079,7 @@ static int nvme_kthread(void *data)
 		spin_lock(&dev_list_lock);
 		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
-			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
-							dev->initialized) {
+			if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
 				if (work_busy(&dev->reset_work))
 					continue;
 				list_del_init(&dev->node);
@@ -1956,30 +2111,16 @@ static int nvme_kthread(void *data)
 	return 0;
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
-{
-	u32 logical_block_size = queue_logical_block_size(ns->queue);
-	ns->queue->limits.discard_zeroes_data = 0;
-	ns->queue->limits.discard_alignment = logical_block_size;
-	ns->queue->limits.discard_granularity = logical_block_size;
-	ns->queue->limits.max_discard_sectors = 0xffffffff;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
-			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
 	struct nvme_ns *ns;
 	struct gendisk *disk;
 	int node = dev_to_node(&dev->pci_dev->dev);
-	int lbaf;
-
-	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
-		return NULL;
 
 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
 	if (!ns)
-		return NULL;
+		return;
+
 	ns->queue = blk_mq_init_queue(&dev->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_free_ns;
@@ -1995,9 +2136,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 
 	ns->ns_id = nsid;
 	ns->disk = disk;
-	lbaf = id->flbas & 0xf;
-	ns->lba_shift = id->lbaf[lbaf].ds;
-	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
+	list_add_tail(&ns->list, &dev->namespaces);
+
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2011,21 +2152,26 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 	disk->fops = &nvme_fops;
 	disk->private_data = ns;
 	disk->queue = ns->queue;
-	disk->driverfs_dev = &dev->pci_dev->dev;
+	disk->driverfs_dev = dev->device;
 	disk->flags = GENHD_FL_EXT_DEVT;
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
-	if (dev->oncs & NVME_CTRL_ONCS_DSM)
-		nvme_config_discard(ns);
-
-	return ns;
 
+	/*
+	 * Initialize capacity to 0 until we establish the namespace format and
+	 * setup integrity extentions if necessary. The revalidate_disk after
+	 * add_disk allows the driver to register with integrity if the format
+	 * requires it.
+	 */
+	set_capacity(disk, 0);
+	nvme_revalidate_disk(ns->disk);
+	add_disk(ns->disk);
+	if (ns->ms)
+		revalidate_disk(ns->disk);
+	return;
 out_free_queue:
 	blk_cleanup_queue(ns->queue);
 out_free_ns:
 	kfree(ns);
-	return NULL;
 }
 
 static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2296,20 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	struct pci_dev *pdev = dev->pci_dev;
 	int res;
 	unsigned nn, i;
-	struct nvme_ns *ns;
 	struct nvme_id_ctrl *ctrl;
-	struct nvme_id_ns *id_ns;
 	void *mem;
 	dma_addr_t dma_addr;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-	mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
+	mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
 		dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-		res = -EIO;
-		goto out;
+		dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+		return -EIO;
 	}
 
 	ctrl = mem;
@@ -2191,6 +2335,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		} else
 			dev->max_hw_sectors = max_hw_sectors;
 	}
+	dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
 
 	dev->tagset.ops = &nvme_mq_ops;
 	dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2348,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	dev->tagset.driver_data = dev;
 
 	if (blk_mq_alloc_tag_set(&dev->tagset))
-		goto out;
-
-	id_ns = mem;
-	for (i = 1; i <= nn; i++) {
-		res = nvme_identify(dev, i, 0, dma_addr);
-		if (res)
-			continue;
-
-		if (id_ns->ncap == 0)
-			continue;
-
-		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-							dma_addr + 4096, NULL);
-		if (res)
-			memset(mem + 4096, 0, 4096);
+		return 0;
 
-		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
-		if (ns)
-			list_add_tail(&ns->list, &dev->namespaces);
-	}
-	list_for_each_entry(ns, &dev->namespaces, list)
-		add_disk(ns->disk);
-	res = 0;
+	for (i = 1; i <= nn; i++)
+		nvme_alloc_ns(dev, i);
 
- out:
-	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
-	return res;
+	return 0;
 }
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -2358,8 +2482,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
 	struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
-	nvme_clear_queue(nvmeq);
 	nvme_put_dq(dq);
 }
 
@@ -2502,7 +2624,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 	int i;
 	u32 csts = -1;
 
-	dev->initialized = 0;
 	nvme_dev_list_remove(dev);
 
 	if (dev->bar) {
@@ -2513,7 +2634,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 		for (i = dev->queue_count - 1; i >= 0; i--) {
 			struct nvme_queue *nvmeq = dev->queues[i];
 			nvme_suspend_queue(nvmeq);
-			nvme_clear_queue(nvmeq);
 		}
 	} else {
 		nvme_disable_io_queues(dev);
@@ -2521,6 +2641,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 		nvme_disable_queue(dev, 0);
 	}
 	nvme_dev_unmap(dev);
+
+	for (i = dev->queue_count - 1; i >= 0; i--)
+		nvme_clear_queue(dev->queues[i]);
 }
 
 static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2528,8 +2651,11 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 	struct nvme_ns *ns;
 
 	list_for_each_entry(ns, &dev->namespaces, list) {
-		if (ns->disk->flags & GENHD_FL_UP)
+		if (ns->disk->flags & GENHD_FL_UP) {
+			if (ns->disk->integrity)
+				blk_integrity_unregister(ns->disk);
 			del_gendisk(ns->disk);
+		}
 		if (!blk_queue_dying(ns->queue)) {
 			blk_mq_abort_requeue_list(ns->queue);
 			blk_cleanup_queue(ns->queue);
@@ -2611,6 +2737,7 @@ static void nvme_free_dev(struct kref *kref)
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
 	pci_dev_put(dev->pci_dev);
+	put_device(dev->device);
 	nvme_free_namespaces(dev);
 	nvme_release_instance(dev);
 	blk_mq_free_tag_set(&dev->tagset);
@@ -2622,11 +2749,27 @@
 
 static int nvme_dev_open(struct inode *inode, struct file *f)
 {
-	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
-								miscdev);
-	kref_get(&dev->kref);
-	f->private_data = dev;
-	return 0;
+	struct nvme_dev *dev;
+	int instance = iminor(inode);
+	int ret = -ENODEV;
+
+	spin_lock(&dev_list_lock);
+	list_for_each_entry(dev, &dev_list, node) {
+		if (dev->instance == instance) {
+			if (!dev->admin_q) {
+				ret = -EWOULDBLOCK;
+				break;
+			}
+			if (!kref_get_unless_zero(&dev->kref))
+				break;
+			f->private_data = dev;
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock(&dev_list_lock);
+
+	return ret;
 }
 
 static int nvme_dev_release(struct inode *inode, struct file *f)
@@ -2768,7 +2911,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		nvme_unfreeze_queues(dev);
 		nvme_set_irq_hints(dev);
 	}
-	dev->initialized = 1;
 	return 0;
 }
 
@@ -2799,6 +2941,7 @@ static void nvme_reset_workfn(struct work_struct *work)
 	dev->reset_workfn(work);
 }
 
+static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int node, result = -ENOMEM;
@@ -2834,37 +2977,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto release;
 
 	kref_init(&dev->kref);
-	result = nvme_dev_start(dev);
-	if (result)
+	dev->device = device_create(nvme_class, &pdev->dev,
+				MKDEV(nvme_char_major, dev->instance),
+				dev, "nvme%d", dev->instance);
+	if (IS_ERR(dev->device)) {
+		result = PTR_ERR(dev->device);
 		goto release_pools;
+	}
+	get_device(dev->device);
 
-	if (dev->online_queues > 1)
-		result = nvme_dev_add(dev);
-	if (result)
-		goto shutdown;
-
-	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
-	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
-	dev->miscdev.parent = &pdev->dev;
-	dev->miscdev.name = dev->name;
-	dev->miscdev.fops = &nvme_dev_fops;
-	result = misc_register(&dev->miscdev);
-	if (result)
-		goto remove;
-
-	nvme_set_irq_hints(dev);
-
-	dev->initialized = 1;
+	INIT_WORK(&dev->probe_work, nvme_async_probe);
+	schedule_work(&dev->probe_work);
 	return 0;
 
- remove:
-	nvme_dev_remove(dev);
-	nvme_dev_remove_admin(dev);
-	nvme_free_namespaces(dev);
- shutdown:
-	nvme_dev_shutdown(dev);
  release_pools:
-	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);
  release:
 	nvme_release_instance(dev);
@@ -2877,6 +3003,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return result;
 }
 
+static void nvme_async_probe(struct work_struct *work)
+{
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
+	int result;
+
+	result = nvme_dev_start(dev);
+	if (result)
+		goto reset;
+
+	if (dev->online_queues > 1)
+		result = nvme_dev_add(dev);
+	if (result)
+		goto reset;
+
+	nvme_set_irq_hints(dev);
+	return;
+ reset:
+	if (!work_busy(&dev->reset_work)) {
+		dev->reset_workfn = nvme_reset_failed_dev;
+		queue_work(nvme_workq, &dev->reset_work);
+	}
+}
+
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2902,11 +3051,12 @@ static void nvme_remove(struct pci_dev *pdev)
 	spin_unlock(&dev_list_lock);
 
 	pci_set_drvdata(pdev, NULL);
+	flush_work(&dev->probe_work);
 	flush_work(&dev->reset_work);
-	misc_deregister(&dev->miscdev);
 	nvme_dev_shutdown(dev);
 	nvme_dev_remove(dev);
 	nvme_dev_remove_admin(dev);
+	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
 	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
@@ -2990,11 +3140,26 @@ static int __init nvme_init(void)
 	else if (result > 0)
 		nvme_major = result;
 
+	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
+							&nvme_dev_fops);
+	if (result < 0)
+		goto unregister_blkdev;
+	else if (result > 0)
+		nvme_char_major = result;
+
+	nvme_class = class_create(THIS_MODULE, "nvme");
+	if (!nvme_class)
+		goto unregister_chrdev;
+
 	result = pci_register_driver(&nvme_driver);
 	if (result)
-		goto unregister_blkdev;
+		goto destroy_class;
 	return 0;
 
+ destroy_class:
+	class_destroy(nvme_class);
+ unregister_chrdev:
+	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
  unregister_blkdev:
 	unregister_blkdev(nvme_major, "nvme");
  kill_workq:
@@ -3005,9 +3170,10 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
-	unregister_hotcpu_notifier(&nvme_nb);
 	unregister_blkdev(nvme_major, "nvme");
 	destroy_workqueue(nvme_workq);
+	class_destroy(nvme_class);
+	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 	_nvme_check_size();
 }
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 5e78568026c3..e10196e0182d 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -779,10 +779,8 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_dev *dev = ns->dev;
 	dma_addr_t dma_addr;
 	void *mem;
-	struct nvme_id_ctrl *id_ctrl;
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
-	u8 ieee[4];
 	int xfer_len;
 	__be32 tmp_id = cpu_to_be32(ns->ns_id);
 
@@ -793,46 +791,60 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		goto out_dma;
 	}
 
-	/* nvme controller identify */
-	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		goto out_free;
-	if (nvme_sc) {
-		res = nvme_sc;
-		goto out_free;
-	}
-	id_ctrl = mem;
-
-	/* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
-	ieee[0] = id_ctrl->ieee[0] << 4;
-	ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
-	ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
-	ieee[3] = id_ctrl->ieee[2] >> 4;
-
-	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	memset(inq_response, 0, alloc_len);
 	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
-	inq_response[3] = 20;      /* Page Length */
-	/* Designation Descriptor start */
-	inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
-	inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
-	inq_response[6] = 0x00;    /* Rsvd */
-	inq_response[7] = 16;      /* Designator Length */
-	/* Designator start */
-	inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
-	inq_response[9] = ieee[2]; /* IEEE ID */
-	inq_response[10] = ieee[1]; /* IEEE ID */
-	inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
-	inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
-	inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
-	inq_response[14] = dev->serial[0];
-	inq_response[15] = dev->serial[1];
-	inq_response[16] = dev->model[0];
-	inq_response[17] = dev->model[1];
-	memcpy(&inq_response[18], &tmp_id, sizeof(u32));
-	/* Last 2 bytes are zero */
+	if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
+		struct nvme_id_ns *id_ns = mem;
+		void *eui = id_ns->eui64;
+		int len = sizeof(id_ns->eui64);
 
-	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+		res = nvme_trans_status_code(hdr, nvme_sc);
+		if (res)
+			goto out_free;
+		if (nvme_sc) {
+			res = nvme_sc;
+			goto out_free;
+		}
+
+		if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
+			if (bitmap_empty(eui, len * 8)) {
+				eui = id_ns->nguid;
+				len = sizeof(id_ns->nguid);
+			}
+		}
+		if (bitmap_empty(eui, len * 8))
+			goto scsi_string;
+
+		inq_response[3] = 4 + len; /* Page Length */
+		/* Designation Descriptor start */
+		inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
+		inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
+		inq_response[6] = 0x00;    /* Rsvd */
+		inq_response[7] = len;     /* Designator Length */
+		memcpy(&inq_response[8], eui, len);
+	} else {
+ scsi_string:
+		if (alloc_len < 72) {
+			res = nvme_trans_completion(hdr,
+					SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+			goto out_free;
+		}
+		inq_response[3] = 0x48; /* Page Length */
+		/* Designation Descriptor start */
+		inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
+		inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
+		inq_response[6] = 0x00;    /* Rsvd */
+		inq_response[7] = 0x44;    /* Designator Length */
+
+		sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+		memcpy(&inq_response[12], dev->model, sizeof(dev->model));
+		sprintf(&inq_response[52], "%04x", tmp_id);
+		memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
+	}
+	xfer_len = alloc_len;
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
@@ -1600,7 +1612,7 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
 		/* 10 Byte CDB */
 		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
 			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
-		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &&
+		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
 			MODE_SELECT_10_LLBAA_MASK;
 	} else {
 		/* 6 Byte CDB */
@@ -2222,7 +2234,7 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	page_code = GET_INQ_PAGE_CODE(cmd);
 	alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-	inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+	inq_response = kmalloc(alloc_len, GFP_KERNEL);
 	if (inq_response == NULL) {
 		res = -ENOMEM;
 		goto out_mem;