author     Jens Axboe <axboe@kernel.dk>    2017-09-01 15:52:37 -0400
committer  Jens Axboe <axboe@kernel.dk>    2017-09-01 15:52:37 -0400
commit     a254d989f31df97be9ee07a13b9b20087d0d3106
tree       3c7b93f881bd4f93997f67826b79e1e5d23c6b93
parent     40326d8a33d5b70039849d233975b63c733d94a2
parent     40a5fce495715c48c2e02668144e68a507ac5a30
Merge branch 'nvme-4.14' of git://git.infradead.org/nvme into for-4.14/block-postmerge
Pull NVMe updates from Christoph:
"A few more nvme updates for 4.14:
- generate a correct default NQN (Daniel Verkamp)
- metadata passthrough for the NVME_IOCTL_IO_CMD ioctl, as well as
related fixes and cleanups (Keith)
- better scalability for connecting to the NVMeOF target (Roland Dreier)
- target support for reading the host identifier (Omri Mann)"
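
The metadata passthrough item above is the user-visible piece of the core.c changes below: NVME_IOCTL_IO_CMD now honours the metadata pointer in struct nvme_passthru_cmd instead of dropping it. As a rough illustration only (not part of this series), a user-space caller might exercise it as in the sketch below; the device node, namespace ID, transfer size and 8-byte metadata size are assumptions, and a real namespace needs a matching LBA/metadata format.

/* Hypothetical example: read one block plus its separate metadata buffer. */
#include <fcntl.h>
#include <linux/nvme_ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char data[512], meta[8];	/* assumed 512+8 LBA format */
	struct nvme_passthru_cmd cmd;
	int fd, ret;

	fd = open("/dev/nvme0n1", O_RDONLY);	/* assumed namespace node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x02;				/* NVMe Read */
	cmd.nsid = 1;					/* assumed namespace ID */
	cmd.addr = (unsigned long long)(uintptr_t)data;
	cmd.data_len = sizeof(data);
	cmd.metadata = (unsigned long long)(uintptr_t)meta;
	cmd.metadata_len = sizeof(meta);
	cmd.cdw10 = 0;					/* starting LBA, low 32 bits */
	cmd.cdw11 = 0;					/* starting LBA, high 32 bits */
	cmd.cdw12 = 0;					/* 0-based block count: one block */

	ret = ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);	/* NVMe status, or -1 on error */
	if (ret)
		fprintf(stderr, "passthrough read failed: %d\n", ret);
	close(fd);
	return ret ? 1 : 0;
}

On the kernel side this lands in the new nvme_add_user_metadata() helper, which attaches the copied buffer to the request's bio as an integrity payload. The diffstat and full diff of the merge follow.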
-rw-r--r--  drivers/nvme/host/core.c          | 97
-rw-r--r--  drivers/nvme/host/fabrics.c       | 22
-rw-r--r--  drivers/nvme/host/nvme.h          |  7
-rw-r--r--  drivers/nvme/host/pci.c           |  4
-rw-r--r--  drivers/nvme/target/admin-cmd.c   | 17
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c |  1
-rw-r--r--  drivers/nvme/target/nvmet.h       |  1
7 files changed, 77 insertions(+), 72 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b0dd58db110e..277a7a02cba5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -600,10 +600,44 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
-int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen,
-		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
-		u32 *result, unsigned timeout)
+static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
+		unsigned len, u32 seed, bool write)
+{
+	struct bio_integrity_payload *bip;
+	int ret = -ENOMEM;
+	void *buf;
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf)
+		goto out;
+
+	ret = -EFAULT;
+	if (write && copy_from_user(buf, ubuf, len))
+		goto out_free_meta;
+
+	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+	if (IS_ERR(bip)) {
+		ret = PTR_ERR(bip);
+		goto out_free_meta;
+	}
+
+	bip->bip_iter.bi_size = len;
+	bip->bip_iter.bi_sector = seed;
+	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
+			offset_in_page(buf));
+	if (ret == len)
+		return buf;
+	ret = -ENOMEM;
+out_free_meta:
+	kfree(buf);
+out:
+	return ERR_PTR(ret);
+}
+
+static int nvme_submit_user_cmd(struct request_queue *q,
+		struct nvme_command *cmd, void __user *ubuffer,
+		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+		u32 meta_seed, u32 *result, unsigned timeout)
 {
 	bool write = nvme_is_write(cmd);
 	struct nvme_ns *ns = q->queuedata;
@@ -625,46 +659,17 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		if (ret)
 			goto out;
 		bio = req->bio;
-
-		if (!disk)
-			goto submit;
 		bio->bi_disk = disk;
-
-		if (meta_buffer && meta_len) {
-			struct bio_integrity_payload *bip;
-
-			meta = kmalloc(meta_len, GFP_KERNEL);
-			if (!meta) {
-				ret = -ENOMEM;
+		if (disk && meta_buffer && meta_len) {
+			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
+					meta_seed, write);
+			if (IS_ERR(meta)) {
+				ret = PTR_ERR(meta);
 				goto out_unmap;
 			}
-
-			if (write) {
-				if (copy_from_user(meta, meta_buffer,
-						meta_len)) {
-					ret = -EFAULT;
-					goto out_free_meta;
-				}
-			}
-
-			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
-			if (IS_ERR(bip)) {
-				ret = PTR_ERR(bip);
-				goto out_free_meta;
-			}
-
-			bip->bip_iter.bi_size = meta_len;
-			bip->bip_iter.bi_sector = meta_seed;
-
-			ret = bio_integrity_add_page(bio, virt_to_page(meta),
-					meta_len, offset_in_page(meta));
-			if (ret != meta_len) {
-				ret = -ENOMEM;
-				goto out_free_meta;
-			}
-		}
 		}
 	}
- submit:
+
 	blk_execute_rq(req->q, disk, req, 0);
 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
 		ret = -EINTR;
@@ -676,7 +681,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		if (copy_to_user(meta_buffer, meta, meta_len))
 			ret = -EFAULT;
 	}
- out_free_meta:
 	kfree(meta);
  out_unmap:
 	if (bio)
@@ -686,14 +690,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 	return ret;
 }
 
-int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen, u32 *result,
-		unsigned timeout)
-{
-	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
-			result, timeout);
-}
-
 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
@@ -983,7 +979,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
 
-	return __nvme_submit_user_cmd(ns->queue, &c,
+	return nvme_submit_user_cmd(ns->queue, &c,
 			(void __user *)(uintptr_t)io.addr, length,
 			metadata, meta_len, io.slba, NULL, 0);
 }
@@ -1021,7 +1017,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			&cmd.result, timeout);
+			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+			0, &cmd.result, timeout);
 	if (status >= 0) {
 		if (put_user(cmd.result, &ucmd->result))
 			return -EFAULT;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index fc3b6552f467..47307752dc65 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -22,7 +22,7 @@
 #include "fabrics.h"
 
 static LIST_HEAD(nvmf_transports);
-static DEFINE_MUTEX(nvmf_transports_mutex);
+static DECLARE_RWSEM(nvmf_transports_rwsem);
 
 static LIST_HEAD(nvmf_hosts);
 static DEFINE_MUTEX(nvmf_hosts_mutex);
@@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)
 
 	kref_init(&host->ref);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
+		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
 
 	mutex_lock(&nvmf_hosts_mutex);
 	list_add_tail(&host->list, &nvmf_hosts);
@@ -495,9 +495,9 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops)
 	if (!ops->create_ctrl)
 		return -EINVAL;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_add_tail(&ops->entry, &nvmf_transports);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 
 	return 0;
 }
@@ -514,9 +514,9 @@ EXPORT_SYMBOL_GPL(nvmf_register_transport);
  */
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
 {
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_del(&ops->entry);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
 
@@ -525,7 +525,7 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 {
 	struct nvmf_transport_ops *ops;
 
-	lockdep_assert_held(&nvmf_transports_mutex);
+	lockdep_assert_held(&nvmf_transports_rwsem);
 
 	list_for_each_entry(ops, &nvmf_transports, entry) {
 		if (strcmp(ops->name, opts->transport) == 0)
@@ -851,7 +851,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_free_opts;
 	opts->mask &= ~NVMF_REQUIRED_OPTS;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_read(&nvmf_transports_rwsem);
 	ops = nvmf_lookup_transport(opts);
 	if (!ops) {
 		pr_info("no handler found for transport %s.\n",
@@ -878,16 +878,16 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		dev_warn(ctrl->device,
 			"controller returned incorrect NQN: \"%s\".\n",
 			ctrl->subnqn);
-		mutex_unlock(&nvmf_transports_mutex);
+		up_read(&nvmf_transports_rwsem);
 		ctrl->ops->delete_ctrl(ctrl);
 		return ERR_PTR(-EINVAL);
 	}
 
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
 out_unlock:
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 out_free_opts:
 	nvmf_free_options(opts);
 	return ERR_PTR(ret);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 936c4056d98e..a19a587d60ed 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -314,13 +314,6 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags);
-int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen, u32 *result,
-		unsigned timeout);
-int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void __user *ubuffer, unsigned bufflen,
-		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
-		u32 *result, unsigned timeout);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 544805a2421b..11874afb2422 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -668,7 +668,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
 			goto out_unmap;
 
-		if (rq_data_dir(req))
+		if (req_op(req) == REQ_OP_WRITE)
 			nvme_dif_remap(req, nvme_dif_prep);
 
 		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
@@ -696,7 +696,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 	if (iod->nents) {
 		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
 		if (blk_integrity_rq(req)) {
-			if (!rq_data_dir(req))
+			if (req_op(req) == REQ_OP_READ)
 				nvme_dif_remap(req, nvme_dif_complete);
 			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
 		}
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 9496c71d2257..c4a0bf36e752 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -443,7 +443,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 	u32 val32;
 	u16 status = 0;
 
-	switch (cdw10 & 0xf) {
+	switch (cdw10 & 0xff) {
 	case NVME_FEAT_NUM_QUEUES:
 		nvmet_set_result(req,
 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
@@ -453,6 +453,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 		nvmet_set_result(req, req->sq->ctrl->kato);
 		break;
+	case NVME_FEAT_HOST_ID:
+		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
@@ -467,7 +470,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
 	u16 status = 0;
 
-	switch (cdw10 & 0xf) {
+	switch (cdw10 & 0xff) {
 	/*
 	 * These features are mandatory in the spec, but we don't
 	 * have a useful way to implement them.  We'll eventually
@@ -501,6 +504,16 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 	case NVME_FEAT_KATO:
 		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
 		break;
+	case NVME_FEAT_HOST_ID:
+		/* need 128-bit host identifier flag */
+		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+
+		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
+				sizeof(req->sq->ctrl->hostid));
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 3cc17269504b..859a66725291 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -154,6 +154,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 			le32_to_cpu(c->kato), &ctrl);
 	if (status)
 		goto out;
+	uuid_copy(&ctrl->hostid, &d->hostid);
 
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index e3b244c7e443..7d261ab894f4 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -115,6 +115,7 @@ struct nvmet_ctrl {
 	u32			cc;
 	u32			csts;
 
+	uuid_t			hostid;
 	u16			cntlid;
 	u32			kato;
 