author    Linus Torvalds <torvalds@linux-foundation.org>  2018-11-01 17:42:49 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-11-01 17:42:49 -0400
commit    b5b1de3537e2cd8f52971224a1be24bb3ce34a65 (patch)
tree      cd199730744628103c36b14c878b94af4fea4735 /drivers/vhost
parent    90de1fb83e7c760aa403381f072486fc4e3e8b5f (diff)
parent    79f800b2e76923cd8ce0aa659cb5c019d9643bc9 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio/vhost updates from Michael Tsirkin:
 "Fixes and tweaks:

   - virtio balloon page hinting support

   - vhost scsi control queue

   - misc fixes"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  MAINTAINERS: remove reference to bogus vsock file
  vhost/scsi: Use common handling code in request queue handler
  vhost/scsi: Extract common handling code from control queue handler
  vhost/scsi: Respond to control queue operations
  vhost/scsi: truncate T10 PI iov_iter to prot_bytes
  virtio-balloon: VIRTIO_BALLOON_F_PAGE_POISON
  mm/page_poison: expose page_poisoning_enabled to kernel modules
  virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT
  kvm_config: add CONFIG_VIRTIO_MENU
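The heart of the vhost/scsi refactoring pulled in here is a small set of helpers (vhost_scsi_get_desc(), vhost_scsi_chk_size(), vhost_scsi_get_req()) plus a vhost_scsi_ctx struct carrying per-request state, shared by the request and control queue handlers. Condensed from the diff below, the per-iteration flow both handlers now follow looks roughly like this (illustrative sketch, not a compilable excerpt):

    struct vhost_scsi_ctx vc;

    memset(&vc, 0, sizeof(vc));
    vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);  /* queue-specific */

    for (;;) {
            ret = vhost_scsi_get_desc(vs, vq, &vc);  /* pop descriptor, init out_iter */
            if (ret)
                    goto err;
            /* ... point vc.req/vc.req_size at the queue-specific header ... */
            ret = vhost_scsi_chk_size(vq, &vc);      /* validate buffer sizes */
            if (ret)
                    goto err;
            ret = vhost_scsi_get_req(vq, &vc, &tpg); /* copy header, check LUN/target */
            if (ret)
                    goto err;
            /* ... queue-specific processing ... */
            ret = 0;
    err:
            if (ret == -ENXIO)
                    break;          /* no more work until the next kick */
            else if (ret == -EIO)
                    vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
    }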
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/scsi.c | 426
1 file changed, 329 insertions(+), 97 deletions(-)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c24bb690680b..50dffe83714c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -203,6 +203,19 @@ struct vhost_scsi {
 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
+/*
+ * Context for processing request and control queue operations.
+ */
+struct vhost_scsi_ctx {
+	int head;
+	unsigned int out, in;
+	size_t req_size, rsp_size;
+	size_t out_size, in_size;
+	u8 *target, *lunp;
+	void *req;
+	struct iov_iter out_iter;
+};
+
 static struct workqueue_struct *vhost_scsi_workqueue;
 
 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -800,24 +813,120 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 	pr_err("Faulted on virtio_scsi_cmd_resp\n");
 }
 
+static int
+vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+		    struct vhost_scsi_ctx *vc)
+{
+	int ret = -ENXIO;
+
+	vc->head = vhost_get_vq_desc(vq, vq->iov,
+				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
+				     NULL, NULL);
+
+	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+		 vc->head, vc->out, vc->in);
+
+	/* On error, stop handling until the next kick. */
+	if (unlikely(vc->head < 0))
+		goto done;
+
+	/* Nothing new? Wait for eventfd to tell us they refilled. */
+	if (vc->head == vq->num) {
+		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+			vhost_disable_notify(&vs->dev, vq);
+			ret = -EAGAIN;
+		}
+		goto done;
+	}
+
+	/*
+	 * Get the size of request and response buffers.
+	 * FIXME: Not correct for BIDI operation
+	 */
+	vc->out_size = iov_length(vq->iov, vc->out);
+	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
+
+	/*
+	 * Copy over the virtio-scsi request header, which for a
+	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+	 * single iovec may contain both the header + outgoing
+	 * WRITE payloads.
+	 *
+	 * copy_from_iter() will advance out_iter, so that it will
+	 * point at the start of the outgoing WRITE payload, if
+	 * DMA_TO_DEVICE is set.
+	 */
+	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
+	ret = 0;
+
+done:
+	return ret;
+}
+
+static int
+vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
+{
+	if (unlikely(vc->in_size < vc->rsp_size)) {
+		vq_err(vq,
+		       "Response buf too small, need min %zu bytes got %zu",
+		       vc->rsp_size, vc->in_size);
+		return -EINVAL;
+	} else if (unlikely(vc->out_size < vc->req_size)) {
+		vq_err(vq,
+		       "Request buf too small, need min %zu bytes got %zu",
+		       vc->req_size, vc->out_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
+vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+		   struct vhost_scsi_tpg **tpgp)
+{
+	int ret = -EIO;
+
+	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
+					  &vc->out_iter))) {
+		vq_err(vq, "Faulted on copy_from_iter\n");
+	} else if (unlikely(*vc->lunp != 1)) {
+		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
+		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+	} else {
+		struct vhost_scsi_tpg **vs_tpg, *tpg;
+
+		vs_tpg = vq->private_data;	/* validated at handler entry */
+
+		tpg = READ_ONCE(vs_tpg[*vc->target]);
+		if (unlikely(!tpg)) {
+			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+		} else {
+			if (tpgp)
+				*tpgp = tpg;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
 	struct vhost_scsi_tpg **vs_tpg, *tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct virtio_scsi_cmd_req_pi v_req_pi;
+	struct vhost_scsi_ctx vc;
 	struct vhost_scsi_cmd *cmd;
-	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
+	struct iov_iter in_iter, prot_iter, data_iter;
 	u64 tag;
 	u32 exp_data_len, data_direction;
-	unsigned int out = 0, in = 0;
-	int head, ret, prot_bytes;
-	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
-	size_t out_size, in_size;
+	int ret, prot_bytes;
 	u16 lun;
-	u8 *target, *lunp, task_attr;
+	u8 task_attr;
 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
-	void *req, *cdb;
+	void *cdb;
 
 	mutex_lock(&vq->mutex);
 	/*
@@ -828,85 +937,47 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	if (!vs_tpg)
 		goto out;
 
+	memset(&vc, 0, sizeof(vc));
+	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+
 	vhost_disable_notify(&vs->dev, vq);
 
 	for (;;) {
-		head = vhost_get_vq_desc(vq, vq->iov,
-					 ARRAY_SIZE(vq->iov), &out, &in,
-					 NULL, NULL);
-		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-			 head, out, in);
-		/* On error, stop handling until the next kick. */
-		if (unlikely(head < 0))
-			break;
-		/* Nothing new? Wait for eventfd to tell us they refilled. */
-		if (head == vq->num) {
-			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
-				vhost_disable_notify(&vs->dev, vq);
-				continue;
-			}
-			break;
-		}
-		/*
-		 * Check for a sane response buffer so we can report early
-		 * errors back to the guest.
-		 */
-		if (unlikely(vq->iov[out].iov_len < rsp_size)) {
-			vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
-				" size, got %zu bytes\n", vq->iov[out].iov_len);
-			break;
-		}
+		ret = vhost_scsi_get_desc(vs, vq, &vc);
+		if (ret)
+			goto err;
+
 		/*
 		 * Setup pointers and values based upon different virtio-scsi
 		 * request header if T10_PI is enabled in KVM guest.
 		 */
 		if (t10_pi) {
-			req = &v_req_pi;
-			req_size = sizeof(v_req_pi);
-			lunp = &v_req_pi.lun[0];
-			target = &v_req_pi.lun[1];
+			vc.req = &v_req_pi;
+			vc.req_size = sizeof(v_req_pi);
+			vc.lunp = &v_req_pi.lun[0];
+			vc.target = &v_req_pi.lun[1];
 		} else {
-			req = &v_req;
-			req_size = sizeof(v_req);
-			lunp = &v_req.lun[0];
-			target = &v_req.lun[1];
+			vc.req = &v_req;
+			vc.req_size = sizeof(v_req);
+			vc.lunp = &v_req.lun[0];
+			vc.target = &v_req.lun[1];
 		}
-		/*
-		 * FIXME: Not correct for BIDI operation
-		 */
-		out_size = iov_length(vq->iov, out);
-		in_size = iov_length(&vq->iov[out], in);
 
 		/*
-		 * Copy over the virtio-scsi request header, which for a
-		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
-		 * single iovec may contain both the header + outgoing
-		 * WRITE payloads.
-		 *
-		 * copy_from_iter() will advance out_iter, so that it will
-		 * point at the start of the outgoing WRITE payload, if
-		 * DMA_TO_DEVICE is set.
+		 * Validate the size of request and response buffers.
+		 * Check for a sane response buffer so we can report
+		 * early errors back to the guest.
 		 */
-		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
+		ret = vhost_scsi_chk_size(vq, &vc);
+		if (ret)
+			goto err;
 
-		if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
-			vq_err(vq, "Faulted on copy_from_iter\n");
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
-		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
-		if (unlikely(*lunp != 1)) {
-			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
+		ret = vhost_scsi_get_req(vq, &vc, &tpg);
+		if (ret)
+			goto err;
+
+		ret = -EIO;	/* bad target on any error from here on */
 
-		tpg = READ_ONCE(vs_tpg[*target]);
-		if (unlikely(!tpg)) {
-			/* Target does not exist, fail the request */
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
 		/*
 		 * Determine data_direction by calculating the total outgoing
 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
@@ -924,17 +995,17 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 */
 		prot_bytes = 0;
 
-		if (out_size > req_size) {
+		if (vc.out_size > vc.req_size) {
 			data_direction = DMA_TO_DEVICE;
-			exp_data_len = out_size - req_size;
-			data_iter = out_iter;
-		} else if (in_size > rsp_size) {
+			exp_data_len = vc.out_size - vc.req_size;
+			data_iter = vc.out_iter;
+		} else if (vc.in_size > vc.rsp_size) {
 			data_direction = DMA_FROM_DEVICE;
-			exp_data_len = in_size - rsp_size;
+			exp_data_len = vc.in_size - vc.rsp_size;
 
-			iov_iter_init(&in_iter, READ, &vq->iov[out], in,
-				      rsp_size + exp_data_len);
-			iov_iter_advance(&in_iter, rsp_size);
+			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
+				      vc.rsp_size + exp_data_len);
+			iov_iter_advance(&in_iter, vc.rsp_size);
 			data_iter = in_iter;
 		} else {
 			data_direction = DMA_NONE;
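To make the direction arithmetic above concrete: the guest's out iovecs carry the request header plus any WRITE payload, and the in iovecs carry the response plus any READ payload, so comparing total lengths against the header and response sizes yields both the direction and the expected data length. A standalone C illustration (the 51/108-byte constants are assumptions based on the default virtio-scsi CDB and sense sizes; the kernel simply uses sizeof() on the real structs):

    #include <stdio.h>
    #include <stddef.h>

    #define REQ_SIZE 51U	/* 8 lun + 8 tag + 3 attr bytes + 32 cdb (assumed) */
    #define RSP_SIZE 108U	/* 4 + 4 + 2 + 1 + 1 + 96 sense bytes (assumed) */

    static const char *direction(size_t out_size, size_t in_size,
    			     size_t *exp_data_len)
    {
    	if (out_size > REQ_SIZE) {		/* payload follows the header */
    		*exp_data_len = out_size - REQ_SIZE;
    		return "DMA_TO_DEVICE (guest WRITE)";
    	} else if (in_size > RSP_SIZE) {	/* payload follows the response */
    		*exp_data_len = in_size - RSP_SIZE;
    		return "DMA_FROM_DEVICE (guest READ)";
    	}
    	*exp_data_len = 0;
    	return "DMA_NONE";
    }

    int main(void)
    {
    	size_t len;

    	printf("%s, %zu bytes\n", direction(REQ_SIZE + 4096, RSP_SIZE, &len), len);
    	printf("%s, %zu bytes\n", direction(REQ_SIZE, RSP_SIZE + 512, &len), len);
    	printf("%s, %zu bytes\n", direction(REQ_SIZE, RSP_SIZE, &len), len);
    	return 0;
    }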
@@ -950,21 +1021,20 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			if (data_direction != DMA_TO_DEVICE) {
 				vq_err(vq, "Received non zero pi_bytesout,"
 					" but wrong data_direction\n");
-				vhost_scsi_send_bad_target(vs, vq, head, out);
-				continue;
+				goto err;
 			}
 			prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
 		} else if (v_req_pi.pi_bytesin) {
 			if (data_direction != DMA_FROM_DEVICE) {
 				vq_err(vq, "Received non zero pi_bytesin,"
 					" but wrong data_direction\n");
-				vhost_scsi_send_bad_target(vs, vq, head, out);
-				continue;
+				goto err;
 			}
 			prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
 		}
 		/*
-		 * Set prot_iter to data_iter, and advance past any
+		 * Set prot_iter to data_iter and truncate it to
+		 * prot_bytes, and advance data_iter past any
 		 * preceeding prot_bytes that may be present.
 		 *
 		 * Also fix up the exp_data_len to reflect only the
@@ -973,6 +1043,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		if (prot_bytes) {
 			exp_data_len -= prot_bytes;
 			prot_iter = data_iter;
+			iov_iter_truncate(&prot_iter, prot_bytes);
 			iov_iter_advance(&data_iter, prot_bytes);
 		}
 		tag = vhost64_to_cpu(vq, v_req_pi.tag);
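The iov_iter_truncate() line added above is the substance of "vhost/scsi: truncate T10 PI iov_iter to prot_bytes": since prot_iter is initialized as a copy of data_iter, its count would otherwise still span the protection bytes plus the data payload. A userspace analogue of the copy/truncate/advance split (hypothetical struct iter standing in for struct iov_iter):

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical stand-in for struct iov_iter: a cursor plus remaining count. */
    struct iter {
    	const char *p;
    	size_t count;
    };

    static void truncate_iter(struct iter *i, size_t n)	/* ~iov_iter_truncate */
    {
    	if (i->count > n)
    		i->count = n;
    }

    static void advance_iter(struct iter *i, size_t n)	/* ~iov_iter_advance */
    {
    	i->p += n;
    	i->count -= n;
    }

    int main(void)
    {
    	char buf[8 + 4096];		/* 8 PI bytes, then 4096 data bytes */
    	struct iter data = { buf, sizeof(buf) };
    	struct iter prot = data;	/* prot_iter = data_iter */

    	truncate_iter(&prot, 8);	/* prot covers only the PI bytes... */
    	advance_iter(&data, 8);		/* ...and data skips past them */

    	assert(prot.count == 8);
    	assert(data.count == 4096 && data.p == buf + 8);
    	return 0;
    }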
@@ -996,8 +1067,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
+			goto err;
 		}
 		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
 					 exp_data_len + prot_bytes,
@@ -1005,13 +1075,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		if (IS_ERR(cmd)) {
 			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
 			       PTR_ERR(cmd));
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
+			goto err;
 		}
 		cmd->tvc_vhost = vs;
 		cmd->tvc_vq = vq;
-		cmd->tvc_resp_iov = vq->iov[out];
-		cmd->tvc_in_iovs = in;
+		cmd->tvc_resp_iov = vq->iov[vc.out];
+		cmd->tvc_in_iovs = vc.in;
 
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
 			 cmd->tvc_cdb[0], cmd->tvc_lun);
@@ -1019,14 +1088,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1019 " %d\n", cmd, exp_data_len, prot_bytes, data_direction); 1088 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1020 1089
1021 if (data_direction != DMA_NONE) { 1090 if (data_direction != DMA_NONE) {
1022 ret = vhost_scsi_mapal(cmd, 1091 if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1023 prot_bytes, &prot_iter, 1092 &prot_iter, exp_data_len,
1024 exp_data_len, &data_iter); 1093 &data_iter))) {
1025 if (unlikely(ret)) {
1026 vq_err(vq, "Failed to map iov to sgl\n"); 1094 vq_err(vq, "Failed to map iov to sgl\n");
1027 vhost_scsi_release_cmd(&cmd->tvc_se_cmd); 1095 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1028 vhost_scsi_send_bad_target(vs, vq, head, out); 1096 goto err;
1029 continue;
1030 } 1097 }
1031 } 1098 }
1032 /* 1099 /*
@@ -1034,7 +1101,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 * complete the virtio-scsi request in TCM callback context via
 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
 		 */
-		cmd->tvc_vq_desc = head;
+		cmd->tvc_vq_desc = vc.head;
 		/*
 		 * Dispatch cmd descriptor for cmwq execution in process
 		 * context provided by vhost_scsi_workqueue. This also ensures
@@ -1043,6 +1110,166 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 */
 		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
 		queue_work(vhost_scsi_workqueue, &cmd->work);
+		ret = 0;
+err:
+		/*
+		 * ENXIO:  No more requests, or read error, wait for next kick
+		 * EINVAL: Invalid response buffer, drop the request
+		 * EIO:    Respond with bad target
+		 * EAGAIN: Pending request
+		 */
+		if (ret == -ENXIO)
+			break;
+		else if (ret == -EIO)
+			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+	}
+out:
+	mutex_unlock(&vq->mutex);
+}
+
+static void
+vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
+			   struct vhost_virtqueue *vq,
+			   struct vhost_scsi_ctx *vc)
+{
+	struct virtio_scsi_ctrl_tmf_resp __user *resp;
+	struct virtio_scsi_ctrl_tmf_resp rsp;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	memset(&rsp, 0, sizeof(rsp));
+	rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+	resp = vq->iov[vc->out].iov_base;
+	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+	else
+		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
+}
+
+static void
+vhost_scsi_send_an_resp(struct vhost_scsi *vs,
+			struct vhost_virtqueue *vq,
+			struct vhost_scsi_ctx *vc)
+{
+	struct virtio_scsi_ctrl_an_resp __user *resp;
+	struct virtio_scsi_ctrl_an_resp rsp;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
+	rsp.response = VIRTIO_SCSI_S_OK;
+	resp = vq->iov[vc->out].iov_base;
+	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+	else
+		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
+}
+
+static void
+vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+	union {
+		__virtio32 type;
+		struct virtio_scsi_ctrl_an_req an;
+		struct virtio_scsi_ctrl_tmf_req tmf;
+	} v_req;
+	struct vhost_scsi_ctx vc;
+	size_t typ_size;
+	int ret;
+
+	mutex_lock(&vq->mutex);
+	/*
+	 * We can handle the vq only after the endpoint is setup by calling the
+	 * VHOST_SCSI_SET_ENDPOINT ioctl.
+	 */
+	if (!vq->private_data)
+		goto out;
+
+	memset(&vc, 0, sizeof(vc));
+
+	vhost_disable_notify(&vs->dev, vq);
+
+	for (;;) {
+		ret = vhost_scsi_get_desc(vs, vq, &vc);
+		if (ret)
+			goto err;
+
+		/*
+		 * Get the request type first in order to setup
+		 * other parameters dependent on the type.
+		 */
+		vc.req = &v_req.type;
+		typ_size = sizeof(v_req.type);
+
+		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
+						  &vc.out_iter))) {
+			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
+			/*
+			 * The size of the response buffer depends on the
+			 * request type and must be validated against it.
+			 * Since the request type is not known, don't send
+			 * a response.
+			 */
+			continue;
+		}
+
+		switch (v_req.type) {
+		case VIRTIO_SCSI_T_TMF:
+			vc.req = &v_req.tmf;
+			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+			vc.lunp = &v_req.tmf.lun[0];
+			vc.target = &v_req.tmf.lun[1];
+			break;
+		case VIRTIO_SCSI_T_AN_QUERY:
+		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
+			vc.req = &v_req.an;
+			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
+			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+			vc.lunp = &v_req.an.lun[0];
+			vc.target = NULL;
+			break;
+		default:
+			vq_err(vq, "Unknown control request %d", v_req.type);
+			continue;
+		}
+
+		/*
+		 * Validate the size of request and response buffers.
+		 * Check for a sane response buffer so we can report
+		 * early errors back to the guest.
+		 */
+		ret = vhost_scsi_chk_size(vq, &vc);
+		if (ret)
+			goto err;
+
+		/*
+		 * Get the rest of the request now that its size is known.
+		 */
+		vc.req += typ_size;
+		vc.req_size -= typ_size;
+
+		ret = vhost_scsi_get_req(vq, &vc, NULL);
+		if (ret)
+			goto err;
+
+		if (v_req.type == VIRTIO_SCSI_T_TMF)
+			vhost_scsi_send_tmf_reject(vs, vq, &vc);
+		else
+			vhost_scsi_send_an_resp(vs, vq, &vc);
+err:
+		/*
+		 * ENXIO:  No more requests, or read error, wait for next kick
+		 * EINVAL: Invalid response buffer, drop the request
+		 * EIO:    Respond with bad target
+		 * EAGAIN: Pending request
+		 */
+		if (ret == -ENXIO)
+			break;
+		else if (ret == -EIO)
+			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
 	}
 out:
 	mutex_unlock(&vq->mutex);
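One detail of vhost_scsi_ctl_handle_vq() above deserves a note: the three control request types share only a leading type field, so the handler copies that field first, picks the sizes in the switch, then advances vc.req by typ_size and shrinks vc.req_size before fetching the remainder. A minimal userspace analogue of this two-phase read of a tagged union (all types and the read_bytes() helper here are hypothetical stand-ins):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical miniature of the control-queue request union. */
    struct tmf_req { uint32_t type; uint8_t lun[8]; int64_t id; };
    struct an_req  { uint32_t type; uint8_t lun[8]; uint32_t event; };

    union ctrl_req {
    	uint32_t type;
    	struct tmf_req tmf;
    	struct an_req an;
    };

    /* Stand-in for copy_from_iter_full(): consume n bytes from a flat buffer. */
    static int read_bytes(const uint8_t **src, size_t *left, void *dst, size_t n)
    {
    	if (*left < n)
    		return -1;
    	memcpy(dst, *src, n);
    	*src += n;
    	*left -= n;
    	return 0;
    }

    int main(void)
    {
    	uint8_t wire[sizeof(struct tmf_req)] = { 0 };	/* pretend guest buffer */
    	const uint8_t *src = wire;
    	size_t left = sizeof(wire);
    	union ctrl_req req;
    	void *p = &req.type;
    	size_t req_size, typ_size = sizeof(req.type);

    	/* Phase 1: the type alone decides how big the full request is. */
    	if (read_bytes(&src, &left, p, typ_size))
    		return 1;
    	req_size = (req.type == 0 /* VIRTIO_SCSI_T_TMF */) ?
    		   sizeof(struct tmf_req) : sizeof(struct an_req);

    	/* Phase 2: fetch the remainder, as vc.req += typ_size does. */
    	p = (char *)p + typ_size;
    	req_size -= typ_size;
    	if (read_bytes(&src, &left, p, req_size))
    		return 1;

    	printf("read %zu trailing bytes for type %u\n", req_size,
    	       (unsigned)req.type);
    	return 0;
    }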
@@ -1050,7 +1277,12 @@ out:
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 {
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
 	pr_debug("%s: The handling func for control queue.\n", __func__);
+	vhost_scsi_ctl_handle_vq(vs, vq);
 }
 
 static void
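The container_of() calls added to vhost_scsi_ctl_handle_kick() above recover the owning structures from the embedded work item: the vhost_work lives inside the virtqueue's poll struct, and vq->dev points back into the enclosing vhost_scsi. A freestanding sketch of the idiom (simplified macro and hypothetical types; the kernel's version adds type checking):

    #include <assert.h>
    #include <stddef.h>

    /* Userspace copy of the idea: map a member pointer back to its parent. */
    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };
    struct virtqueue_like { int num; struct work poll_work; };

    int main(void)
    {
    	struct virtqueue_like vq = { .num = 256 };
    	struct work *w = &vq.poll_work;

    	/* Same recovery step as the kick handler in the diff above. */
    	struct virtqueue_like *owner =
    		container_of(w, struct virtqueue_like, poll_work);

    	assert(owner == &vq && owner->num == 256);
    	return 0;
    }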