aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/vhost/scsi.c
diff options
context:
space:
mode:
authorBijan Mottahedeh <bijan.mottahedeh@oracle.com>2018-09-17 20:09:49 -0400
committerMichael S. Tsirkin <mst@redhat.com>2018-10-24 21:16:13 -0400
commit09d7583294aada625349b6f80f1e1c730b5a5208 (patch)
treed3aba1d9ce303a2153f57d13ab305746edcb8ab9 /drivers/vhost/scsi.c
parent3f8ca2e115e55af4c15d97dda635e948d2e380be (diff)
vhost/scsi: Use common handling code in request queue handler
Change the request queue handler to use the same common handling routines as the control queue handler. Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost/scsi.c')
-rw-r--r--drivers/vhost/scsi.c361
1 file changed, 164 insertions, 197 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4cd03a1d7f21..50dffe83714c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -813,24 +813,120 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
813 pr_err("Faulted on virtio_scsi_cmd_resp\n"); 813 pr_err("Faulted on virtio_scsi_cmd_resp\n");
814} 814}
815 815
/*
 * vhost_scsi_get_desc - pull the next descriptor off @vq and prime @vc.
 *
 * Fills vc->head/out/in via vhost_get_vq_desc(), records the total
 * request (out) and response (in) iovec lengths, and initializes
 * vc->out_iter over the outgoing iovecs so the caller can copy the
 * virtio-scsi request header out of it.
 *
 * Return: 0 on success with @vc primed;
 *  -ENXIO  on a descriptor read error, or when the ring is empty and
 *          guest notification was successfully re-enabled (caller stops
 *          handling until the next kick);
 *  -EAGAIN when new requests arrived while re-enabling notification
 *          (caller should loop and retry).
 */
816static int
817vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
818		 struct vhost_scsi_ctx *vc)
819{
820	int ret = -ENXIO;
821
822	vc->head = vhost_get_vq_desc(vq, vq->iov,
823			 ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
824			 NULL, NULL);
825
826	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
827		 vc->head, vc->out, vc->in);
828
829	/* On error, stop handling until the next kick. */
830	if (unlikely(vc->head < 0))
831		goto done;
832
833	/* Nothing new? Wait for eventfd to tell us they refilled. */
834	if (vc->head == vq->num) {
835		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
836			vhost_disable_notify(&vs->dev, vq);
			/* Ring was refilled while re-arming: retry now. */
837			ret = -EAGAIN;
838		}
839		goto done;
840	}
841
842	/*
843	 * Get the size of request and response buffers.
844	 * FIXME: Not correct for BIDI operation
845	 */
846	vc->out_size = iov_length(vq->iov, vc->out);
847	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
848
849	/*
850	 * Copy over the virtio-scsi request header, which for a
851	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
852	 * single iovec may contain both the header + outgoing
853	 * WRITE payloads.
854	 *
855	 * copy_from_iter() will advance out_iter, so that it will
856	 * point at the start of the outgoing WRITE payload, if
857	 * DMA_TO_DEVICE is set.
858	 */
859	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
860	ret = 0;
861
862done:
863	return ret;
864}
865
/*
 * vhost_scsi_chk_size - validate request/response buffer sizes in @vc.
 *
 * Return: 0 when both buffers are large enough;
 *  -EINVAL if the response buffer is smaller than vc->rsp_size — there
 *          is no room to report status, so the caller drops the request;
 *  -EIO    if the request buffer is smaller than vc->req_size — the
 *          caller responds with a bad-target status.
 */
866static int
867vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
868{
869	if (unlikely(vc->in_size < vc->rsp_size)) {
870		vq_err(vq,
871		       "Response buf too small, need min %zu bytes got %zu",
872		       vc->rsp_size, vc->in_size);
873		return -EINVAL;
874	} else if (unlikely(vc->out_size < vc->req_size)) {
875		vq_err(vq,
876		       "Request buf too small, need min %zu bytes got %zu",
877		       vc->req_size, vc->out_size);
878		return -EIO;
879	}
880
881	return 0;
882}
883
/*
 * vhost_scsi_get_req - copy the request header and resolve the target.
 *
 * Copies vc->req_size bytes of the virtio-scsi request header out of
 * vc->out_iter (advancing it past the header), validates the LUN format
 * (byte 0 must be 1 per the virtio-scsi spec), and looks the target up
 * in the vs_tpg array hanging off vq->private_data.
 *
 * Return: 0 on success (with *@tpgp set when @tpgp is non-NULL);
 *  -EIO on a header copy fault, an illegal LUN, or a nonexistent target
 *  (the caller responds with a bad-target status).
 */
884static int
885vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
886		 struct vhost_scsi_tpg **tpgp)
887{
888	int ret = -EIO;
889
890	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
891					 &vc->out_iter))) {
892		vq_err(vq, "Faulted on copy_from_iter\n");
893	} else if (unlikely(*vc->lunp != 1)) {
894		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
895		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
896	} else {
897		struct vhost_scsi_tpg **vs_tpg, *tpg;
898
899		vs_tpg = vq->private_data;	/* validated at handler entry */
900
901		tpg = READ_ONCE(vs_tpg[*vc->target]);
902		if (unlikely(!tpg)) {
903			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
904		} else {
905			if (tpgp)
906				*tpgp = tpg;
907			ret = 0;
908		}
909	}
910
911	return ret;
912}
913
816static void 914static void
817vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) 915vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
818{ 916{
819 struct vhost_scsi_tpg **vs_tpg, *tpg; 917 struct vhost_scsi_tpg **vs_tpg, *tpg;
820 struct virtio_scsi_cmd_req v_req; 918 struct virtio_scsi_cmd_req v_req;
821 struct virtio_scsi_cmd_req_pi v_req_pi; 919 struct virtio_scsi_cmd_req_pi v_req_pi;
920 struct vhost_scsi_ctx vc;
822 struct vhost_scsi_cmd *cmd; 921 struct vhost_scsi_cmd *cmd;
823 struct iov_iter out_iter, in_iter, prot_iter, data_iter; 922 struct iov_iter in_iter, prot_iter, data_iter;
824 u64 tag; 923 u64 tag;
825 u32 exp_data_len, data_direction; 924 u32 exp_data_len, data_direction;
826 unsigned int out = 0, in = 0; 925 int ret, prot_bytes;
827 int head, ret, prot_bytes;
828 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
829 size_t out_size, in_size;
830 u16 lun; 926 u16 lun;
831 u8 *target, *lunp, task_attr; 927 u8 task_attr;
832 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); 928 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
833 void *req, *cdb; 929 void *cdb;
834 930
835 mutex_lock(&vq->mutex); 931 mutex_lock(&vq->mutex);
836 /* 932 /*
@@ -841,85 +937,47 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
841 if (!vs_tpg) 937 if (!vs_tpg)
842 goto out; 938 goto out;
843 939
940 memset(&vc, 0, sizeof(vc));
941 vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
942
844 vhost_disable_notify(&vs->dev, vq); 943 vhost_disable_notify(&vs->dev, vq);
845 944
846 for (;;) { 945 for (;;) {
847 head = vhost_get_vq_desc(vq, vq->iov, 946 ret = vhost_scsi_get_desc(vs, vq, &vc);
848 ARRAY_SIZE(vq->iov), &out, &in, 947 if (ret)
849 NULL, NULL); 948 goto err;
850 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", 949
851 head, out, in);
852 /* On error, stop handling until the next kick. */
853 if (unlikely(head < 0))
854 break;
855 /* Nothing new? Wait for eventfd to tell us they refilled. */
856 if (head == vq->num) {
857 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
858 vhost_disable_notify(&vs->dev, vq);
859 continue;
860 }
861 break;
862 }
863 /*
864 * Check for a sane response buffer so we can report early
865 * errors back to the guest.
866 */
867 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
868 vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
869 " size, got %zu bytes\n", vq->iov[out].iov_len);
870 break;
871 }
872 /* 950 /*
873 * Setup pointers and values based upon different virtio-scsi 951 * Setup pointers and values based upon different virtio-scsi
874 * request header if T10_PI is enabled in KVM guest. 952 * request header if T10_PI is enabled in KVM guest.
875 */ 953 */
876 if (t10_pi) { 954 if (t10_pi) {
877 req = &v_req_pi; 955 vc.req = &v_req_pi;
878 req_size = sizeof(v_req_pi); 956 vc.req_size = sizeof(v_req_pi);
879 lunp = &v_req_pi.lun[0]; 957 vc.lunp = &v_req_pi.lun[0];
880 target = &v_req_pi.lun[1]; 958 vc.target = &v_req_pi.lun[1];
881 } else { 959 } else {
882 req = &v_req; 960 vc.req = &v_req;
883 req_size = sizeof(v_req); 961 vc.req_size = sizeof(v_req);
884 lunp = &v_req.lun[0]; 962 vc.lunp = &v_req.lun[0];
885 target = &v_req.lun[1]; 963 vc.target = &v_req.lun[1];
886 } 964 }
887 /*
888 * FIXME: Not correct for BIDI operation
889 */
890 out_size = iov_length(vq->iov, out);
891 in_size = iov_length(&vq->iov[out], in);
892 965
893 /* 966 /*
894 * Copy over the virtio-scsi request header, which for a 967 * Validate the size of request and response buffers.
895 * ANY_LAYOUT enabled guest may span multiple iovecs, or a 968 * Check for a sane response buffer so we can report
896 * single iovec may contain both the header + outgoing 969 * early errors back to the guest.
897 * WRITE payloads.
898 *
899 * copy_from_iter() will advance out_iter, so that it will
900 * point at the start of the outgoing WRITE payload, if
901 * DMA_TO_DEVICE is set.
902 */ 970 */
903 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); 971 ret = vhost_scsi_chk_size(vq, &vc);
972 if (ret)
973 goto err;
904 974
905 if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) { 975 ret = vhost_scsi_get_req(vq, &vc, &tpg);
906 vq_err(vq, "Faulted on copy_from_iter\n"); 976 if (ret)
907 vhost_scsi_send_bad_target(vs, vq, head, out); 977 goto err;
908 continue; 978
909 } 979 ret = -EIO; /* bad target on any error from here on */
910 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
911 if (unlikely(*lunp != 1)) {
912 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
913 vhost_scsi_send_bad_target(vs, vq, head, out);
914 continue;
915 }
916 980
917 tpg = READ_ONCE(vs_tpg[*target]);
918 if (unlikely(!tpg)) {
919 /* Target does not exist, fail the request */
920 vhost_scsi_send_bad_target(vs, vq, head, out);
921 continue;
922 }
923 /* 981 /*
924 * Determine data_direction by calculating the total outgoing 982 * Determine data_direction by calculating the total outgoing
925 * iovec sizes + incoming iovec sizes vs. virtio-scsi request + 983 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
@@ -937,17 +995,17 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
937 */ 995 */
938 prot_bytes = 0; 996 prot_bytes = 0;
939 997
940 if (out_size > req_size) { 998 if (vc.out_size > vc.req_size) {
941 data_direction = DMA_TO_DEVICE; 999 data_direction = DMA_TO_DEVICE;
942 exp_data_len = out_size - req_size; 1000 exp_data_len = vc.out_size - vc.req_size;
943 data_iter = out_iter; 1001 data_iter = vc.out_iter;
944 } else if (in_size > rsp_size) { 1002 } else if (vc.in_size > vc.rsp_size) {
945 data_direction = DMA_FROM_DEVICE; 1003 data_direction = DMA_FROM_DEVICE;
946 exp_data_len = in_size - rsp_size; 1004 exp_data_len = vc.in_size - vc.rsp_size;
947 1005
948 iov_iter_init(&in_iter, READ, &vq->iov[out], in, 1006 iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
949 rsp_size + exp_data_len); 1007 vc.rsp_size + exp_data_len);
950 iov_iter_advance(&in_iter, rsp_size); 1008 iov_iter_advance(&in_iter, vc.rsp_size);
951 data_iter = in_iter; 1009 data_iter = in_iter;
952 } else { 1010 } else {
953 data_direction = DMA_NONE; 1011 data_direction = DMA_NONE;
@@ -963,16 +1021,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
963 if (data_direction != DMA_TO_DEVICE) { 1021 if (data_direction != DMA_TO_DEVICE) {
964 vq_err(vq, "Received non zero pi_bytesout," 1022 vq_err(vq, "Received non zero pi_bytesout,"
965 " but wrong data_direction\n"); 1023 " but wrong data_direction\n");
966 vhost_scsi_send_bad_target(vs, vq, head, out); 1024 goto err;
967 continue;
968 } 1025 }
969 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); 1026 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
970 } else if (v_req_pi.pi_bytesin) { 1027 } else if (v_req_pi.pi_bytesin) {
971 if (data_direction != DMA_FROM_DEVICE) { 1028 if (data_direction != DMA_FROM_DEVICE) {
972 vq_err(vq, "Received non zero pi_bytesin," 1029 vq_err(vq, "Received non zero pi_bytesin,"
973 " but wrong data_direction\n"); 1030 " but wrong data_direction\n");
974 vhost_scsi_send_bad_target(vs, vq, head, out); 1031 goto err;
975 continue;
976 } 1032 }
977 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); 1033 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
978 } 1034 }
@@ -1011,8 +1067,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1011 vq_err(vq, "Received SCSI CDB with command_size: %d that" 1067 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1012 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1068 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1013 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); 1069 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1014 vhost_scsi_send_bad_target(vs, vq, head, out); 1070 goto err;
1015 continue;
1016 } 1071 }
1017 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, 1072 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1018 exp_data_len + prot_bytes, 1073 exp_data_len + prot_bytes,
@@ -1020,13 +1075,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1020 if (IS_ERR(cmd)) { 1075 if (IS_ERR(cmd)) {
1021 vq_err(vq, "vhost_scsi_get_tag failed %ld\n", 1076 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1022 PTR_ERR(cmd)); 1077 PTR_ERR(cmd));
1023 vhost_scsi_send_bad_target(vs, vq, head, out); 1078 goto err;
1024 continue;
1025 } 1079 }
1026 cmd->tvc_vhost = vs; 1080 cmd->tvc_vhost = vs;
1027 cmd->tvc_vq = vq; 1081 cmd->tvc_vq = vq;
1028 cmd->tvc_resp_iov = vq->iov[out]; 1082 cmd->tvc_resp_iov = vq->iov[vc.out];
1029 cmd->tvc_in_iovs = in; 1083 cmd->tvc_in_iovs = vc.in;
1030 1084
1031 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", 1085 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1032 cmd->tvc_cdb[0], cmd->tvc_lun); 1086 cmd->tvc_cdb[0], cmd->tvc_lun);
@@ -1034,14 +1088,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1034 " %d\n", cmd, exp_data_len, prot_bytes, data_direction); 1088 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1035 1089
1036 if (data_direction != DMA_NONE) { 1090 if (data_direction != DMA_NONE) {
1037 ret = vhost_scsi_mapal(cmd, 1091 if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1038 prot_bytes, &prot_iter, 1092 &prot_iter, exp_data_len,
1039 exp_data_len, &data_iter); 1093 &data_iter))) {
1040 if (unlikely(ret)) {
1041 vq_err(vq, "Failed to map iov to sgl\n"); 1094 vq_err(vq, "Failed to map iov to sgl\n");
1042 vhost_scsi_release_cmd(&cmd->tvc_se_cmd); 1095 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1043 vhost_scsi_send_bad_target(vs, vq, head, out); 1096 goto err;
1044 continue;
1045 } 1097 }
1046 } 1098 }
1047 /* 1099 /*
@@ -1049,7 +1101,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1049 * complete the virtio-scsi request in TCM callback context via 1101 * complete the virtio-scsi request in TCM callback context via
1050 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() 1102 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1051 */ 1103 */
1052 cmd->tvc_vq_desc = head; 1104 cmd->tvc_vq_desc = vc.head;
1053 /* 1105 /*
1054 * Dispatch cmd descriptor for cmwq execution in process 1106 * Dispatch cmd descriptor for cmwq execution in process
1055 * context provided by vhost_scsi_workqueue. This also ensures 1107 * context provided by vhost_scsi_workqueue. This also ensures
@@ -1058,112 +1110,27 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1058 */ 1110 */
1059 INIT_WORK(&cmd->work, vhost_scsi_submission_work); 1111 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1060 queue_work(vhost_scsi_workqueue, &cmd->work); 1112 queue_work(vhost_scsi_workqueue, &cmd->work);
1113 ret = 0;
1114err:
1115 /*
1116 * ENXIO: No more requests, or read error, wait for next kick
1117 * EINVAL: Invalid response buffer, drop the request
1118 * EIO: Respond with bad target
1119 * EAGAIN: Pending request
1120 */
1121 if (ret == -ENXIO)
1122 break;
1123 else if (ret == -EIO)
1124 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1061 } 1125 }
1062out: 1126out:
1063 mutex_unlock(&vq->mutex); 1127 mutex_unlock(&vq->mutex);
1064} 1128}
1065 1129
1066static int
1067vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1068 struct vhost_scsi_ctx *vc)
1069{
1070 int ret = -ENXIO;
1071
1072 vc->head = vhost_get_vq_desc(vq, vq->iov,
1073 ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
1074 NULL, NULL);
1075
1076 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1077 vc->head, vc->out, vc->in);
1078
1079 /* On error, stop handling until the next kick. */
1080 if (unlikely(vc->head < 0))
1081 goto done;
1082
1083 /* Nothing new? Wait for eventfd to tell us they refilled. */
1084 if (vc->head == vq->num) {
1085 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
1086 vhost_disable_notify(&vs->dev, vq);
1087 ret = -EAGAIN;
1088 }
1089 goto done;
1090 }
1091
1092 /*
1093 * Get the size of request and response buffers.
1094 */
1095 vc->out_size = iov_length(vq->iov, vc->out);
1096 vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
1097
1098 /*
1099 * Copy over the virtio-scsi request header, which for a
1100 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
1101 * single iovec may contain both the header + outgoing
1102 * WRITE payloads.
1103 *
1104 * copy_from_iter() will advance out_iter, so that it will
1105 * point at the start of the outgoing WRITE payload, if
1106 * DMA_TO_DEVICE is set.
1107 */
1108 iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
1109 ret = 0;
1110
1111done:
1112 return ret;
1113}
1114
1115static int
1116vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
1117{
1118 if (unlikely(vc->in_size < vc->rsp_size)) {
1119 vq_err(vq,
1120 "Response buf too small, need min %zu bytes got %zu",
1121 vc->rsp_size, vc->in_size);
1122 return -EINVAL;
1123 } else if (unlikely(vc->out_size < vc->req_size)) {
1124 vq_err(vq,
1125 "Request buf too small, need min %zu bytes got %zu",
1126 vc->req_size, vc->out_size);
1127 return -EIO;
1128 }
1129
1130 return 0;
1131}
1132
1133static int
1134vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
1135 struct vhost_scsi_tpg **tpgp)
1136{
1137 int ret = -EIO;
1138
1139 if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
1140 &vc->out_iter)))
1141 vq_err(vq, "Faulted on copy_from_iter\n");
1142 else if (unlikely(*vc->lunp != 1))
1143 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1144 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
1145 else {
1146 struct vhost_scsi_tpg **vs_tpg, *tpg;
1147
1148 vs_tpg = vq->private_data; /* validated at handler entry */
1149
1150 tpg = READ_ONCE(vs_tpg[*vc->target]);
1151 if (unlikely(!tpg))
1152 vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
1153 else {
1154 if (tpgp)
1155 *tpgp = tpg;
1156 ret = 0;
1157 }
1158 }
1159
1160 return ret;
1161}
1162
1163static void 1130static void
1164vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, 1131vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
1165 struct vhost_virtqueue *vq, 1132 struct vhost_virtqueue *vq,
1166 struct vhost_scsi_ctx *vc) 1133 struct vhost_scsi_ctx *vc)
1167{ 1134{
1168 struct virtio_scsi_ctrl_tmf_resp __user *resp; 1135 struct virtio_scsi_ctrl_tmf_resp __user *resp;
1169 struct virtio_scsi_ctrl_tmf_resp rsp; 1136 struct virtio_scsi_ctrl_tmf_resp rsp;
@@ -1289,7 +1256,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1289 goto err; 1256 goto err;
1290 1257
1291 if (v_req.type == VIRTIO_SCSI_T_TMF) 1258 if (v_req.type == VIRTIO_SCSI_T_TMF)
1292 vhost_scsi_send_tmf_resp(vs, vq, &vc); 1259 vhost_scsi_send_tmf_reject(vs, vq, &vc);
1293 else 1260 else
1294 vhost_scsi_send_an_resp(vs, vq, &vc); 1261 vhost_scsi_send_an_resp(vs, vq, &vc);
1295err: 1262err: