| author | Nicholas Bellinger <nab@linux-iscsi.org> | 2015-01-26 00:14:58 -0500 |
| --- | --- | --- |
| committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2015-02-04 13:55:37 -0500 |
| commit | 09b13fa8c1a1093e9458549ac8bb203a7c65c62a (patch) | |
| tree | cd107aeab2fb6b1e6e1debc868ef8773bb777710 /drivers/vhost | |
| parent | e8de56b5e76ab7ed2a4aa3476649fe3fa85de1d7 (diff) | |
vhost/scsi: Add ANY_LAYOUT support in vhost_scsi_handle_vq
This patch adds ANY_LAYOUT compatible support within the existing
vhost_scsi_handle_vq() ->handle_kick() callback.
It calculates data_direction and exp_data_len for the new tcm_vhost_cmd
descriptor by walking both the outgoing and incoming iovecs with iov_iter,
assuming a layout in which the outgoing request header comes first, followed
by T10_PI and then the data payload.
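As a rough illustration of that calculation (a standalone sketch rather than
the kernel code; iov_total(), calc_direction() and the data_dir enum are
invented here for the example), the direction and expected data length fall
out of comparing the total out iovec bytes against the request header size,
and the total in iovec bytes against the response header size:

#include <stddef.h>
#include <sys/uio.h>

/* Stand-in for the kernel's DMA data direction values. */
enum data_dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };

/* Sum the lengths of an iovec array (what iov_length() does in the kernel). */
static size_t iov_total(const struct iovec *iov, unsigned int n)
{
	size_t len = 0;

	while (n--)
		len += (iov++)->iov_len;
	return len;
}

/*
 * Derive data_direction + exp_data_len the same way the patched handler does:
 * out bytes beyond the request header are WRITE payload, in bytes beyond the
 * response header are READ payload, otherwise there is no data.  Any T10_PI
 * bytes are still included here and get peeled off later.
 */
static enum data_dir calc_direction(const struct iovec *out_iov, unsigned int out,
				    const struct iovec *in_iov, unsigned int in,
				    size_t req_size, size_t rsp_size,
				    size_t *exp_data_len)
{
	size_t out_size = iov_total(out_iov, out);
	size_t in_size = iov_total(in_iov, in);

	if (out_size > req_size) {
		*exp_data_len = out_size - req_size;
		return DIR_TO_DEVICE;
	}
	if (in_size > rsp_size) {
		*exp_data_len = in_size - rsp_size;
		return DIR_FROM_DEVICE;
	}
	*exp_data_len = 0;
	return DIR_NONE;
}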
It also uses copy_from_iter() to copy the leading virtio-scsi request header,
which may or may not include the SCSI CDB, leaving a re-calculated iovec
pointing at the start of T10_PI or data SGL memory.
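To make the ANY_LAYOUT point concrete, below is a minimal userspace analogue
of that header copy (illustrative only; copy_header() and its parameters are
invented for the example, while the kernel path uses iov_iter_init() plus
copy_from_iter()). The header may be spread over several iovecs, or share one
iovec with the start of a WRITE payload, so the copy has to record exactly
where the payload begins:

#include <string.h>
#include <sys/uio.h>

/*
 * Copy the first hdr_len bytes spread across the out iovecs into hdr and
 * report where the remaining (payload) bytes begin.  With ANY_LAYOUT the
 * payload may start mid-iovec, immediately after the header.
 */
static size_t copy_header(const struct iovec *iov, unsigned int niov,
			  void *hdr, size_t hdr_len,
			  unsigned int *payload_seg, size_t *payload_off)
{
	size_t copied = 0;
	unsigned int i;

	*payload_seg = niov;
	*payload_off = 0;

	for (i = 0; i < niov && copied < hdr_len; i++) {
		size_t take = iov[i].iov_len;

		if (take > hdr_len - copied)
			take = hdr_len - copied;
		memcpy((char *)hdr + copied, iov[i].iov_base, take);
		copied += take;

		if (copied == hdr_len) {
			/* Payload starts here, possibly inside the same iovec. */
			if (take < iov[i].iov_len) {
				*payload_seg = i;
				*payload_off = take;
			} else {
				*payload_seg = i + 1;
				*payload_off = 0;
			}
		}
	}
	return copied;	/* a short count means the descriptors were too small */
}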
Also, go ahead and drop the legacy pre-virtio-1.0 !ANY_LAYOUT logic.
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/vhost')
-rw-r--r-- | drivers/vhost/scsi.c | 306 |
1 file changed, 110 insertions, 196 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 5396b8a3028f..e53959f30c26 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -828,93 +828,6 @@ out:
 }
 
 static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
-			  struct iovec *iov,
-			  int niov,
-			  bool write)
-{
-	struct scatterlist *sg = cmd->tvc_sgl;
-	unsigned int sgl_count = 0;
-	int ret, i;
-
-	for (i = 0; i < niov; i++)
-		sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len);
-
-	if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
-		pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
-		       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
-		       sgl_count, TCM_VHOST_PREALLOC_SGLS);
-		return -ENOBUFS;
-	}
-
-	pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
-	sg_init_table(sg, sgl_count);
-	cmd->tvc_sgl_count = sgl_count;
-
-	pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
-
-	for (i = 0; i < niov; i++) {
-		ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len,
-					    sg, write);
-		if (ret < 0) {
-			for (i = 0; i < cmd->tvc_sgl_count; i++) {
-				struct page *page = sg_page(&cmd->tvc_sgl[i]);
-				if (page)
-					put_page(page);
-			}
-			cmd->tvc_sgl_count = 0;
-			return ret;
-		}
-		sg += ret;
-		sgl_count -= ret;
-	}
-	return 0;
-}
-
-static int
-vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
-			   struct iovec *iov,
-			   int niov,
-			   bool write)
-{
-	struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
-	unsigned int prot_sgl_count = 0;
-	int ret, i;
-
-	for (i = 0; i < niov; i++)
-		prot_sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len);
-
-	if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
-		pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
-		       " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
-		       prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
-		return -ENOBUFS;
-	}
-
-	pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
-		 prot_sg, prot_sgl_count);
-	sg_init_table(prot_sg, prot_sgl_count);
-	cmd->tvc_prot_sgl_count = prot_sgl_count;
-
-	for (i = 0; i < niov; i++) {
-		ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len,
-					    prot_sg, write);
-		if (ret < 0) {
-			for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
-				struct page *page = sg_page(&cmd->tvc_prot_sgl[i]);
-				if (page)
-					put_page(page);
-			}
-			cmd->tvc_prot_sgl_count = 0;
-			return ret;
-		}
-		prot_sg += ret;
-		prot_sgl_count -= ret;
-	}
-	return 0;
-}
-
-static int
 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 {
 	int sgl_count = 0;
@@ -1064,19 +977,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
-	struct tcm_vhost_tpg **vs_tpg;
+	struct tcm_vhost_tpg **vs_tpg, *tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct virtio_scsi_cmd_req_pi v_req_pi;
-	struct tcm_vhost_tpg *tpg;
 	struct tcm_vhost_cmd *cmd;
+	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
 	u64 tag;
-	u32 exp_data_len, data_first, data_num, data_direction, prot_first;
-	unsigned out, in, i;
-	int head, ret, data_niov, prot_niov, prot_bytes;
-	size_t req_size;
+	u32 exp_data_len, data_direction;
+	unsigned out, in;
+	int head, ret, prot_bytes;
+	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+	size_t out_size, in_size;
 	u16 lun;
 	u8 *target, *lunp, task_attr;
-	bool hdr_pi;
+	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
 	void *req, *cdb;
 
 	mutex_lock(&vq->mutex);
@@ -1092,10 +1006,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
 	for (;;) {
 		head = vhost_get_vq_desc(vq, vq->iov,
-					ARRAY_SIZE(vq->iov), &out, &in,
-					NULL, NULL);
+					 ARRAY_SIZE(vq->iov), &out, &in,
+					 NULL, NULL);
 		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-				head, out, in);
+			 head, out, in);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(head < 0))
 			break;
@@ -1107,117 +1021,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			}
 			break;
 		}
-
-		/* FIXME: BIDI operation */
-		if (out == 1 && in == 1) {
-			data_direction = DMA_NONE;
-			data_first = 0;
-			data_num = 0;
-		} else if (out == 1 && in > 1) {
-			data_direction = DMA_FROM_DEVICE;
-			data_first = out + 1;
-			data_num = in - 1;
-		} else if (out > 1 && in == 1) {
-			data_direction = DMA_TO_DEVICE;
-			data_first = 1;
-			data_num = out - 1;
-		} else {
-			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
-					out, in);
-			break;
-		}
-
 		/*
-		 * Check for a sane resp buffer so we can report errors to
-		 * the guest.
+		 * Check for a sane response buffer so we can report early
+		 * errors back to the guest.
 		 */
-		if (unlikely(vq->iov[out].iov_len !=
-					sizeof(struct virtio_scsi_cmd_resp))) {
-			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-				" bytes\n", vq->iov[out].iov_len);
+		if (unlikely(vq->iov[out].iov_len < rsp_size)) {
+			vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
+				" size, got %zu bytes\n", vq->iov[out].iov_len);
 			break;
 		}
-
-		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+		/*
+		 * Setup pointers and values based upon different virtio-scsi
+		 * request header if T10_PI is enabled in KVM guest.
+		 */
+		if (t10_pi) {
 			req = &v_req_pi;
+			req_size = sizeof(v_req_pi);
 			lunp = &v_req_pi.lun[0];
 			target = &v_req_pi.lun[1];
-			req_size = sizeof(v_req_pi);
-			hdr_pi = true;
 		} else {
 			req = &v_req;
+			req_size = sizeof(v_req);
 			lunp = &v_req.lun[0];
 			target = &v_req.lun[1];
-			req_size = sizeof(v_req);
-			hdr_pi = false;
 		}
+		/*
+		 * FIXME: Not correct for BIDI operation
+		 */
+		out_size = iov_length(vq->iov, out);
+		in_size = iov_length(&vq->iov[out], in);
 
-		if (unlikely(vq->iov[0].iov_len < req_size)) {
-			pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
-			       req_size, vq->iov[0].iov_len);
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
-		ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
-		if (unlikely(ret)) {
-			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
+		/*
+		 * Copy over the virtio-scsi request header, which for a
+		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+		 * single iovec may contain both the header + outgoing
+		 * WRITE payloads.
+		 *
+		 * copy_from_iter() will advance out_iter, so that it will
+		 * point at the start of the outgoing WRITE payload, if
+		 * DMA_TO_DEVICE is set.
+		 */
+		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
+
+		ret = copy_from_iter(req, req_size, &out_iter);
+		if (unlikely(ret != req_size)) {
+			vq_err(vq, "Faulted on copy_from_iter\n");
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
-
 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
 		if (unlikely(*lunp != 1)) {
+			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
 
 		tpg = ACCESS_ONCE(vs_tpg[*target]);
-
-		/* Target does not exist, fail the request */
 		if (unlikely(!tpg)) {
+			/* Target does not exist, fail the request */
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
-
-		data_niov = data_num;
-		prot_niov = prot_first = prot_bytes = 0;
 		/*
-		 * Determine if any protection information iovecs are preceeding
-		 * the actual data payload, and adjust data_first + data_niov
-		 * values accordingly for vhost_scsi_map_iov_to_sgl() below.
+		 * Determine data_direction by calculating the total outgoing
+		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
+		 * response headers respectively.
 		 *
-		 * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
+		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
+		 * to the right place.
+		 *
+		 * For DMA_FROM_DEVICE, the iovec will be just past the end
+		 * of the virtio-scsi response header in either the same
+		 * or immediately following iovec.
+		 *
+		 * Any associated T10_PI bytes for the outgoing / incoming
+		 * payloads are included in calculation of exp_data_len here.
+		 */
+		prot_bytes = 0;
+
+		if (out_size > req_size) {
+			data_direction = DMA_TO_DEVICE;
+			exp_data_len = out_size - req_size;
+			data_iter = out_iter;
+		} else if (in_size > rsp_size) {
+			data_direction = DMA_FROM_DEVICE;
+			exp_data_len = in_size - rsp_size;
+
+			iov_iter_init(&in_iter, READ, &vq->iov[out], in,
+				      rsp_size + exp_data_len);
+			iov_iter_advance(&in_iter, rsp_size);
+			data_iter = in_iter;
+		} else {
+			data_direction = DMA_NONE;
+			exp_data_len = 0;
+		}
+		/*
+		 * If T10_PI header + payload is present, setup prot_iter values
+		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
+		 * host scatterlists via get_user_pages_fast().
 		 */
-		if (hdr_pi) {
+		if (t10_pi) {
 			if (v_req_pi.pi_bytesout) {
 				if (data_direction != DMA_TO_DEVICE) {
-					vq_err(vq, "Received non zero do_pi_niov"
-						", but wrong data_direction\n");
+					vq_err(vq, "Received non zero pi_bytesout,"
+						" but wrong data_direction\n");
 					vhost_scsi_send_bad_target(vs, vq, head, out);
 					continue;
 				}
 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
 			} else if (v_req_pi.pi_bytesin) {
 				if (data_direction != DMA_FROM_DEVICE) {
-					vq_err(vq, "Received non zero di_pi_niov"
-						", but wrong data_direction\n");
+					vq_err(vq, "Received non zero pi_bytesin,"
+						" but wrong data_direction\n");
 					vhost_scsi_send_bad_target(vs, vq, head, out);
 					continue;
 				}
 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
 			}
+			/*
+			 * Set prot_iter to data_iter, and advance past any
+			 * preceeding prot_bytes that may be present.
+			 *
+			 * Also fix up the exp_data_len to reflect only the
+			 * actual data payload length.
+			 */
 			if (prot_bytes) {
-				int tmp = 0;
-
-				for (i = 0; i < data_num; i++) {
-					tmp += vq->iov[data_first + i].iov_len;
-					prot_niov++;
-					if (tmp >= prot_bytes)
-						break;
-				}
-				prot_first = data_first;
-				data_first += prot_niov;
-				data_niov = data_num - prot_niov;
+				exp_data_len -= prot_bytes;
+				prot_iter = data_iter;
+				iov_iter_advance(&data_iter, prot_bytes);
 			}
 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
 			task_attr = v_req_pi.task_attr;
@@ -1229,12 +1160,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			cdb = &v_req.cdb[0];
 			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 		}
-		exp_data_len = 0;
-		for (i = 0; i < data_niov; i++)
-			exp_data_len += vq->iov[data_first + i].iov_len;
 		/*
-		 * Check that the recieved CDB size does not exceeded our
-		 * hardcoded max for vhost-scsi
+		 * Check that the received CDB size does not exceeded our
+		 * hardcoded max for vhost-scsi, then get a pre-allocated
+		 * cmd descriptor for the new virtio-scsi tag.
 		 *
 		 * TODO what if cdb was too small for varlen cdb header?
 		 */
@@ -1245,44 +1174,29 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
-
 		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
 					 exp_data_len + prot_bytes,
 					 data_direction);
 		if (IS_ERR(cmd)) {
 			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
 			       PTR_ERR(cmd));
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
-
-		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
-			": %d\n", cmd, exp_data_len, data_direction);
-
 		cmd->tvc_vhost = vs;
 		cmd->tvc_vq = vq;
 		cmd->tvc_resp_iov = &vq->iov[out];
 		cmd->tvc_in_iovs = in;
 
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
 			 cmd->tvc_cdb[0], cmd->tvc_lun);
+		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
+			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
-		if (prot_niov) {
-			ret = vhost_scsi_map_iov_to_prot(cmd,
-					&vq->iov[prot_first], prot_niov,
-					data_direction == DMA_FROM_DEVICE);
-			if (unlikely(ret)) {
-				vq_err(vq, "Failed to map iov to"
-					" prot_sgl\n");
-				tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
-				vhost_scsi_send_bad_target(vs, vq, head, out);
-				continue;
-			}
-		}
 		if (data_direction != DMA_NONE) {
-			ret = vhost_scsi_map_iov_to_sgl(cmd,
-					&vq->iov[data_first], data_niov,
-					data_direction == DMA_FROM_DEVICE);
+			ret = vhost_scsi_mapal(cmd,
+					       prot_bytes, &prot_iter,
+					       exp_data_len, &data_iter);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
 				tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
@@ -1293,14 +1207,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		/*
 		 * Save the descriptor from vhost_get_vq_desc() to be used to
 		 * complete the virtio-scsi request in TCM callback context via
-		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
 		 */
 		cmd->tvc_vq_desc = head;
 		/*
-		 * Dispatch tv_cmd descriptor for cmwq execution in process
-		 * context provided by tcm_vhost_workqueue. This also ensures
-		 * tv_cmd is executed on the same kworker CPU as this vhost
-		 * thread to gain positive L2 cache locality effects..
+		 * Dispatch cmd descriptor for cmwq execution in process
+		 * context provided by vhost_scsi_workqueue. This also ensures
+		 * cmd is executed on the same kworker CPU as this vhost
+		 * thread to gain positive L2 cache locality effects.
 		 */
 		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
 		queue_work(tcm_vhost_workqueue, &cmd->work);