summaryrefslogtreecommitdiffstats
path: root/drivers/vhost
diff options
context:
space:
mode:
authorBijan Mottahedeh <bijan.mottahedeh@oracle.com>2018-09-17 20:09:48 -0400
committerMichael S. Tsirkin <mst@redhat.com>2018-10-24 21:16:13 -0400
commit3f8ca2e115e55af4c15d97dda635e948d2e380be (patch)
tree53653e3a23ed8a423fadab88a7c7606d672f5b2d /drivers/vhost
parent0d02dbd68c47b66367130b696baef7246720791c (diff)
vhost/scsi: Extract common handling code from control queue handler
Prepare to change the request queue handler to use common handling routines. Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost')
-rw-r--r--drivers/vhost/scsi.c271
1 file changed, 172 insertions, 99 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 1c33d6e39152..4cd03a1d7f21 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -203,6 +203,19 @@ struct vhost_scsi {
203 int vs_events_nr; /* num of pending events, protected by vq->mutex */ 203 int vs_events_nr; /* num of pending events, protected by vq->mutex */
204}; 204};
205 205
206/*
207 * Context for processing request and control queue operations.
208 */
209struct vhost_scsi_ctx {
210 int head;
211 unsigned int out, in;
212 size_t req_size, rsp_size;
213 size_t out_size, in_size;
214 u8 *target, *lunp;
215 void *req;
216 struct iov_iter out_iter;
217};
218
206static struct workqueue_struct *vhost_scsi_workqueue; 219static struct workqueue_struct *vhost_scsi_workqueue;
207 220
208/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ 221/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -1050,10 +1063,107 @@ out:
1050 mutex_unlock(&vq->mutex); 1063 mutex_unlock(&vq->mutex);
1051} 1064}
1052 1065
1066static int
1067vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1068 struct vhost_scsi_ctx *vc)
1069{
1070 int ret = -ENXIO;
1071
1072 vc->head = vhost_get_vq_desc(vq, vq->iov,
1073 ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
1074 NULL, NULL);
1075
1076 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1077 vc->head, vc->out, vc->in);
1078
1079 /* On error, stop handling until the next kick. */
1080 if (unlikely(vc->head < 0))
1081 goto done;
1082
1083 /* Nothing new? Wait for eventfd to tell us they refilled. */
1084 if (vc->head == vq->num) {
1085 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
1086 vhost_disable_notify(&vs->dev, vq);
1087 ret = -EAGAIN;
1088 }
1089 goto done;
1090 }
1091
1092 /*
1093 * Get the size of request and response buffers.
1094 */
1095 vc->out_size = iov_length(vq->iov, vc->out);
1096 vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
1097
1098 /*
1099 * Copy over the virtio-scsi request header, which for a
1100 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
1101 * single iovec may contain both the header + outgoing
1102 * WRITE payloads.
1103 *
1104 * copy_from_iter() will advance out_iter, so that it will
1105 * point at the start of the outgoing WRITE payload, if
1106 * DMA_TO_DEVICE is set.
1107 */
1108 iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
1109 ret = 0;
1110
1111done:
1112 return ret;
1113}
1114
1115static int
1116vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
1117{
1118 if (unlikely(vc->in_size < vc->rsp_size)) {
1119 vq_err(vq,
1120 "Response buf too small, need min %zu bytes got %zu",
1121 vc->rsp_size, vc->in_size);
1122 return -EINVAL;
1123 } else if (unlikely(vc->out_size < vc->req_size)) {
1124 vq_err(vq,
1125 "Request buf too small, need min %zu bytes got %zu",
1126 vc->req_size, vc->out_size);
1127 return -EIO;
1128 }
1129
1130 return 0;
1131}
1132
1133static int
1134vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
1135 struct vhost_scsi_tpg **tpgp)
1136{
1137 int ret = -EIO;
1138
1139 if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
1140 &vc->out_iter)))
1141 vq_err(vq, "Faulted on copy_from_iter\n");
1142 else if (unlikely(*vc->lunp != 1))
1143 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1144 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
1145 else {
1146 struct vhost_scsi_tpg **vs_tpg, *tpg;
1147
1148 vs_tpg = vq->private_data; /* validated at handler entry */
1149
1150 tpg = READ_ONCE(vs_tpg[*vc->target]);
1151 if (unlikely(!tpg))
1152 vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
1153 else {
1154 if (tpgp)
1155 *tpgp = tpg;
1156 ret = 0;
1157 }
1158 }
1159
1160 return ret;
1161}
1162
1053static void 1163static void
1054vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, 1164vhost_scsi_send_tmf_resp(struct vhost_scsi *vs,
1055 struct vhost_virtqueue *vq, 1165 struct vhost_virtqueue *vq,
1056 int head, unsigned int out) 1166 struct vhost_scsi_ctx *vc)
1057{ 1167{
1058 struct virtio_scsi_ctrl_tmf_resp __user *resp; 1168 struct virtio_scsi_ctrl_tmf_resp __user *resp;
1059 struct virtio_scsi_ctrl_tmf_resp rsp; 1169 struct virtio_scsi_ctrl_tmf_resp rsp;
@@ -1062,18 +1172,18 @@ vhost_scsi_send_tmf_resp(struct vhost_scsi *vs,
1062 pr_debug("%s\n", __func__); 1172 pr_debug("%s\n", __func__);
1063 memset(&rsp, 0, sizeof(rsp)); 1173 memset(&rsp, 0, sizeof(rsp));
1064 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; 1174 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1065 resp = vq->iov[out].iov_base; 1175 resp = vq->iov[vc->out].iov_base;
1066 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1176 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
1067 if (!ret) 1177 if (!ret)
1068 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 1178 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1069 else 1179 else
1070 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); 1180 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1071} 1181}
1072 1182
1073static void 1183static void
1074vhost_scsi_send_an_resp(struct vhost_scsi *vs, 1184vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1075 struct vhost_virtqueue *vq, 1185 struct vhost_virtqueue *vq,
1076 int head, unsigned int out) 1186 struct vhost_scsi_ctx *vc)
1077{ 1187{
1078 struct virtio_scsi_ctrl_an_resp __user *resp; 1188 struct virtio_scsi_ctrl_an_resp __user *resp;
1079 struct virtio_scsi_ctrl_an_resp rsp; 1189 struct virtio_scsi_ctrl_an_resp rsp;
@@ -1082,10 +1192,10 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1082 pr_debug("%s\n", __func__); 1192 pr_debug("%s\n", __func__);
1083 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ 1193 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
1084 rsp.response = VIRTIO_SCSI_S_OK; 1194 rsp.response = VIRTIO_SCSI_S_OK;
1085 resp = vq->iov[out].iov_base; 1195 resp = vq->iov[vc->out].iov_base;
1086 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1196 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
1087 if (!ret) 1197 if (!ret)
1088 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 1198 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1089 else 1199 else
1090 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); 1200 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1091} 1201}
@@ -1098,13 +1208,9 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1098 struct virtio_scsi_ctrl_an_req an; 1208 struct virtio_scsi_ctrl_an_req an;
1099 struct virtio_scsi_ctrl_tmf_req tmf; 1209 struct virtio_scsi_ctrl_tmf_req tmf;
1100 } v_req; 1210 } v_req;
1101 struct iov_iter out_iter; 1211 struct vhost_scsi_ctx vc;
1102 unsigned int out = 0, in = 0; 1212 size_t typ_size;
1103 int head; 1213 int ret;
1104 size_t req_size, rsp_size, typ_size;
1105 size_t out_size, in_size;
1106 u8 *lunp;
1107 void *req;
1108 1214
1109 mutex_lock(&vq->mutex); 1215 mutex_lock(&vq->mutex);
1110 /* 1216 /*
@@ -1114,52 +1220,28 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1114 if (!vq->private_data) 1220 if (!vq->private_data)
1115 goto out; 1221 goto out;
1116 1222
1223 memset(&vc, 0, sizeof(vc));
1224
1117 vhost_disable_notify(&vs->dev, vq); 1225 vhost_disable_notify(&vs->dev, vq);
1118 1226
1119 for (;;) { 1227 for (;;) {
1120 head = vhost_get_vq_desc(vq, vq->iov, 1228 ret = vhost_scsi_get_desc(vs, vq, &vc);
1121 ARRAY_SIZE(vq->iov), &out, &in, 1229 if (ret)
1122 NULL, NULL); 1230 goto err;
1123 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1124 head, out, in);
1125 /* On error, stop handling until the next kick. */
1126 if (unlikely(head < 0))
1127 break;
1128 /* Nothing new? Wait for eventfd to tell us they refilled. */
1129 if (head == vq->num) {
1130 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
1131 vhost_disable_notify(&vs->dev, vq);
1132 continue;
1133 }
1134 break;
1135 }
1136 1231
1137 /* 1232 /*
1138 * Get the size of request and response buffers. 1233 * Get the request type first in order to setup
1234 * other parameters dependent on the type.
1139 */ 1235 */
1140 out_size = iov_length(vq->iov, out); 1236 vc.req = &v_req.type;
1141 in_size = iov_length(&vq->iov[out], in);
1142
1143 /*
1144 * Copy over the virtio-scsi request header, which for a
1145 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
1146 * single iovec may contain both the header + outgoing
1147 * WRITE payloads.
1148 *
1149 * copy_from_iter() will advance out_iter, so that it will
1150 * point at the start of the outgoing WRITE payload, if
1151 * DMA_TO_DEVICE is set.
1152 */
1153 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
1154
1155 req = &v_req.type;
1156 typ_size = sizeof(v_req.type); 1237 typ_size = sizeof(v_req.type);
1157 1238
1158 if (unlikely(!copy_from_iter_full(req, typ_size, &out_iter))) { 1239 if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1240 &vc.out_iter))) {
1159 vq_err(vq, "Faulted on copy_from_iter tmf type\n"); 1241 vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1160 /* 1242 /*
1161 * The size of the response buffer varies based on 1243 * The size of the response buffer depends on the
1162 * the request type and must be validated against it. 1244 * request type and must be validated against it.
1163 * Since the request type is not known, don't send 1245 * Since the request type is not known, don't send
1164 * a response. 1246 * a response.
1165 */ 1247 */
@@ -1168,17 +1250,19 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1168 1250
1169 switch (v_req.type) { 1251 switch (v_req.type) {
1170 case VIRTIO_SCSI_T_TMF: 1252 case VIRTIO_SCSI_T_TMF:
1171 req = &v_req.tmf; 1253 vc.req = &v_req.tmf;
1172 lunp = &v_req.tmf.lun[0]; 1254 vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1173 req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); 1255 vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1174 rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp); 1256 vc.lunp = &v_req.tmf.lun[0];
1257 vc.target = &v_req.tmf.lun[1];
1175 break; 1258 break;
1176 case VIRTIO_SCSI_T_AN_QUERY: 1259 case VIRTIO_SCSI_T_AN_QUERY:
1177 case VIRTIO_SCSI_T_AN_SUBSCRIBE: 1260 case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1178 req = &v_req.an; 1261 vc.req = &v_req.an;
1179 lunp = &v_req.an.lun[0]; 1262 vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1180 req_size = sizeof(struct virtio_scsi_ctrl_an_req); 1263 vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1181 rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp); 1264 vc.lunp = &v_req.an.lun[0];
1265 vc.target = NULL;
1182 break; 1266 break;
1183 default: 1267 default:
1184 vq_err(vq, "Unknown control request %d", v_req.type); 1268 vq_err(vq, "Unknown control request %d", v_req.type);
@@ -1186,50 +1270,39 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1186 } 1270 }
1187 1271
1188 /* 1272 /*
1189 * Check for a sane response buffer so we can report early 1273 * Validate the size of request and response buffers.
1190 * errors back to the guest. 1274 * Check for a sane response buffer so we can report
1275 * early errors back to the guest.
1191 */ 1276 */
1192 if (unlikely(in_size < rsp_size)) { 1277 ret = vhost_scsi_chk_size(vq, &vc);
1193 vq_err(vq, 1278 if (ret)
1194 "Resp buf too small, need min %zu bytes got %zu", 1279 goto err;
1195 rsp_size, in_size);
1196 /*
1197 * Notifications are disabled at this point;
1198 * continue so they can be eventually enabled
1199 * when processing terminates.
1200 */
1201 continue;
1202 }
1203 1280
1204 if (unlikely(out_size < req_size)) { 1281 /*
1205 vq_err(vq, 1282 * Get the rest of the request now that its size is known.
1206 "Req buf too small, need min %zu bytes got %zu", 1283 */
1207 req_size, out_size); 1284 vc.req += typ_size;
1208 vhost_scsi_send_bad_target(vs, vq, head, out); 1285 vc.req_size -= typ_size;
1209 continue;
1210 }
1211
1212 req += typ_size;
1213 req_size -= typ_size;
1214
1215 if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
1216 vq_err(vq, "Faulted on copy_from_iter\n");
1217 vhost_scsi_send_bad_target(vs, vq, head, out);
1218 continue;
1219 }
1220 1286
1221 /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 1287 ret = vhost_scsi_get_req(vq, &vc, NULL);
1222 if (unlikely(*lunp != 1)) { 1288 if (ret)
1223 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp); 1289 goto err;
1224 vhost_scsi_send_bad_target(vs, vq, head, out);
1225 continue;
1226 }
1227 1290
1228 if (v_req.type == VIRTIO_SCSI_T_TMF) { 1291 if (v_req.type == VIRTIO_SCSI_T_TMF)
1229 pr_debug("%s tmf %d\n", __func__, v_req.tmf.subtype); 1292 vhost_scsi_send_tmf_resp(vs, vq, &vc);
1230 vhost_scsi_send_tmf_resp(vs, vq, head, out); 1293 else
1231 } else 1294 vhost_scsi_send_an_resp(vs, vq, &vc);
1232 vhost_scsi_send_an_resp(vs, vq, head, out); 1295err:
1296 /*
1297 * ENXIO: No more requests, or read error, wait for next kick
1298 * EINVAL: Invalid response buffer, drop the request
1299 * EIO: Respond with bad target
1300 * EAGAIN: Pending request
1301 */
1302 if (ret == -ENXIO)
1303 break;
1304 else if (ret == -EIO)
1305 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1233 } 1306 }
1234out: 1307out:
1235 mutex_unlock(&vq->mutex); 1308 mutex_unlock(&vq->mutex);