diff options
author | Bijan Mottahedeh <bijan.mottahedeh@oracle.com> | 2018-09-17 20:09:47 -0400 |
---|---|---|
committer | Michael S. Tsirkin <mst@redhat.com> | 2018-10-24 21:16:13 -0400 |
commit | 0d02dbd68c47b66367130b696baef7246720791c (patch) | |
tree | d0f5fe1edcc46df92d890e443e1b5d6d9b1be9a1 | |
parent | 4542d623c7134bc1738f8a68ccb6dd546f1c264f (diff) |
vhost/scsi: Respond to control queue operations
The vhost-scsi driver currently does not handle any control queue
operations. In particular, vhost_scsi_ctl_handle_kick merely prints out
a debug message but does nothing else. This can cause guest VMs to hang.
As part of SCSI recovery from an error, e.g., an I/O timeout, the SCSI
midlayer attempts to abort the failed operation. The SCSI virtio driver
translates the abort to a SCSI TMF request that gets put on the control
queue (virtscsi_abort -> virtscsi_tmf). The SCSI virtio driver then
waits indefinitely for this request to be completed, but it never will
because vhost-scsi never responds to that request.
To avoid a hang, always respond to control queue operations; explicitly
reject TMF requests, and return a no-op response to event requests.
Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r-- | drivers/vhost/scsi.c | 190 |
1 files changed, 190 insertions, 0 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index e7e3ae13516d..1c33d6e39152 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -1050,9 +1050,199 @@ out: | |||
1050 | mutex_unlock(&vq->mutex); | 1050 | mutex_unlock(&vq->mutex); |
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | static void | ||
1054 | vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, | ||
1055 | struct vhost_virtqueue *vq, | ||
1056 | int head, unsigned int out) | ||
1057 | { | ||
1058 | struct virtio_scsi_ctrl_tmf_resp __user *resp; | ||
1059 | struct virtio_scsi_ctrl_tmf_resp rsp; | ||
1060 | int ret; | ||
1061 | |||
1062 | pr_debug("%s\n", __func__); | ||
1063 | memset(&rsp, 0, sizeof(rsp)); | ||
1064 | rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; | ||
1065 | resp = vq->iov[out].iov_base; | ||
1066 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | ||
1067 | if (!ret) | ||
1068 | vhost_add_used_and_signal(&vs->dev, vq, head, 0); | ||
1069 | else | ||
1070 | pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); | ||
1071 | } | ||
1072 | |||
1073 | static void | ||
1074 | vhost_scsi_send_an_resp(struct vhost_scsi *vs, | ||
1075 | struct vhost_virtqueue *vq, | ||
1076 | int head, unsigned int out) | ||
1077 | { | ||
1078 | struct virtio_scsi_ctrl_an_resp __user *resp; | ||
1079 | struct virtio_scsi_ctrl_an_resp rsp; | ||
1080 | int ret; | ||
1081 | |||
1082 | pr_debug("%s\n", __func__); | ||
1083 | memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ | ||
1084 | rsp.response = VIRTIO_SCSI_S_OK; | ||
1085 | resp = vq->iov[out].iov_base; | ||
1086 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | ||
1087 | if (!ret) | ||
1088 | vhost_add_used_and_signal(&vs->dev, vq, head, 0); | ||
1089 | else | ||
1090 | pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); | ||
1091 | } | ||
1092 | |||
1093 | static void | ||
1094 | vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | ||
1095 | { | ||
1096 | union { | ||
1097 | __virtio32 type; | ||
1098 | struct virtio_scsi_ctrl_an_req an; | ||
1099 | struct virtio_scsi_ctrl_tmf_req tmf; | ||
1100 | } v_req; | ||
1101 | struct iov_iter out_iter; | ||
1102 | unsigned int out = 0, in = 0; | ||
1103 | int head; | ||
1104 | size_t req_size, rsp_size, typ_size; | ||
1105 | size_t out_size, in_size; | ||
1106 | u8 *lunp; | ||
1107 | void *req; | ||
1108 | |||
1109 | mutex_lock(&vq->mutex); | ||
1110 | /* | ||
1111 | * We can handle the vq only after the endpoint is setup by calling the | ||
1112 | * VHOST_SCSI_SET_ENDPOINT ioctl. | ||
1113 | */ | ||
1114 | if (!vq->private_data) | ||
1115 | goto out; | ||
1116 | |||
1117 | vhost_disable_notify(&vs->dev, vq); | ||
1118 | |||
1119 | for (;;) { | ||
1120 | head = vhost_get_vq_desc(vq, vq->iov, | ||
1121 | ARRAY_SIZE(vq->iov), &out, &in, | ||
1122 | NULL, NULL); | ||
1123 | pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", | ||
1124 | head, out, in); | ||
1125 | /* On error, stop handling until the next kick. */ | ||
1126 | if (unlikely(head < 0)) | ||
1127 | break; | ||
1128 | /* Nothing new? Wait for eventfd to tell us they refilled. */ | ||
1129 | if (head == vq->num) { | ||
1130 | if (unlikely(vhost_enable_notify(&vs->dev, vq))) { | ||
1131 | vhost_disable_notify(&vs->dev, vq); | ||
1132 | continue; | ||
1133 | } | ||
1134 | break; | ||
1135 | } | ||
1136 | |||
1137 | /* | ||
1138 | * Get the size of request and response buffers. | ||
1139 | */ | ||
1140 | out_size = iov_length(vq->iov, out); | ||
1141 | in_size = iov_length(&vq->iov[out], in); | ||
1142 | |||
1143 | /* | ||
1144 | * Copy over the virtio-scsi request header, which for a | ||
1145 | * ANY_LAYOUT enabled guest may span multiple iovecs, or a | ||
1146 | * single iovec may contain both the header + outgoing | ||
1147 | * WRITE payloads. | ||
1148 | * | ||
1149 | * copy_from_iter() will advance out_iter, so that it will | ||
1150 | * point at the start of the outgoing WRITE payload, if | ||
1151 | * DMA_TO_DEVICE is set. | ||
1152 | */ | ||
1153 | iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); | ||
1154 | |||
1155 | req = &v_req.type; | ||
1156 | typ_size = sizeof(v_req.type); | ||
1157 | |||
1158 | if (unlikely(!copy_from_iter_full(req, typ_size, &out_iter))) { | ||
1159 | vq_err(vq, "Faulted on copy_from_iter tmf type\n"); | ||
1160 | /* | ||
1161 | * The size of the response buffer varies based on | ||
1162 | * the request type and must be validated against it. | ||
1163 | * Since the request type is not known, don't send | ||
1164 | * a response. | ||
1165 | */ | ||
1166 | continue; | ||
1167 | } | ||
1168 | |||
1169 | switch (v_req.type) { | ||
1170 | case VIRTIO_SCSI_T_TMF: | ||
1171 | req = &v_req.tmf; | ||
1172 | lunp = &v_req.tmf.lun[0]; | ||
1173 | req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); | ||
1174 | rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp); | ||
1175 | break; | ||
1176 | case VIRTIO_SCSI_T_AN_QUERY: | ||
1177 | case VIRTIO_SCSI_T_AN_SUBSCRIBE: | ||
1178 | req = &v_req.an; | ||
1179 | lunp = &v_req.an.lun[0]; | ||
1180 | req_size = sizeof(struct virtio_scsi_ctrl_an_req); | ||
1181 | rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp); | ||
1182 | break; | ||
1183 | default: | ||
1184 | vq_err(vq, "Unknown control request %d", v_req.type); | ||
1185 | continue; | ||
1186 | } | ||
1187 | |||
1188 | /* | ||
1189 | * Check for a sane response buffer so we can report early | ||
1190 | * errors back to the guest. | ||
1191 | */ | ||
1192 | if (unlikely(in_size < rsp_size)) { | ||
1193 | vq_err(vq, | ||
1194 | "Resp buf too small, need min %zu bytes got %zu", | ||
1195 | rsp_size, in_size); | ||
1196 | /* | ||
1197 | * Notifications are disabled at this point; | ||
1198 | * continue so they can be eventually enabled | ||
1199 | * when processing terminates. | ||
1200 | */ | ||
1201 | continue; | ||
1202 | } | ||
1203 | |||
1204 | if (unlikely(out_size < req_size)) { | ||
1205 | vq_err(vq, | ||
1206 | "Req buf too small, need min %zu bytes got %zu", | ||
1207 | req_size, out_size); | ||
1208 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
1209 | continue; | ||
1210 | } | ||
1211 | |||
1212 | req += typ_size; | ||
1213 | req_size -= typ_size; | ||
1214 | |||
1215 | if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) { | ||
1216 | vq_err(vq, "Faulted on copy_from_iter\n"); | ||
1217 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
1218 | continue; | ||
1219 | } | ||
1220 | |||
1221 | /* virtio-scsi spec requires byte 0 of the lun to be 1 */ | ||
1222 | if (unlikely(*lunp != 1)) { | ||
1223 | vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp); | ||
1224 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
1225 | continue; | ||
1226 | } | ||
1227 | |||
1228 | if (v_req.type == VIRTIO_SCSI_T_TMF) { | ||
1229 | pr_debug("%s tmf %d\n", __func__, v_req.tmf.subtype); | ||
1230 | vhost_scsi_send_tmf_resp(vs, vq, head, out); | ||
1231 | } else | ||
1232 | vhost_scsi_send_an_resp(vs, vq, head, out); | ||
1233 | } | ||
1234 | out: | ||
1235 | mutex_unlock(&vq->mutex); | ||
1236 | } | ||
1237 | |||
1053 | static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) | 1238 | static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) |
1054 | { | 1239 | { |
1240 | struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, | ||
1241 | poll.work); | ||
1242 | struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); | ||
1243 | |||
1055 | pr_debug("%s: The handling func for control queue.\n", __func__); | 1244 | pr_debug("%s: The handling func for control queue.\n", __func__); |
1245 | vhost_scsi_ctl_handle_vq(vs, vq); | ||
1056 | } | 1246 | } |
1057 | 1247 | ||
1058 | static void | 1248 | static void |