about summary refs log tree commit diff stats
path: root/drivers/vhost
diff options
context:
space:
mode:
authorNicholas Bellinger <nab@linux-iscsi.org>2015-01-29 20:21:13 -0500
committerNicholas Bellinger <nab@linux-iscsi.org>2015-02-04 13:55:35 -0500
commitde1419e42088e99f8f839710c42aec759e45de77 (patch)
tree4ac43a279beeb7504ef2b4216a549fafe67d1b19 /drivers/vhost
parent79c14141a487211d1bb7840d3f607766f6115dd2 (diff)
vhost/scsi: Fix incorrect early vhost_scsi_handle_vq failures
This patch fixes vhost_scsi_handle_vq() failure cases that result in BUG_ON() getting triggered when vhost_scsi_free_cmd() is called, and ->tvc_se_cmd has not been initialized by target_submit_cmd_map_sgls(). It changes tcm_vhost_release_cmd() to use tcm_vhost_cmd->tvc_nexus for obtaining se_session pointer reference. Also, avoid calling put_page() on NULL sg->page entries in vhost_scsi_map_to_sgl() failure path. Cc: Michael S. Tsirkin <mst@redhat.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/vhost')
-rw-r--r--drivers/vhost/scsi.c52
1 file changed, 29 insertions(+), 23 deletions(-)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 29dfdf66bbc5..62de820ae2f4 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -462,7 +462,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
462{ 462{
463 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, 463 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
464 struct tcm_vhost_cmd, tvc_se_cmd); 464 struct tcm_vhost_cmd, tvc_se_cmd);
465 struct se_session *se_sess = se_cmd->se_sess; 465 struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
466 int i; 466 int i;
467 467
468 if (tv_cmd->tvc_sgl_count) { 468 if (tv_cmd->tvc_sgl_count) {
@@ -864,9 +864,11 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
864 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i], 864 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
865 cmd->tvc_upages, write); 865 cmd->tvc_upages, write);
866 if (ret < 0) { 866 if (ret < 0) {
867 for (i = 0; i < cmd->tvc_sgl_count; i++) 867 for (i = 0; i < cmd->tvc_sgl_count; i++) {
868 put_page(sg_page(&cmd->tvc_sgl[i])); 868 struct page *page = sg_page(&cmd->tvc_sgl[i]);
869 869 if (page)
870 put_page(page);
871 }
870 cmd->tvc_sgl_count = 0; 872 cmd->tvc_sgl_count = 0;
871 return ret; 873 return ret;
872 } 874 }
@@ -905,9 +907,11 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
905 ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i], 907 ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
906 cmd->tvc_upages, write); 908 cmd->tvc_upages, write);
907 if (ret < 0) { 909 if (ret < 0) {
908 for (i = 0; i < cmd->tvc_prot_sgl_count; i++) 910 for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
909 put_page(sg_page(&cmd->tvc_prot_sgl[i])); 911 struct page *page = sg_page(&cmd->tvc_prot_sgl[i]);
910 912 if (page)
913 put_page(page);
914 }
911 cmd->tvc_prot_sgl_count = 0; 915 cmd->tvc_prot_sgl_count = 0;
912 return ret; 916 return ret;
913 } 917 }
@@ -1065,12 +1069,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1065 if (unlikely(vq->iov[0].iov_len < req_size)) { 1069 if (unlikely(vq->iov[0].iov_len < req_size)) {
1066 pr_err("Expecting virtio-scsi header: %zu, got %zu\n", 1070 pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
1067 req_size, vq->iov[0].iov_len); 1071 req_size, vq->iov[0].iov_len);
1068 break; 1072 vhost_scsi_send_bad_target(vs, vq, head, out);
1073 continue;
1069 } 1074 }
1070 ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size); 1075 ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
1071 if (unlikely(ret)) { 1076 if (unlikely(ret)) {
1072 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); 1077 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
1073 break; 1078 vhost_scsi_send_bad_target(vs, vq, head, out);
1079 continue;
1074 } 1080 }
1075 1081
1076 /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 1082 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
@@ -1101,14 +1107,16 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1101 if (data_direction != DMA_TO_DEVICE) { 1107 if (data_direction != DMA_TO_DEVICE) {
1102 vq_err(vq, "Received non zero do_pi_niov" 1108 vq_err(vq, "Received non zero do_pi_niov"
1103 ", but wrong data_direction\n"); 1109 ", but wrong data_direction\n");
1104 goto err_cmd; 1110 vhost_scsi_send_bad_target(vs, vq, head, out);
1111 continue;
1105 } 1112 }
1106 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); 1113 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1107 } else if (v_req_pi.pi_bytesin) { 1114 } else if (v_req_pi.pi_bytesin) {
1108 if (data_direction != DMA_FROM_DEVICE) { 1115 if (data_direction != DMA_FROM_DEVICE) {
1109 vq_err(vq, "Received non zero di_pi_niov" 1116 vq_err(vq, "Received non zero di_pi_niov"
1110 ", but wrong data_direction\n"); 1117 ", but wrong data_direction\n");
1111 goto err_cmd; 1118 vhost_scsi_send_bad_target(vs, vq, head, out);
1119 continue;
1112 } 1120 }
1113 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); 1121 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1114 } 1122 }
@@ -1148,7 +1156,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1148 vq_err(vq, "Received SCSI CDB with command_size: %d that" 1156 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1149 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1157 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1150 scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); 1158 scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
1151 goto err_cmd; 1159 vhost_scsi_send_bad_target(vs, vq, head, out);
1160 continue;
1152 } 1161 }
1153 1162
1154 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, 1163 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
@@ -1157,7 +1166,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1157 if (IS_ERR(cmd)) { 1166 if (IS_ERR(cmd)) {
1158 vq_err(vq, "vhost_scsi_get_tag failed %ld\n", 1167 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1159 PTR_ERR(cmd)); 1168 PTR_ERR(cmd));
1160 goto err_cmd; 1169 vhost_scsi_send_bad_target(vs, vq, head, out);
1170 continue;
1161 } 1171 }
1162 1172
1163 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" 1173 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
@@ -1178,7 +1188,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1178 if (unlikely(ret)) { 1188 if (unlikely(ret)) {
1179 vq_err(vq, "Failed to map iov to" 1189 vq_err(vq, "Failed to map iov to"
1180 " prot_sgl\n"); 1190 " prot_sgl\n");
1181 goto err_free; 1191 tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
1192 vhost_scsi_send_bad_target(vs, vq, head, out);
1193 continue;
1182 } 1194 }
1183 } 1195 }
1184 if (data_direction != DMA_NONE) { 1196 if (data_direction != DMA_NONE) {
@@ -1187,7 +1199,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1187 data_direction == DMA_FROM_DEVICE); 1199 data_direction == DMA_FROM_DEVICE);
1188 if (unlikely(ret)) { 1200 if (unlikely(ret)) {
1189 vq_err(vq, "Failed to map iov to sgl\n"); 1201 vq_err(vq, "Failed to map iov to sgl\n");
1190 goto err_free; 1202 tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
1203 vhost_scsi_send_bad_target(vs, vq, head, out);
1204 continue;
1191 } 1205 }
1192 } 1206 }
1193 /* 1207 /*
@@ -1205,14 +1219,6 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1205 INIT_WORK(&cmd->work, tcm_vhost_submission_work); 1219 INIT_WORK(&cmd->work, tcm_vhost_submission_work);
1206 queue_work(tcm_vhost_workqueue, &cmd->work); 1220 queue_work(tcm_vhost_workqueue, &cmd->work);
1207 } 1221 }
1208
1209 mutex_unlock(&vq->mutex);
1210 return;
1211
1212err_free:
1213 vhost_scsi_free_cmd(cmd);
1214err_cmd:
1215 vhost_scsi_send_bad_target(vs, vq, head, out);
1216out: 1222out:
1217 mutex_unlock(&vq->mutex); 1223 mutex_unlock(&vq->mutex);
1218} 1224}