diff options
author | Nicholas Bellinger <nab@linux-iscsi.org> | 2014-02-22 21:22:31 -0500 |
---|---|---|
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2014-06-02 15:42:14 -0400 |
commit | 95e7c4341b8e28dae5204378087c1e2a115abc82 (patch) | |
tree | 72d011daf953191f311342d80cd51159f2678315 /drivers/vhost | |
parent | e31885dd901e80d5bd528c1cbedde07ebbf051b2 (diff) |
vhost/scsi: Enable T10 PI IOV -> SGL memory mapping
This patch updates vhost_scsi_handle_vq() to check for the existence
of virtio_scsi_cmd_req_pi comparing vq->iov[0].iov_len in order to
calculate separate data + protection SGLs from data_num.
Also update tcm_vhost_submission_work() to pass the pre-allocated
cmd->tvc_prot_sgl[] memory into target_submit_cmd_map_sgls(), and
update vhost_scsi_get_tag() parameters to accept scsi_tag, lun, and
task_attr.
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Sagi Grimberg <sagig@dev.mellanox.co.il>
Cc: H. Peter Anvin <hpa@zytor.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/vhost')
-rw-r--r-- | drivers/vhost/scsi.c | 183 |
1 files changed, 124 insertions, 59 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index eabcf1875831..667e72d46998 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -169,7 +169,8 @@ enum { | |||
169 | }; | 169 | }; |
170 | 170 | ||
171 | enum { | 171 | enum { |
172 | VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | 172 | VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | |
173 | (1ULL << VIRTIO_SCSI_F_T10_PI) | ||
173 | }; | 174 | }; |
174 | 175 | ||
175 | #define VHOST_SCSI_MAX_TARGET 256 | 176 | #define VHOST_SCSI_MAX_TARGET 256 |
@@ -720,11 +721,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) | |||
720 | } | 721 | } |
721 | 722 | ||
722 | static struct tcm_vhost_cmd * | 723 | static struct tcm_vhost_cmd * |
723 | vhost_scsi_get_tag(struct vhost_virtqueue *vq, | 724 | vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, |
724 | struct tcm_vhost_tpg *tpg, | 725 | unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, |
725 | struct virtio_scsi_cmd_req *v_req, | 726 | u32 exp_data_len, int data_direction) |
726 | u32 exp_data_len, | ||
727 | int data_direction) | ||
728 | { | 727 | { |
729 | struct tcm_vhost_cmd *cmd; | 728 | struct tcm_vhost_cmd *cmd; |
730 | struct tcm_vhost_nexus *tv_nexus; | 729 | struct tcm_vhost_nexus *tv_nexus; |
@@ -756,13 +755,16 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, | |||
756 | cmd->tvc_prot_sgl = prot_sg; | 755 | cmd->tvc_prot_sgl = prot_sg; |
757 | cmd->tvc_upages = pages; | 756 | cmd->tvc_upages = pages; |
758 | cmd->tvc_se_cmd.map_tag = tag; | 757 | cmd->tvc_se_cmd.map_tag = tag; |
759 | cmd->tvc_tag = v_req->tag; | 758 | cmd->tvc_tag = scsi_tag; |
760 | cmd->tvc_task_attr = v_req->task_attr; | 759 | cmd->tvc_lun = lun; |
760 | cmd->tvc_task_attr = task_attr; | ||
761 | cmd->tvc_exp_data_len = exp_data_len; | 761 | cmd->tvc_exp_data_len = exp_data_len; |
762 | cmd->tvc_data_direction = data_direction; | 762 | cmd->tvc_data_direction = data_direction; |
763 | cmd->tvc_nexus = tv_nexus; | 763 | cmd->tvc_nexus = tv_nexus; |
764 | cmd->inflight = tcm_vhost_get_inflight(vq); | 764 | cmd->inflight = tcm_vhost_get_inflight(vq); |
765 | 765 | ||
766 | memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE); | ||
767 | |||
766 | return cmd; | 768 | return cmd; |
767 | } | 769 | } |
768 | 770 | ||
@@ -913,18 +915,17 @@ static void tcm_vhost_submission_work(struct work_struct *work) | |||
913 | container_of(work, struct tcm_vhost_cmd, work); | 915 | container_of(work, struct tcm_vhost_cmd, work); |
914 | struct tcm_vhost_nexus *tv_nexus; | 916 | struct tcm_vhost_nexus *tv_nexus; |
915 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; | 917 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; |
916 | struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; | 918 | struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; |
917 | int rc, sg_no_bidi = 0; | 919 | int rc; |
918 | 920 | ||
921 | /* FIXME: BIDI operation */ | ||
919 | if (cmd->tvc_sgl_count) { | 922 | if (cmd->tvc_sgl_count) { |
920 | sg_ptr = cmd->tvc_sgl; | 923 | sg_ptr = cmd->tvc_sgl; |
921 | /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ | 924 | |
922 | #if 0 | 925 | if (cmd->tvc_prot_sgl_count) |
923 | if (se_cmd->se_cmd_flags & SCF_BIDI) { | 926 | sg_prot_ptr = cmd->tvc_prot_sgl; |
924 | sg_bidi_ptr = NULL; | 927 | else |
925 | sg_no_bidi = 0; | 928 | se_cmd->prot_pto = true; |
926 | } | ||
927 | #endif | ||
928 | } else { | 929 | } else { |
929 | sg_ptr = NULL; | 930 | sg_ptr = NULL; |
930 | } | 931 | } |
@@ -935,7 +936,7 @@ static void tcm_vhost_submission_work(struct work_struct *work) | |||
935 | cmd->tvc_lun, cmd->tvc_exp_data_len, | 936 | cmd->tvc_lun, cmd->tvc_exp_data_len, |
936 | cmd->tvc_task_attr, cmd->tvc_data_direction, | 937 | cmd->tvc_task_attr, cmd->tvc_data_direction, |
937 | TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, | 938 | TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, |
938 | sg_bidi_ptr, sg_no_bidi, NULL, 0); | 939 | NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); |
939 | if (rc < 0) { | 940 | if (rc < 0) { |
940 | transport_send_check_condition_and_sense(se_cmd, | 941 | transport_send_check_condition_and_sense(se_cmd, |
941 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 942 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
@@ -967,12 +968,18 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
967 | { | 968 | { |
968 | struct tcm_vhost_tpg **vs_tpg; | 969 | struct tcm_vhost_tpg **vs_tpg; |
969 | struct virtio_scsi_cmd_req v_req; | 970 | struct virtio_scsi_cmd_req v_req; |
971 | struct virtio_scsi_cmd_req_pi v_req_pi; | ||
970 | struct tcm_vhost_tpg *tpg; | 972 | struct tcm_vhost_tpg *tpg; |
971 | struct tcm_vhost_cmd *cmd; | 973 | struct tcm_vhost_cmd *cmd; |
972 | u32 exp_data_len, data_first, data_num, data_direction; | 974 | u64 tag; |
975 | u32 exp_data_len, data_first, data_num, data_direction, prot_first; | ||
973 | unsigned out, in, i; | 976 | unsigned out, in, i; |
974 | int head, ret; | 977 | int head, ret, data_niov, prot_niov, prot_bytes; |
975 | u8 target; | 978 | size_t req_size; |
979 | u16 lun; | ||
980 | u8 *target, *lunp, task_attr; | ||
981 | bool hdr_pi; | ||
982 | void *req, *cdb; | ||
976 | 983 | ||
977 | mutex_lock(&vq->mutex); | 984 | mutex_lock(&vq->mutex); |
978 | /* | 985 | /* |
@@ -1003,7 +1010,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1003 | break; | 1010 | break; |
1004 | } | 1011 | } |
1005 | 1012 | ||
1006 | /* FIXME: BIDI operation */ | 1013 | /* FIXME: BIDI operation */ |
1007 | if (out == 1 && in == 1) { | 1014 | if (out == 1 && in == 1) { |
1008 | data_direction = DMA_NONE; | 1015 | data_direction = DMA_NONE; |
1009 | data_first = 0; | 1016 | data_first = 0; |
@@ -1033,29 +1040,38 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1033 | break; | 1040 | break; |
1034 | } | 1041 | } |
1035 | 1042 | ||
1036 | if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) { | 1043 | if (vs->dev.acked_features & VIRTIO_SCSI_F_T10_PI) { |
1037 | vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu" | 1044 | req = &v_req_pi; |
1038 | " bytes\n", vq->iov[0].iov_len); | 1045 | lunp = &v_req_pi.lun[0]; |
1046 | target = &v_req_pi.lun[1]; | ||
1047 | req_size = sizeof(v_req_pi); | ||
1048 | hdr_pi = true; | ||
1049 | } else { | ||
1050 | req = &v_req; | ||
1051 | lunp = &v_req.lun[0]; | ||
1052 | target = &v_req.lun[1]; | ||
1053 | req_size = sizeof(v_req); | ||
1054 | hdr_pi = false; | ||
1055 | } | ||
1056 | |||
1057 | if (unlikely(vq->iov[0].iov_len < req_size)) { | ||
1058 | pr_err("Expecting virtio-scsi header: %zu, got %zu\n", | ||
1059 | req_size, vq->iov[0].iov_len); | ||
1039 | break; | 1060 | break; |
1040 | } | 1061 | } |
1041 | pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p," | 1062 | ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size); |
1042 | " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req)); | ||
1043 | ret = __copy_from_user(&v_req, vq->iov[0].iov_base, | ||
1044 | sizeof(v_req)); | ||
1045 | if (unlikely(ret)) { | 1063 | if (unlikely(ret)) { |
1046 | vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); | 1064 | vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); |
1047 | break; | 1065 | break; |
1048 | } | 1066 | } |
1049 | 1067 | ||
1050 | /* virtio-scsi spec requires byte 0 of the lun to be 1 */ | 1068 | /* virtio-scsi spec requires byte 0 of the lun to be 1 */ |
1051 | if (unlikely(v_req.lun[0] != 1)) { | 1069 | if (unlikely(*lunp != 1)) { |
1052 | vhost_scsi_send_bad_target(vs, vq, head, out); | 1070 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1053 | continue; | 1071 | continue; |
1054 | } | 1072 | } |
1055 | 1073 | ||
1056 | /* Extract the tpgt */ | 1074 | tpg = ACCESS_ONCE(vs_tpg[*target]); |
1057 | target = v_req.lun[1]; | ||
1058 | tpg = ACCESS_ONCE(vs_tpg[target]); | ||
1059 | 1075 | ||
1060 | /* Target does not exist, fail the request */ | 1076 | /* Target does not exist, fail the request */ |
1061 | if (unlikely(!tpg)) { | 1077 | if (unlikely(!tpg)) { |
@@ -1063,17 +1079,78 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1063 | continue; | 1079 | continue; |
1064 | } | 1080 | } |
1065 | 1081 | ||
1082 | data_niov = data_num; | ||
1083 | prot_niov = prot_first = prot_bytes = 0; | ||
1084 | /* | ||
1085 | * Determine if any protection information iovecs are preceeding | ||
1086 | * the actual data payload, and adjust data_first + data_niov | ||
1087 | * values accordingly for vhost_scsi_map_iov_to_sgl() below. | ||
1088 | * | ||
1089 | * Also extract virtio_scsi header bits for vhost_scsi_get_tag() | ||
1090 | */ | ||
1091 | if (hdr_pi) { | ||
1092 | if (v_req_pi.pi_bytesout) { | ||
1093 | if (data_direction != DMA_TO_DEVICE) { | ||
1094 | vq_err(vq, "Received non zero do_pi_niov" | ||
1095 | ", but wrong data_direction\n"); | ||
1096 | goto err_cmd; | ||
1097 | } | ||
1098 | prot_bytes = v_req_pi.pi_bytesout; | ||
1099 | } else if (v_req_pi.pi_bytesin) { | ||
1100 | if (data_direction != DMA_FROM_DEVICE) { | ||
1101 | vq_err(vq, "Received non zero di_pi_niov" | ||
1102 | ", but wrong data_direction\n"); | ||
1103 | goto err_cmd; | ||
1104 | } | ||
1105 | prot_bytes = v_req_pi.pi_bytesin; | ||
1106 | } | ||
1107 | if (prot_bytes) { | ||
1108 | int tmp = 0; | ||
1109 | |||
1110 | for (i = 0; i < data_num; i++) { | ||
1111 | tmp += vq->iov[data_first + i].iov_len; | ||
1112 | prot_niov++; | ||
1113 | if (tmp >= prot_bytes) | ||
1114 | break; | ||
1115 | } | ||
1116 | prot_first = data_first; | ||
1117 | data_first += prot_niov; | ||
1118 | data_niov = data_num - prot_niov; | ||
1119 | } | ||
1120 | tag = v_req_pi.tag; | ||
1121 | task_attr = v_req_pi.task_attr; | ||
1122 | cdb = &v_req_pi.cdb[0]; | ||
1123 | lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF; | ||
1124 | } else { | ||
1125 | tag = v_req.tag; | ||
1126 | task_attr = v_req.task_attr; | ||
1127 | cdb = &v_req.cdb[0]; | ||
1128 | lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; | ||
1129 | } | ||
1066 | exp_data_len = 0; | 1130 | exp_data_len = 0; |
1067 | for (i = 0; i < data_num; i++) | 1131 | for (i = 0; i < data_niov; i++) |
1068 | exp_data_len += vq->iov[data_first + i].iov_len; | 1132 | exp_data_len += vq->iov[data_first + i].iov_len; |
1133 | /* | ||
1134 | * Check that the recieved CDB size does not exceeded our | ||
1135 | * hardcoded max for vhost-scsi | ||
1136 | * | ||
1137 | * TODO what if cdb was too small for varlen cdb header? | ||
1138 | */ | ||
1139 | if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) { | ||
1140 | vq_err(vq, "Received SCSI CDB with command_size: %d that" | ||
1141 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | ||
1142 | scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); | ||
1143 | goto err_cmd; | ||
1144 | } | ||
1069 | 1145 | ||
1070 | cmd = vhost_scsi_get_tag(vq, tpg, &v_req, | 1146 | cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, |
1071 | exp_data_len, data_direction); | 1147 | exp_data_len, data_direction); |
1072 | if (IS_ERR(cmd)) { | 1148 | if (IS_ERR(cmd)) { |
1073 | vq_err(vq, "vhost_scsi_get_tag failed %ld\n", | 1149 | vq_err(vq, "vhost_scsi_get_tag failed %ld\n", |
1074 | PTR_ERR(cmd)); | 1150 | PTR_ERR(cmd)); |
1075 | goto err_cmd; | 1151 | goto err_cmd; |
1076 | } | 1152 | } |
1153 | |||
1077 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" | 1154 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" |
1078 | ": %d\n", cmd, exp_data_len, data_direction); | 1155 | ": %d\n", cmd, exp_data_len, data_direction); |
1079 | 1156 | ||
@@ -1081,40 +1158,28 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1081 | cmd->tvc_vq = vq; | 1158 | cmd->tvc_vq = vq; |
1082 | cmd->tvc_resp = vq->iov[out].iov_base; | 1159 | cmd->tvc_resp = vq->iov[out].iov_base; |
1083 | 1160 | ||
1084 | /* | ||
1085 | * Copy in the recieved CDB descriptor into cmd->tvc_cdb | ||
1086 | * that will be used by tcm_vhost_new_cmd_map() and down into | ||
1087 | * target_setup_cmd_from_cdb() | ||
1088 | */ | ||
1089 | memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); | ||
1090 | /* | ||
1091 | * Check that the recieved CDB size does not exceeded our | ||
1092 | * hardcoded max for tcm_vhost | ||
1093 | */ | ||
1094 | /* TODO what if cdb was too small for varlen cdb header? */ | ||
1095 | if (unlikely(scsi_command_size(cmd->tvc_cdb) > | ||
1096 | TCM_VHOST_MAX_CDB_SIZE)) { | ||
1097 | vq_err(vq, "Received SCSI CDB with command_size: %d that" | ||
1098 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | ||
1099 | scsi_command_size(cmd->tvc_cdb), | ||
1100 | TCM_VHOST_MAX_CDB_SIZE); | ||
1101 | goto err_free; | ||
1102 | } | ||
1103 | cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; | ||
1104 | |||
1105 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", | 1161 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", |
1106 | cmd->tvc_cdb[0], cmd->tvc_lun); | 1162 | cmd->tvc_cdb[0], cmd->tvc_lun); |
1107 | 1163 | ||
1164 | if (prot_niov) { | ||
1165 | ret = vhost_scsi_map_iov_to_prot(cmd, | ||
1166 | &vq->iov[prot_first], prot_niov, | ||
1167 | data_direction == DMA_FROM_DEVICE); | ||
1168 | if (unlikely(ret)) { | ||
1169 | vq_err(vq, "Failed to map iov to" | ||
1170 | " prot_sgl\n"); | ||
1171 | goto err_free; | ||
1172 | } | ||
1173 | } | ||
1108 | if (data_direction != DMA_NONE) { | 1174 | if (data_direction != DMA_NONE) { |
1109 | ret = vhost_scsi_map_iov_to_sgl(cmd, | 1175 | ret = vhost_scsi_map_iov_to_sgl(cmd, |
1110 | &vq->iov[data_first], data_num, | 1176 | &vq->iov[data_first], data_niov, |
1111 | data_direction == DMA_FROM_DEVICE); | 1177 | data_direction == DMA_FROM_DEVICE); |
1112 | if (unlikely(ret)) { | 1178 | if (unlikely(ret)) { |
1113 | vq_err(vq, "Failed to map iov to sgl\n"); | 1179 | vq_err(vq, "Failed to map iov to sgl\n"); |
1114 | goto err_free; | 1180 | goto err_free; |
1115 | } | 1181 | } |
1116 | } | 1182 | } |
1117 | |||
1118 | /* | 1183 | /* |
1119 | * Save the descriptor from vhost_get_vq_desc() to be used to | 1184 | * Save the descriptor from vhost_get_vq_desc() to be used to |
1120 | * complete the virtio-scsi request in TCM callback context via | 1185 | * complete the virtio-scsi request in TCM callback context via |
@@ -1788,7 +1853,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | |||
1788 | tv_nexus->tvn_se_sess = transport_init_session_tags( | 1853 | tv_nexus->tvn_se_sess = transport_init_session_tags( |
1789 | TCM_VHOST_DEFAULT_TAGS, | 1854 | TCM_VHOST_DEFAULT_TAGS, |
1790 | sizeof(struct tcm_vhost_cmd), | 1855 | sizeof(struct tcm_vhost_cmd), |
1791 | TARGET_PROT_NORMAL); | 1856 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); |
1792 | if (IS_ERR(tv_nexus->tvn_se_sess)) { | 1857 | if (IS_ERR(tv_nexus->tvn_se_sess)) { |
1793 | mutex_unlock(&tpg->tv_tpg_mutex); | 1858 | mutex_unlock(&tpg->tv_tpg_mutex); |
1794 | kfree(tv_nexus); | 1859 | kfree(tv_nexus); |