author     Linus Torvalds <torvalds@linux-foundation.org>  2014-03-01 22:33:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-03-01 22:33:09 -0500
commit     702256e604ec143f69b391485ab32b2948772838
tree       a62aa9ccd12ffe95dea6f5e0b97e0d7d907dd6b9
parent     3750c14022d185f4a583dda243f2f46f51ce1a2c
parent     7fe412d07d881020022a188b95c63a19b651a391
Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
 "The bulk of the series are bugfixes for qla2xxx target NPIV support
  that went in for v3.14-rc1.  Also included are a few DIF related
  fixes, a qla2xxx fix (Cc'ed to stable) from Greg W., and a vhost/scsi
  protocol version related fix from Venkatesh.

  Also just a heads up that a series to address a number of issues with
  iser-target active I/O reset/shutdown is still being tested, and will
  be included in a separate -rc6 PULL request"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  vhost/scsi: Check LUN structure byte 0 is set to 1, per spec
  qla2xxx: Fix kernel panic on selective retransmission request
  Target/sbc: Don't use sg as iterator in sbc_verify_read
  target: Add DIF sense codes in transport_generic_request_failure
  target/sbc: Fix sbc_dif_copy_prot addr offset bug
  tcm_qla2xxx: Fix NAA formatted name for NPIV WWPNs
  tcm_qla2xxx: Perform configfs depend/undepend for base_tpg
  tcm_qla2xxx: Add NPIV specific enable/disable attribute logic
  qla2xxx: Check + fail when npiv_vports_inuse exists in shutdown
  qla2xxx: Fix qlt_lport_register base_vha callback race
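For context on the vhost/scsi fix: the virtio-scsi request header carries an
8-byte LUN field in which byte 0 must be 1 and byte 1 selects the target; the
remaining bytes are assumed here to carry the LUN in the single-level (flat)
format, masked to 14 bits. The following is a minimal, stand-alone user-space
sketch of that check and decode, not the kernel code itself; the names
virtio_scsi_lun and decode_virtio_scsi_lun are illustrative and not part of
this patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: validate and decode an 8-byte virtio-scsi
 * LUN field.  Byte 0 must be 1 (the check the vhost/scsi hunk below
 * enforces), byte 1 is the target index, and bytes 2-3 are assumed to
 * hold the LUN in flat addressing, masked to 14 bits.
 */
struct virtio_scsi_lun {
        uint8_t raw[8];
};

static int decode_virtio_scsi_lun(const struct virtio_scsi_lun *l,
                                  unsigned int *target, unsigned int *lun)
{
        if (l->raw[0] != 1)
                return -1;      /* caller should report a bad target */

        *target = l->raw[1];
        *lun = ((l->raw[2] << 8) | l->raw[3]) & 0x3fff;
        return 0;
}

int main(void)
{
        struct virtio_scsi_lun l = { .raw = { 1, 0, 0x40, 5 } };
        unsigned int target, lun;

        if (!decode_virtio_scsi_lun(&l, &target, &lun))
                printf("target %u, lun %u\n", target, lun);
        return 0;
}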
-rw-r--r--   drivers/scsi/qla2xxx/qla_target.c         42
-rw-r--r--   drivers/scsi/qla2xxx/qla_target.h          2
-rw-r--r--   drivers/scsi/qla2xxx/tcm_qla2xxx.c       158
-rw-r--r--   drivers/scsi/qla2xxx/tcm_qla2xxx.h         7
-rw-r--r--   drivers/target/target_core_sbc.c          15
-rw-r--r--   drivers/target/target_core_transport.c     3
-rw-r--r--   drivers/vhost/scsi.c                       6
7 files changed, 171 insertions, 62 deletions
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2eb97d7e8d12..0cb73074c199 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -790,17 +790,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
 }
 
 /* Called by tcm_qla2xxx configfs code */
-void qlt_stop_phase1(struct qla_tgt *tgt)
+int qlt_stop_phase1(struct qla_tgt *tgt)
 {
         struct scsi_qla_host *vha = tgt->vha;
         struct qla_hw_data *ha = tgt->ha;
         unsigned long flags;
 
+        mutex_lock(&qla_tgt_mutex);
+        if (!vha->fc_vport) {
+                struct Scsi_Host *sh = vha->host;
+                struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+                bool npiv_vports;
+
+                spin_lock_irqsave(sh->host_lock, flags);
+                npiv_vports = (fc_host->npiv_vports_inuse);
+                spin_unlock_irqrestore(sh->host_lock, flags);
+
+                if (npiv_vports) {
+                        mutex_unlock(&qla_tgt_mutex);
+                        return -EPERM;
+                }
+        }
         if (tgt->tgt_stop || tgt->tgt_stopped) {
                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
                     "Already in tgt->tgt_stop or tgt_stopped state\n");
-                dump_stack();
-                return;
+                mutex_unlock(&qla_tgt_mutex);
+                return -EPERM;
         }
 
         ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
@@ -815,6 +830,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
         qlt_clear_tgt_db(tgt, true);
         spin_unlock_irqrestore(&ha->hardware_lock, flags);
         mutex_unlock(&vha->vha_tgt.tgt_mutex);
+        mutex_unlock(&qla_tgt_mutex);
 
         flush_delayed_work(&tgt->sess_del_work);
 
@@ -841,6 +857,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
 
         /* Wait for sessions to clear out (just in case) */
         wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+        return 0;
 }
 EXPORT_SYMBOL(qlt_stop_phase1);
 
@@ -3185,7 +3202,8 @@ restart:
                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
                     "SRR cmd %p (se_cmd %p, tag %d, op %x), "
                     "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
-                    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+                    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+                    cmd->sg_cnt, cmd->offset);
 
                 qlt_handle_srr(vha, sctio, imm);
 
@@ -4181,6 +4199,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
         tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
         tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
 
+        if (base_vha->fc_vport)
+                return 0;
+
         mutex_lock(&qla_tgt_mutex);
         list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
         mutex_unlock(&qla_tgt_mutex);
@@ -4194,6 +4215,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
         if (!vha->vha_tgt.qla_tgt)
                 return 0;
 
+        if (vha->fc_vport) {
+                qlt_release(vha->vha_tgt.qla_tgt);
+                return 0;
+        }
         mutex_lock(&qla_tgt_mutex);
         list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
         mutex_unlock(&qla_tgt_mutex);
@@ -4265,6 +4290,12 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
                         continue;
                 }
+                if (tgt->tgt_stop) {
+                        pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+                                 host->host_no);
+                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                        continue;
+                }
                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
                 if (!scsi_host_get(host)) {
@@ -4279,12 +4310,11 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
                         scsi_host_put(host);
                         continue;
                 }
-                mutex_unlock(&qla_tgt_mutex);
-
                 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
                 if (rc != 0)
                         scsi_host_put(host);
 
+                mutex_unlock(&qla_tgt_mutex);
                 return rc;
         }
         mutex_unlock(&qla_tgt_mutex);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 66e755cdde57..ce33d8c26406 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1001,7 +1001,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *,
 extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
 extern int qlt_mem_alloc(struct qla_hw_data *);
 extern void qlt_mem_free(struct qla_hw_data *);
-extern void qlt_stop_phase1(struct qla_tgt *);
+extern int qlt_stop_phase1(struct qla_tgt *);
 extern void qlt_stop_phase2(struct qla_tgt *);
 extern irqreturn_t qla83xx_msix_atio_q(int, void *);
 extern void qlt_83xx_iospace_config(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 75a141bbe74d..788c4fe2b0c9 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -182,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
         return 0;
 }
 
-static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
-        u64 wwpn, u64 wwnn)
-{
-        u8 b[8], b2[8];
-
-        put_unaligned_be64(wwpn, b);
-        put_unaligned_be64(wwnn, b2);
-        return snprintf(buf, len,
-            "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
-            "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
-            b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
-            b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
-}
-
 static char *tcm_qla2xxx_npiv_get_fabric_name(void)
 {
         return "qla2xxx_npiv";
@@ -227,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
         return lport->lport_naa_name;
 }
 
-static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
-{
-        struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
-                                struct tcm_qla2xxx_tpg, se_tpg);
-        struct tcm_qla2xxx_lport *lport = tpg->lport;
-
-        return &lport->lport_npiv_name[0];
-}
-
 static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
 {
         struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -941,15 +918,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable(
                         atomic_read(&tpg->lport_tpg_enabled));
 }
 
+static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
+{
+        struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+                                struct tcm_qla2xxx_tpg, tpg_base_work);
+        struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+        struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+        if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                  &se_tpg->tpg_group.cg_item)) {
+                atomic_set(&base_tpg->lport_tpg_enabled, 1);
+                qlt_enable_vha(base_vha);
+        }
+        complete(&base_tpg->tpg_base_comp);
+}
+
+static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
+{
+        struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+                                struct tcm_qla2xxx_tpg, tpg_base_work);
+        struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+        struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+        if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
+                atomic_set(&base_tpg->lport_tpg_enabled, 0);
+                configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                       &se_tpg->tpg_group.cg_item);
+        }
+        complete(&base_tpg->tpg_base_comp);
+}
+
 static ssize_t tcm_qla2xxx_tpg_store_enable(
         struct se_portal_group *se_tpg,
         const char *page,
         size_t count)
 {
-        struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
-        struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
-                        struct tcm_qla2xxx_lport, lport_wwn);
-        struct scsi_qla_host *vha = lport->qla_vha;
         struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
                         struct tcm_qla2xxx_tpg, se_tpg);
         unsigned long op;
@@ -964,19 +967,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
                 pr_err("Illegal value for tpg_enable: %lu\n", op);
                 return -EINVAL;
         }
-
         if (op) {
-                atomic_set(&tpg->lport_tpg_enabled, 1);
-                qlt_enable_vha(vha);
+                if (atomic_read(&tpg->lport_tpg_enabled))
+                        return -EEXIST;
+
+                INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
         } else {
-                if (!vha->vha_tgt.qla_tgt) {
-                        pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
-                        return -ENODEV;
-                }
-                atomic_set(&tpg->lport_tpg_enabled, 0);
-                qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+                if (!atomic_read(&tpg->lport_tpg_enabled))
+                        return count;
+
+                INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
         }
+        init_completion(&tpg->tpg_base_comp);
+        schedule_work(&tpg->tpg_base_work);
+        wait_for_completion(&tpg->tpg_base_comp);
 
+        if (op) {
+                if (!atomic_read(&tpg->lport_tpg_enabled))
+                        return -ENODEV;
+        } else {
+                if (atomic_read(&tpg->lport_tpg_enabled))
+                        return -EPERM;
+        }
         return count;
 }
 
@@ -1053,11 +1065,64 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
         /*
          * Clear local TPG=1 pointer for non NPIV mode.
          */
         lport->tpg_1 = NULL;
-
         kfree(tpg);
 }
 
+static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
+        struct se_portal_group *se_tpg,
+        char *page)
+{
+        return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
+        struct se_portal_group *se_tpg,
+        const char *page,
+        size_t count)
+{
+        struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+        struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+                        struct tcm_qla2xxx_lport, lport_wwn);
+        struct scsi_qla_host *vha = lport->qla_vha;
+        struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                        struct tcm_qla2xxx_tpg, se_tpg);
+        unsigned long op;
+        int rc;
+
+        rc = kstrtoul(page, 0, &op);
+        if (rc < 0) {
+                pr_err("kstrtoul() returned %d\n", rc);
+                return -EINVAL;
+        }
+        if ((op != 1) && (op != 0)) {
+                pr_err("Illegal value for tpg_enable: %lu\n", op);
+                return -EINVAL;
+        }
+        if (op) {
+                if (atomic_read(&tpg->lport_tpg_enabled))
+                        return -EEXIST;
+
+                atomic_set(&tpg->lport_tpg_enabled, 1);
+                qlt_enable_vha(vha);
+        } else {
+                if (!atomic_read(&tpg->lport_tpg_enabled))
+                        return count;
+
+                atomic_set(&tpg->lport_tpg_enabled, 0);
+                qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+        }
+
+        return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+        &tcm_qla2xxx_npiv_tpg_enable.attr,
+        NULL,
+};
+
 static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
         struct se_wwn *wwn,
         struct config_group *group,
@@ -1650,6 +1715,9 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
         struct scsi_qla_host *npiv_vha;
         struct tcm_qla2xxx_lport *lport =
                         (struct tcm_qla2xxx_lport *)target_lport_ptr;
+        struct tcm_qla2xxx_lport *base_lport =
+                        (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+        struct tcm_qla2xxx_tpg *base_tpg;
         struct fc_vport_identifiers vport_id;
 
         if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1657,6 +1725,13 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
                 return -EPERM;
         }
 
+        if (!base_lport || !base_lport->tpg_1 ||
+            !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+                pr_err("qla2xxx base_lport or tpg_1 not available\n");
+                return -EPERM;
+        }
+        base_tpg = base_lport->tpg_1;
+
         memset(&vport_id, 0, sizeof(vport_id));
         vport_id.port_name = npiv_wwpn;
         vport_id.node_name = npiv_wwnn;
@@ -1675,7 +1750,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
         npiv_vha = (struct scsi_qla_host *)vport->dd_data;
         npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
         lport->qla_vha = npiv_vha;
-
         scsi_host_get(npiv_vha->host);
         return 0;
 }
@@ -1714,8 +1788,6 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
         }
         lport->lport_npiv_wwpn = npiv_wwpn;
         lport->lport_npiv_wwnn = npiv_wwnn;
-        tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
-                        TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
         sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
 
         ret = tcm_qla2xxx_init_lport(lport);
@@ -1824,7 +1896,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
 static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
         .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
         .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
-        .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+        .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
         .tpg_get_tag = tcm_qla2xxx_get_tag,
         .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
         .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
@@ -1935,7 +2007,7 @@ static int tcm_qla2xxx_register_configfs(void)
          */
         npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
         npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
-            tcm_qla2xxx_tpg_attrs;
+            tcm_qla2xxx_npiv_tpg_attrs;
         npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
         npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
         npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 275d8b9a7a34..33aaac8c7d59 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,8 +4,6 @@
 #define TCM_QLA2XXX_VERSION        "v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN        32
-/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */
-#define TCM_QLA2XXX_NPIV_NAMELEN 66
 
 #include "qla_target.h"
 
@@ -43,6 +41,9 @@ struct tcm_qla2xxx_tpg {
         struct tcm_qla2xxx_tpg_attrib tpg_attrib;
         /* Returned by tcm_qla2xxx_make_tpg() */
         struct se_portal_group se_tpg;
+        /* Items for dealing with configfs_depend_item */
+        struct completion tpg_base_comp;
+        struct work_struct tpg_base_work;
 };
 
 struct tcm_qla2xxx_fc_loopid {
@@ -62,8 +63,6 @@ struct tcm_qla2xxx_lport {
         char lport_name[TCM_QLA2XXX_NAMELEN];
         /* ASCII formatted naa WWPN for VPD page 83 etc */
         char lport_naa_name[TCM_QLA2XXX_NAMELEN];
-        /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
-        char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
         /* map for fc_port pointers in 24-bit FC Port ID space */
         struct btree_head32 lport_fcport_map;
         /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a4489444ffbc..42f18fc1067b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1074,7 +1074,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
         struct scatterlist *psg;
         void *paddr, *addr;
         unsigned int i, len, left;
-        unsigned int offset = 0;
+        unsigned int offset = sg_off;
 
         left = sectors * dev->prot_length;
 
@@ -1084,11 +1084,10 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
                 if (offset >= sg->length) {
                         sg = sg_next(sg);
                         offset = 0;
-                        sg_off = sg->offset;
                 }
 
                 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-                addr = kmap_atomic(sg_page(sg)) + sg_off;
+                addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
 
                 if (read)
                         memcpy(paddr, addr, len);
@@ -1163,7 +1162,7 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
 {
         struct se_device *dev = cmd->se_dev;
         struct se_dif_v1_tuple *sdt;
-        struct scatterlist *dsg;
+        struct scatterlist *dsg, *psg = sg;
         sector_t sector = start;
         void *daddr, *paddr;
         int i, j, offset = sg_off;
@@ -1171,14 +1170,14 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
 
         for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
                 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
-                paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+                paddr = kmap_atomic(sg_page(psg)) + sg->offset;
 
                 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
 
-                        if (offset >= sg->length) {
+                        if (offset >= psg->length) {
                                 kunmap_atomic(paddr);
-                                sg = sg_next(sg);
-                                paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+                                psg = sg_next(psg);
+                                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
                                 offset = 0;
                         }
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 24b4f65d8777..2956250b7225 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1601,6 +1601,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
         case TCM_CHECK_CONDITION_ABORT_CMD:
         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
         case TCM_CHECK_CONDITION_NOT_READY:
+        case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
                 break;
         case TCM_OUT_OF_RESOURCES:
                 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0a025b8e2a12..e48d4a672580 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1001,6 +1001,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                         break;
                 }
 
+                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+                if (unlikely(v_req.lun[0] != 1)) {
+                        vhost_scsi_send_bad_target(vs, vq, head, out);
+                        continue;
+                }
+
                 /* Extract the tpgt */
                 target = v_req.lun[1];
                 tpg = ACCESS_ONCE(vs_tpg[target]);