author	Hannes Reinecke <hare@suse.de>	2015-06-11 04:01:26 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2015-06-17 02:27:04 -0400
commit	c51c8e7bcac966f209da83630fc8ca7e6cad279b (patch)
tree	a4823a8d68916d5cdd7a580d4b1bba4dd2a3fa5f
parent	316058134ffa0017199b460318e109aa79432cc6 (diff)
target: use 'se_dev_entry' when allocating UAs
We need to use 'se_dev_entry' as argument when allocating UAs, otherwise
we'll never see any UAs for an implicit ALUA state transition triggered
from userspace.

(Add target_ua_allocate_lun() common caller - nab)

Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--	drivers/target/target_core_alua.c	27
-rw-r--r--	drivers/target/target_core_pr.c	12
-rw-r--r--	drivers/target/target_core_transport.c	12
-rw-r--r--	drivers/target/target_core_ua.c	42
-rw-r--r--	drivers/target/target_core_ua.h	3
5 files changed, 54 insertions, 42 deletions
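After this change there are two call shapes for queueing a unit attention. Callers that already hold the struct se_dev_entry (such as the ALUA state-change loop below, which walks lun->lun_deve_list) pass it straight to core_scsi3_ua_allocate(); callers that only have an se_node_acl plus a mapped LUN go through the new target_ua_allocate_lun() wrapper, which performs the RCU-protected deve lookup and quietly returns if no entry is mapped. A minimal sketch of the two paths, using placeholder variables deve, nacl and mapped_lun (illustrative only, not a verbatim call site from the patch):

	/* Sketch only: the two UA allocation paths after this patch. */

	/* 1) Caller already holds the se_dev_entry, e.g. while iterating
	 *    lun->lun_deve_list under lun_deve_lock. */
	core_scsi3_ua_allocate(deve, 0x2A,
			ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);

	/* 2) Caller only knows the node ACL and the mapped LUN; the wrapper
	 *    looks up the deve under rcu_read_lock() and returns silently
	 *    if nacl is NULL or no entry is mapped at that LUN. */
	target_ua_allocate_lun(nacl, mapped_lun, 0x2A,
			ASCQ_2AH_RESERVATIONS_RELEASED);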
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 228a3c7925e5..aa2e4b103d43 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -972,23 +972,32 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
 		lacl = rcu_dereference_check(se_deve->se_lun_acl,
 					lockdep_is_held(&lun->lun_deve_lock));
+
 		/*
-		 * se_deve->se_lun_acl pointer may be NULL for a
-		 * entry created without explicit Node+MappedLUN ACLs
+		 * spc4r37 p.242:
+		 * After an explicit target port asymmetric access
+		 * state change, a device server shall establish a
+		 * unit attention condition with the additional sense
+		 * code set to ASYMMETRIC ACCESS STATE CHANGED for
+		 * the initiator port associated with every I_T nexus
+		 * other than the I_T nexus on which the SET TARGET
+		 * PORT GROUPS command was received.
 		 */
-		if (!lacl)
-			continue;
-
 		if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
 		     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
-		   (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
-		   (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
 		   (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
 		   (tg_pt_gp->tg_pt_gp_alua_lun == lun))
 			continue;
 
-		core_scsi3_ua_allocate(lacl->se_lun_nacl,
-			se_deve->mapped_lun, 0x2A,
+		/*
+		 * se_deve->se_lun_acl pointer may be NULL for a
+		 * entry created without explicit Node+MappedLUN ACLs
+		 */
+		if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+		    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
+			continue;
+
+		core_scsi3_ua_allocate(se_deve, 0x2A,
 			ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
 	}
 	spin_unlock_bh(&lun->lun_deve_lock);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 436e30b14a11..0bb329243dba 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2197,7 +2197,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
 				&pr_tmpl->registration_list,
 				pr_reg_list) {
 
-			core_scsi3_ua_allocate(
+			target_ua_allocate_lun(
 					pr_reg_p->pr_reg_nacl,
 					pr_reg_p->pr_res_mapped_lun,
 					0x2A,
@@ -2624,7 +2624,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
 		if (pr_reg_p == pr_reg)
 			continue;
 
-		core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+		target_ua_allocate_lun(pr_reg_p->pr_reg_nacl,
 				pr_reg_p->pr_res_mapped_lun,
 				0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
 	}
@@ -2709,7 +2709,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
 		 * additional sense code set to RESERVATIONS PREEMPTED.
 		 */
 		if (!calling_it_nexus)
-			core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+			target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun,
 				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
 	}
 	spin_unlock(&pr_tmpl->registration_lock);
@@ -2918,7 +2918,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 						NULL, 0);
 			}
 			if (!calling_it_nexus)
-				core_scsi3_ua_allocate(pr_reg_nacl,
+				target_ua_allocate_lun(pr_reg_nacl,
 					pr_res_mapped_lun, 0x2A,
 					ASCQ_2AH_REGISTRATIONS_PREEMPTED);
 		}
@@ -3024,7 +3024,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 		 * persistent reservation and/or registration, with the
 		 * additional sense code set to REGISTRATIONS PREEMPTED;
 		 */
-		core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+		target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
 				ASCQ_2AH_REGISTRATIONS_PREEMPTED);
 	}
 	spin_unlock(&pr_tmpl->registration_lock);
@@ -3057,7 +3057,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
 		if (calling_it_nexus)
 			continue;
 
-		core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+		target_ua_allocate_lun(pr_reg->pr_reg_nacl,
 				pr_reg->pr_res_mapped_lun, 0x2A,
 				ASCQ_2AH_RESERVATIONS_RELEASED);
 	}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index eed9580b7f6c..0364534f8d46 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1677,13 +1677,13 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
 		 */
 		if (cmd->se_sess &&
-		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
-			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-				cmd->orig_fe_lun, 0x2C,
-				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-
+		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+					cmd->orig_fe_lun, 0x2C,
+					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+		}
 		trace_target_cmd_complete(cmd);
-		ret = cmd->se_tfo-> queue_status(cmd);
+		ret = cmd->se_tfo->queue_status(cmd);
 		if (ret == -EAGAIN || ret == -ENOMEM)
 			goto queue_full;
 		goto check_stop;
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index e5062245171e..fc095aed5a88 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -87,18 +87,11 @@ target_scsi3_ua_check(struct se_cmd *cmd)
 }
 
 int core_scsi3_ua_allocate(
-	struct se_node_acl *nacl,
-	u64 unpacked_lun,
+	struct se_dev_entry *deve,
 	u8 asc,
 	u8 ascq)
 {
-	struct se_dev_entry *deve;
 	struct se_ua *ua, *ua_p, *ua_tmp;
-	/*
-	 * PASSTHROUGH OPS
-	 */
-	if (!nacl)
-		return -EINVAL;
 
 	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
 	if (!ua) {
@@ -110,12 +103,6 @@ int core_scsi3_ua_allocate(
 	ua->ua_asc = asc;
 	ua->ua_ascq = ascq;
 
-	rcu_read_lock();
-	deve = target_nacl_find_deve(nacl, unpacked_lun);
-	if (!deve) {
-		rcu_read_unlock();
-		return -EINVAL;
-	}
 	spin_lock(&deve->ua_lock);
 	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
 		/*
@@ -123,7 +110,6 @@ int core_scsi3_ua_allocate(
 		 */
 		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
 			spin_unlock(&deve->ua_lock);
-			rcu_read_unlock();
 			kmem_cache_free(se_ua_cache, ua);
 			return 0;
 		}
@@ -170,22 +156,38 @@ int core_scsi3_ua_allocate(
 		spin_unlock(&deve->ua_lock);
 
 		atomic_inc_mb(&deve->ua_count);
-		rcu_read_unlock();
 		return 0;
 	}
 	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
 	spin_unlock(&deve->ua_lock);
 
-	pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
-		" 0x%02x, ASCQ: 0x%02x\n",
-		nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+	pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
+		" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
 		asc, ascq);
 
 	atomic_inc_mb(&deve->ua_count);
-	rcu_read_unlock();
 	return 0;
 }
 
+void target_ua_allocate_lun(struct se_node_acl *nacl,
+			    u32 unpacked_lun, u8 asc, u8 ascq)
+{
+	struct se_dev_entry *deve;
+
+	if (!nacl)
+		return;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, unpacked_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return;
+	}
+
+	core_scsi3_ua_allocate(deve, asc, ascq);
+	rcu_read_unlock();
+}
+
 void core_scsi3_ua_release_all(
 	struct se_dev_entry *deve)
 {
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 6e592b10a8c0..96460bff490f 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -28,7 +28,8 @@
 extern struct kmem_cache *se_ua_cache;
 
 extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
-extern int core_scsi3_ua_allocate(struct se_node_acl *, u64, u8, u8);
+extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
+extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
 extern void core_scsi3_ua_release_all(struct se_dev_entry *);
 extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
 extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,