author		Joern Engel <joern@logfs.org>	2014-09-16 16:23:12 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2014-10-01 17:39:06 -0400
commit		33940d09937276cd3c81f2874faf43e37c2db0e2 (patch)
tree		2c3043e6902ee4e8e23b947f2e30e50967e99c5b
parent		74ed7e62289dc6d388996d7c8f89c2e7e95b9657 (diff)
target: encapsulate smp_mb__after_atomic()
The target code has a rather generous helping of smp_mb__after_atomic()
throughout the code base. Most atomic operations were followed by one and
none were preceded by smp_mb__before_atomic(), nor accompanied by a comment
explaining the need for a barrier.

Instead of trying to prove for every case whether or not it is needed, this
patch introduces atomic_inc_mb() and atomic_dec_mb(), which explicitly
include the memory barriers before and after the atomic operation. For now
they are defined in a target header, although they could be of general use.

Most of the existing atomic/mb combinations were replaced by the new
helpers. In a few cases the atomic was sandwiched in spin_lock/spin_unlock
and I simply removed the barrier.

I suspect that in most cases the correct conversion would have been to drop
the barrier. I also suspect that a few cases exist where a) the barrier was
necessary and b) a second barrier before the atomic would have been
necessary and got added by this patch.

Signed-off-by: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
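In short, every open-coded pair of an atomic operation followed by
smp_mb__after_atomic() collapses into a single helper call. A minimal sketch
of the conversion (obj and its ref_cnt field are hypothetical placeholders;
the helper body mirrors the one this patch adds to
include/target/target_core_base.h):

	/* Before: atomic op followed by a barrier, with none before it. */
	atomic_inc(&obj->ref_cnt);
	smp_mb__after_atomic();

	/* Helper introduced by the patch: barriers on both sides. */
	static inline void atomic_inc_mb(atomic_t *v)
	{
		smp_mb__before_atomic();
		atomic_inc(v);
		smp_mb__after_atomic();
	}

	/* After: the call site becomes a single line. */
	atomic_inc_mb(&obj->ref_cnt);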
-rw-r--r--	drivers/target/loopback/tcm_loop.c	6
-rw-r--r--	drivers/target/target_core_alua.c	33
-rw-r--r--	drivers/target/target_core_device.c	9
-rw-r--r--	drivers/target/target_core_pr.c	90
-rw-r--r--	drivers/target/target_core_transport.c	18
-rw-r--r--	drivers/target/target_core_ua.c	15
-rw-r--r--	include/target/target_core_base.h	14
7 files changed, 70 insertions(+), 115 deletions(-)
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 340de9d92b15..a7f6dc646045 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -960,8 +960,7 @@ static int tcm_loop_port_link(
 		struct tcm_loop_tpg, tl_se_tpg);
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
-	atomic_inc(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic();
+	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 	/*
 	 * Add Linux/SCSI struct scsi_device by HCTL
 	 */
@@ -995,8 +994,7 @@ static void tcm_loop_port_unlink(
 	scsi_remove_device(sd);
 	scsi_device_put(sd);
 
-	atomic_dec(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 
 	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fbc5ebb5f761..fb87780929d2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -392,8 +392,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 			if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
 				continue;
 
-			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic();
+			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 
 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
@@ -403,8 +402,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 				found = true;
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic();
+			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			break;
 		}
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -998,8 +996,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		 * every I_T nexus other than the I_T nexus on which the SET
 		 * TARGET PORT GROUPS command
 		 */
-		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt);
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 
 		spin_lock_bh(&port->sep_alua_lock);
@@ -1028,8 +1025,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		spin_unlock_bh(&port->sep_alua_lock);
 
 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt);
 	}
 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	/*
@@ -1063,7 +1059,6 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1125,7 +1120,6 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1168,7 +1162,6 @@ int core_alua_do_port_transition(
 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = local_lu_gp_mem->lu_gp;
 	atomic_inc(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic();
 	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
 	/*
 	 * For storage objects that are members of the 'default_lu_gp',
@@ -1184,8 +1177,7 @@ int core_alua_do_port_transition(
 		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
 		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
 						   new_state, explicit);
-		atomic_dec(&lu_gp->lu_gp_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
 		return rc;
 	}
 	/*
@@ -1198,8 +1190,7 @@ int core_alua_do_port_transition(
 			    lu_gp_mem_list) {
 
 		dev = lu_gp_mem->lu_gp_mem_dev;
-		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
 		spin_unlock(&lu_gp->lu_gp_lock);
 
 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1227,8 +1218,7 @@ int core_alua_do_port_transition(
 				tg_pt_gp->tg_pt_gp_alua_port = NULL;
 				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
 			}
-			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic();
+			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
@@ -1238,16 +1228,14 @@ int core_alua_do_port_transition(
 					new_state, explicit);
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic();
+			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			if (rc)
 				break;
 		}
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 		spin_lock(&lu_gp->lu_gp_lock);
-		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
@@ -1260,8 +1248,7 @@ int core_alua_do_port_transition(
 			core_alua_dump_state(new_state));
 	}
 
-	atomic_dec(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
 	return rc;
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e784284cbc2e..f5057a2f4ed1 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -224,8 +224,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 		if (port->sep_rtpi != rtpi)
 			continue;
 
-		atomic_inc(&deve->pr_ref_count);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&deve->pr_ref_count);
 		spin_unlock_irq(&nacl->device_list_lock);
 
 		return deve;
@@ -1388,8 +1387,7 @@ int core_dev_add_initiator_node_lun_acl(
 
 	spin_lock(&lun->lun_acl_lock);
 	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
-	atomic_inc(&lun->lun_acl_count);
-	smp_mb__after_atomic();
+	atomic_inc_mb(&lun->lun_acl_count);
 	spin_unlock(&lun->lun_acl_lock);
 
 	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1422,8 +1420,7 @@ int core_dev_del_initiator_node_lun_acl(
 
 	spin_lock(&lun->lun_acl_lock);
 	list_del(&lacl->lacl_list);
-	atomic_dec(&lun->lun_acl_count);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&lun->lun_acl_count);
 	spin_unlock(&lun->lun_acl_lock);
 
 	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 281d52e3fe99..48a801045176 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -674,8 +674,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 	 */
 	spin_lock(&dev->se_port_lock);
 	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
-		atomic_inc(&port->sep_tg_pt_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
 		spin_unlock(&dev->se_port_lock);
 
 		spin_lock_bh(&port->sep_alua_lock);
@@ -709,8 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 			if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
 				continue;
 
-			atomic_inc(&deve_tmp->pr_ref_count);
-			smp_mb__after_atomic();
+			atomic_inc_mb(&deve_tmp->pr_ref_count);
 			spin_unlock_bh(&port->sep_alua_lock);
 			/*
 			 * Grab a configfs group dependency that is released
@@ -722,10 +720,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 			if (ret < 0) {
 				pr_err("core_scsi3_lunacl_depend"
 						"_item() failed\n");
-				atomic_dec(&port->sep_tg_pt_ref_cnt);
-				smp_mb__after_atomic();
-				atomic_dec(&deve_tmp->pr_ref_count);
-				smp_mb__after_atomic();
+				atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
+				atomic_dec_mb(&deve_tmp->pr_ref_count);
 				goto out;
 			}
 			/*
@@ -739,10 +735,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 						nacl_tmp, deve_tmp, NULL,
 						sa_res_key, all_tg_pt, aptpl);
 			if (!pr_reg_atp) {
-				atomic_dec(&port->sep_tg_pt_ref_cnt);
-				smp_mb__after_atomic();
-				atomic_dec(&deve_tmp->pr_ref_count);
-				smp_mb__after_atomic();
+				atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
+				atomic_dec_mb(&deve_tmp->pr_ref_count);
 				core_scsi3_lunacl_undepend_item(deve_tmp);
 				goto out;
 			}
@@ -754,8 +748,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 		spin_unlock_bh(&port->sep_alua_lock);
 
 		spin_lock(&dev->se_port_lock);
-		atomic_dec(&port->sep_tg_pt_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
 	}
 	spin_unlock(&dev->se_port_lock);
 
@@ -1109,8 +1102,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
 			if (dev->dev_attrib.enforce_pr_isids)
 				continue;
 		}
-		atomic_inc(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&pr_reg->pr_res_holders);
 		spin_unlock(&pr_tmpl->registration_lock);
 		return pr_reg;
 	}
@@ -1124,8 +1116,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
 		if (strcmp(isid, pr_reg->pr_reg_isid))
 			continue;
 
-		atomic_inc(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&pr_reg->pr_res_holders);
 		spin_unlock(&pr_tmpl->registration_lock);
 		return pr_reg;
 	}
@@ -1154,8 +1145,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
 
 static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
 {
-	atomic_dec(&pr_reg->pr_res_holders);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&pr_reg->pr_res_holders);
 }
 
 static int core_scsi3_check_implicit_release(
@@ -1348,8 +1338,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
 	configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
 			&tpg->tpg_group.cg_item);
 
-	atomic_dec(&tpg->tpg_pr_ref_count);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&tpg->tpg_pr_ref_count);
 }
 
 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1368,16 +1357,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 	struct se_portal_group *tpg = nacl->se_tpg;
 
 	if (nacl->dynamic_node_acl) {
-		atomic_dec(&nacl->acl_pr_ref_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&nacl->acl_pr_ref_count);
 		return;
 	}
 
 	configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
 			&nacl->acl_group.cg_item);
 
-	atomic_dec(&nacl->acl_pr_ref_count);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&nacl->acl_pr_ref_count);
 }
 
 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1407,8 +1394,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
 	 * For nacl->dynamic_node_acl=1
 	 */
 	if (!lun_acl) {
-		atomic_dec(&se_deve->pr_ref_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&se_deve->pr_ref_count);
 		return;
 	}
 	nacl = lun_acl->se_lun_nacl;
@@ -1417,8 +1403,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
 	configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
 			&lun_acl->se_lun_group.cg_item);
 
-	atomic_dec(&se_deve->pr_ref_count);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&se_deve->pr_ref_count);
 }
 
 static sense_reason_t
@@ -1551,15 +1536,13 @@ core_scsi3_decode_spec_i_port(
 			if (!i_str)
 				continue;
 
-			atomic_inc(&tmp_tpg->tpg_pr_ref_count);
-			smp_mb__after_atomic();
+			atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
 			spin_unlock(&dev->se_port_lock);
 
 			if (core_scsi3_tpg_depend_item(tmp_tpg)) {
 				pr_err(" core_scsi3_tpg_depend_item()"
 					" for tmp_tpg\n");
-				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
-				smp_mb__after_atomic();
+				atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
 				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 				goto out_unmap;
 			}
@@ -1571,10 +1554,8 @@ core_scsi3_decode_spec_i_port(
 			spin_lock_irq(&tmp_tpg->acl_node_lock);
 			dest_node_acl = __core_tpg_get_initiator_node_acl(
 						tmp_tpg, i_str);
-			if (dest_node_acl) {
-				atomic_inc(&dest_node_acl->acl_pr_ref_count);
-				smp_mb__after_atomic();
-			}
+			if (dest_node_acl)
+				atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
 			spin_unlock_irq(&tmp_tpg->acl_node_lock);
 
 			if (!dest_node_acl) {
@@ -1586,8 +1567,7 @@ core_scsi3_decode_spec_i_port(
 			if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
 				pr_err("configfs_depend_item() failed"
 					" for dest_node_acl->acl_group\n");
-				atomic_dec(&dest_node_acl->acl_pr_ref_count);
-				smp_mb__after_atomic();
+				atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
 				core_scsi3_tpg_undepend_item(tmp_tpg);
 				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 				goto out_unmap;
@@ -1646,8 +1626,7 @@ core_scsi3_decode_spec_i_port(
 		if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
 			pr_err("core_scsi3_lunacl_depend_item()"
 					" failed\n");
-			atomic_dec(&dest_se_deve->pr_ref_count);
-			smp_mb__after_atomic();
+			atomic_dec_mb(&dest_se_deve->pr_ref_count);
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
 			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -3167,15 +3146,13 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
 		if (!dest_tf_ops)
 			continue;
 
-		atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count);
 		spin_unlock(&dev->se_port_lock);
 
 		if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
 			pr_err("core_scsi3_tpg_depend_item() failed"
 				" for dest_se_tpg\n");
-			atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
-			smp_mb__after_atomic();
+			atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count);
 			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			goto out_put_pr_reg;
 		}
@@ -3271,10 +3248,8 @@ after_iport_check:
 	spin_lock_irq(&dest_se_tpg->acl_node_lock);
 	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
 				initiator_str);
-	if (dest_node_acl) {
-		atomic_inc(&dest_node_acl->acl_pr_ref_count);
-		smp_mb__after_atomic();
-	}
+	if (dest_node_acl)
+		atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
 	spin_unlock_irq(&dest_se_tpg->acl_node_lock);
 
 	if (!dest_node_acl) {
@@ -3288,8 +3263,7 @@ after_iport_check:
 	if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
 		pr_err("core_scsi3_nodeacl_depend_item() for"
 			" dest_node_acl\n");
-		atomic_dec(&dest_node_acl->acl_pr_ref_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
 		dest_node_acl = NULL;
 		ret = TCM_INVALID_PARAMETER_LIST;
 		goto out;
@@ -3313,8 +3287,7 @@ after_iport_check:
 
 	if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
 		pr_err("core_scsi3_lunacl_depend_item() failed\n");
-		atomic_dec(&dest_se_deve->pr_ref_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&dest_se_deve->pr_ref_count);
 		dest_se_deve = NULL;
 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		goto out;
@@ -3879,8 +3852,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 		se_tpg = pr_reg->pr_reg_nacl->se_tpg;
 		add_desc_len = 0;
 
-		atomic_inc(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&pr_reg->pr_res_holders);
 		spin_unlock(&pr_tmpl->registration_lock);
 		/*
 		 * Determine expected length of $FABRIC_MOD specific
@@ -3893,8 +3865,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 			pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
 				" out of buffer: %d\n", cmd->data_length);
 			spin_lock(&pr_tmpl->registration_lock);
-			atomic_dec(&pr_reg->pr_res_holders);
-			smp_mb__after_atomic();
+			atomic_dec_mb(&pr_reg->pr_res_holders);
 			break;
 		}
 		/*
@@ -3955,8 +3926,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 				se_nacl, pr_reg, &format_code, &buf[off+4]);
 
 		spin_lock(&pr_tmpl->registration_lock);
-		atomic_dec(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&pr_reg->pr_res_holders);
 		/*
 		 * Set the ADDITIONAL DESCRIPTOR LENGTH
 		 */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0b43761ed85f..115632ee3ec8 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -752,8 +752,7 @@ void target_qf_do_work(struct work_struct *work)
 
 	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
 		list_del(&cmd->se_qf_node);
-		atomic_dec(&dev->dev_qf_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&dev->dev_qf_count);
 
 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1721,8 +1720,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 			 cmd->t_task_cdb[0], cmd->se_ordered_id);
 		return false;
 	case MSG_ORDERED_TAG:
-		atomic_inc(&dev->dev_ordered_sync);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&dev->dev_ordered_sync);
 
 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
 			" se_ordered_id: %u\n",
@@ -1739,8 +1737,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 		/*
 		 * For SIMPLE and UNTAGGED Task Attribute commands
 		 */
-		atomic_inc(&dev->simple_cmds);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&dev->simple_cmds);
 		break;
 	}
 
@@ -1844,8 +1841,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 		return;
 
 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
-		atomic_dec(&dev->simple_cmds);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&dev->simple_cmds);
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1856,8 +1852,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-		atomic_dec(&dev->dev_ordered_sync);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&dev->dev_ordered_sync);
 
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1915,8 +1910,7 @@ static void transport_handle_queue_full(
 {
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
-	atomic_inc(&dev->dev_qf_count);
-	smp_mb__after_atomic();
+	atomic_inc_mb(&dev->dev_qf_count);
 	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
 
 	schedule_work(&cmd->se_dev->qf_work_queue);
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 101858e245b3..1738b1646988 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -161,8 +161,7 @@ int core_scsi3_ua_allocate(
 			spin_unlock(&deve->ua_lock);
 			spin_unlock_irq(&nacl->device_list_lock);
 
-			atomic_inc(&deve->ua_count);
-			smp_mb__after_atomic();
+			atomic_inc_mb(&deve->ua_count);
 			return 0;
 		}
 		list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -174,8 +173,7 @@ int core_scsi3_ua_allocate(
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 		asc, ascq);
 
-	atomic_inc(&deve->ua_count);
-	smp_mb__after_atomic();
+	atomic_inc_mb(&deve->ua_count);
 	return 0;
 }
 
@@ -189,8 +187,7 @@ void core_scsi3_ua_release_all(
 		list_del(&ua->ua_nacl_list);
 		kmem_cache_free(se_ua_cache, ua);
 
-		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
 }
@@ -250,8 +247,7 @@ void core_scsi3_ua_for_check_condition(
 		list_del(&ua->ua_nacl_list);
 		kmem_cache_free(se_ua_cache, ua);
 
-		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
@@ -309,8 +305,7 @@ int core_scsi3_ua_clear_for_request_sense(
 		list_del(&ua->ua_nacl_list);
 		kmem_cache_free(se_ua_cache, ua);
 
-		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 9ec9864ecf38..b106240d8385 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -903,4 +903,18 @@ struct se_wwn {
 	struct config_group fabric_stat_group;
 };
 
+static inline void atomic_inc_mb(atomic_t *v)
+{
+	smp_mb__before_atomic();
+	atomic_inc(v);
+	smp_mb__after_atomic();
+}
+
+static inline void atomic_dec_mb(atomic_t *v)
+{
+	smp_mb__before_atomic();
+	atomic_dec(v);
+	smp_mb__after_atomic();
+}
+
 #endif /* TARGET_CORE_BASE_H */