author      Linus Torvalds <torvalds@linux-foundation.org>   2014-06-03 15:57:53 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2014-06-03 15:57:53 -0400
commit      776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree        f6a6136374642323cfefd7d6399ea429f9018ade /drivers/target
parent      59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent      3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
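The conversion applied throughout the drivers/target hunks below is purely mechanical: each operation-specific barrier (smp_mb__after_atomic_inc(), smp_mb__after_atomic_dec(), and friends) becomes one of the two streamlined names. As a minimal illustrative sketch, not taken from this diff (the counter and the get/put helpers here are hypothetical):

    #include <linux/atomic.h>

    static atomic_t example_ref_cnt;        /* hypothetical refcount for illustration */

    static void example_get(void)
    {
            atomic_inc(&example_ref_cnt);
            /* was: smp_mb__after_atomic_inc(); */
            smp_mb__after_atomic();         /* one name covers inc, dec and bitops */
    }

    static void example_put(void)
    {
            /* was: smp_mb__before_atomic_dec(); */
            smp_mb__before_atomic();
            atomic_dec(&example_ref_cnt);
    }

The target hunks below apply the same one-line substitution, mostly around atomic_inc()/atomic_dec() on reference counts.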
Diffstat (limited to 'drivers/target')
-rw-r--r--   drivers/target/loopback/tcm_loop.c        4
-rw-r--r--   drivers/target/target_core_alua.c        26
-rw-r--r--   drivers/target/target_core_device.c       6
-rw-r--r--   drivers/target/target_core_iblock.c       2
-rw-r--r--   drivers/target/target_core_pr.c          56
-rw-r--r--   drivers/target/target_core_transport.c   16
-rw-r--r--   drivers/target/target_core_ua.c          10
7 files changed, 60 insertions, 60 deletions
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c886ad1c39fb..73ab75ddaf42 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -951,7 +951,7 @@ static int tcm_loop_port_link(
         struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

         atomic_inc(&tl_tpg->tl_tpg_port_count);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         /*
          * Add Linux/SCSI struct scsi_device by HCTL
          */
@@ -986,7 +986,7 @@ static void tcm_loop_port_unlink(
         scsi_device_put(sd);

         atomic_dec(&tl_tpg->tl_tpg_port_count);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();

         pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fcbe6125b73e..0b79b852f4b2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -393,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
                         continue;

                         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                        smp_mb__after_atomic_inc();
+                        smp_mb__after_atomic();

                         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

@@ -404,7 +404,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)

                         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                        smp_mb__after_atomic_dec();
+                        smp_mb__after_atomic();
                         break;
                 }
                 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -990,7 +990,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
                  * TARGET PORT GROUPS command
                  */
                 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

                 spin_lock_bh(&port->sep_alua_lock);
@@ -1020,7 +1020,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)

                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
         }
         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
         /*
@@ -1054,7 +1054,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
                 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();
         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

         if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1116,7 +1116,7 @@ static int core_alua_do_transition_tg_pt(
          */
         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

         if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1159,7 +1159,7 @@ int core_alua_do_port_transition(
         spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
         lu_gp = local_lu_gp_mem->lu_gp;
         atomic_inc(&lu_gp->lu_gp_ref_cnt);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
         /*
          * For storage objects that are members of the 'default_lu_gp',
@@ -1176,7 +1176,7 @@ int core_alua_do_port_transition(
                 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                    new_state, explicit);
                 atomic_dec(&lu_gp->lu_gp_ref_cnt);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
                 return rc;
         }
         /*
@@ -1190,7 +1190,7 @@ int core_alua_do_port_transition(

                 dev = lu_gp_mem->lu_gp_mem_dev;
                 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock(&lu_gp->lu_gp_lock);

                 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1219,7 +1219,7 @@ int core_alua_do_port_transition(
                                 tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                         }
                         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                        smp_mb__after_atomic_inc();
+                        smp_mb__after_atomic();
                         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                         /*
                          * core_alua_do_transition_tg_pt() will always return
@@ -1230,7 +1230,7 @@ int core_alua_do_port_transition(

                         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                        smp_mb__after_atomic_dec();
+                        smp_mb__after_atomic();
                         if (rc)
                                 break;
                 }
@@ -1238,7 +1238,7 @@ int core_alua_do_port_transition(

                 spin_lock(&lu_gp->lu_gp_lock);
                 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
         }
         spin_unlock(&lu_gp->lu_gp_lock);

@@ -1252,7 +1252,7 @@ int core_alua_do_port_transition(
         }

         atomic_dec(&lu_gp->lu_gp_ref_cnt);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();
         return rc;
 }

diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 26416c15d65c..11d26fe65bfb 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -225,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
                         continue;

                 atomic_inc(&deve->pr_ref_count);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock_irq(&nacl->device_list_lock);

                 return deve;
@@ -1396,7 +1396,7 @@ int core_dev_add_initiator_node_lun_acl(
         spin_lock(&lun->lun_acl_lock);
         list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
         atomic_inc(&lun->lun_acl_count);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         spin_unlock(&lun->lun_acl_lock);

         pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1430,7 +1430,7 @@ int core_dev_del_initiator_node_lun_acl(
         spin_lock(&lun->lun_acl_lock);
         list_del(&lacl->lacl_list);
         atomic_dec(&lun->lun_acl_count);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();
         spin_unlock(&lun->lun_acl_lock);

         core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 9e0232cca92e..7e6b857c6b3f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -323,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err)
                  * Bump the ib_bio_err_cnt and release bio.
                  */
                 atomic_inc(&ibr->ib_bio_err_cnt);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
         }

         bio_put(bio);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3013287a2aaa..df357862286e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -675,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
         spin_lock(&dev->se_port_lock);
         list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
                 atomic_inc(&port->sep_tg_pt_ref_cnt);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock(&dev->se_port_lock);

                 spin_lock_bh(&port->sep_alua_lock);
@@ -710,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                 continue;

                         atomic_inc(&deve_tmp->pr_ref_count);
-                        smp_mb__after_atomic_inc();
+                        smp_mb__after_atomic();
                         spin_unlock_bh(&port->sep_alua_lock);
                         /*
                          * Grab a configfs group dependency that is released
@@ -723,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                 pr_err("core_scsi3_lunacl_depend"
                                                 "_item() failed\n");
                                 atomic_dec(&port->sep_tg_pt_ref_cnt);
-                                smp_mb__after_atomic_dec();
+                                smp_mb__after_atomic();
                                 atomic_dec(&deve_tmp->pr_ref_count);
-                                smp_mb__after_atomic_dec();
+                                smp_mb__after_atomic();
                                 goto out;
                         }
                         /*
@@ -740,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                         sa_res_key, all_tg_pt, aptpl);
                         if (!pr_reg_atp) {
                                 atomic_dec(&port->sep_tg_pt_ref_cnt);
-                                smp_mb__after_atomic_dec();
+                                smp_mb__after_atomic();
                                 atomic_dec(&deve_tmp->pr_ref_count);
-                                smp_mb__after_atomic_dec();
+                                smp_mb__after_atomic();
                                 core_scsi3_lunacl_undepend_item(deve_tmp);
                                 goto out;
                         }
@@ -755,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(

                 spin_lock(&dev->se_port_lock);
                 atomic_dec(&port->sep_tg_pt_ref_cnt);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
         }
         spin_unlock(&dev->se_port_lock);

@@ -1110,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
                                 continue;
                         }
                         atomic_inc(&pr_reg->pr_res_holders);
-                        smp_mb__after_atomic_inc();
+                        smp_mb__after_atomic();
                         spin_unlock(&pr_tmpl->registration_lock);
                         return pr_reg;
                 }
@@ -1125,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
                         continue;

                 atomic_inc(&pr_reg->pr_res_holders);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock(&pr_tmpl->registration_lock);
                 return pr_reg;
         }
@@ -1155,7 +1155,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
 static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
 {
         atomic_dec(&pr_reg->pr_res_holders);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();
 }

 static int core_scsi3_check_implicit_release(
@@ -1349,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
                         &tpg->tpg_group.cg_item);

         atomic_dec(&tpg->tpg_pr_ref_count);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();
 }

 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1369,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)

         if (nacl->dynamic_node_acl) {
                 atomic_dec(&nacl->acl_pr_ref_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
                 return;
         }

@@ -1377,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
                         &nacl->acl_group.cg_item);

         atomic_dec(&nacl->acl_pr_ref_count);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();
 }

 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1408,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
          */
         if (!lun_acl) {
                 atomic_dec(&se_deve->pr_ref_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
                 return;
         }
         nacl = lun_acl->se_lun_nacl;
@@ -1418,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
                         &lun_acl->se_lun_group.cg_item);

         atomic_dec(&se_deve->pr_ref_count);
-        smp_mb__after_atomic_dec();
+        smp_mb__after_atomic();
 }

 static sense_reason_t
@@ -1552,14 +1552,14 @@ core_scsi3_decode_spec_i_port(
                         continue;

                 atomic_inc(&tmp_tpg->tpg_pr_ref_count);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock(&dev->se_port_lock);

                 if (core_scsi3_tpg_depend_item(tmp_tpg)) {
                         pr_err(" core_scsi3_tpg_depend_item()"
                                         " for tmp_tpg\n");
                         atomic_dec(&tmp_tpg->tpg_pr_ref_count);
-                        smp_mb__after_atomic_dec();
+                        smp_mb__after_atomic();
                         ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                         goto out_unmap;
                 }
@@ -1573,7 +1573,7 @@ core_scsi3_decode_spec_i_port(
                                 tmp_tpg, i_str);
                 if (dest_node_acl) {
                         atomic_inc(&dest_node_acl->acl_pr_ref_count);
-                        smp_mb__after_atomic_inc();
+                        smp_mb__after_atomic();
                 }
                 spin_unlock_irq(&tmp_tpg->acl_node_lock);

@@ -1587,7 +1587,7 @@ core_scsi3_decode_spec_i_port(
                         pr_err("configfs_depend_item() failed"
                                 " for dest_node_acl->acl_group\n");
                         atomic_dec(&dest_node_acl->acl_pr_ref_count);
-                        smp_mb__after_atomic_dec();
+                        smp_mb__after_atomic();
                         core_scsi3_tpg_undepend_item(tmp_tpg);
                         ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                         goto out_unmap;
@@ -1647,7 +1647,7 @@ core_scsi3_decode_spec_i_port(
                         pr_err("core_scsi3_lunacl_depend_item()"
                                         " failed\n");
                         atomic_dec(&dest_se_deve->pr_ref_count);
-                        smp_mb__after_atomic_dec();
+                        smp_mb__after_atomic();
                         core_scsi3_nodeacl_undepend_item(dest_node_acl);
                         core_scsi3_tpg_undepend_item(dest_tpg);
                         ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -3168,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
                         continue;

                 atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock(&dev->se_port_lock);

                 if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
                         pr_err("core_scsi3_tpg_depend_item() failed"
                                 " for dest_se_tpg\n");
                         atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
-                        smp_mb__after_atomic_dec();
+                        smp_mb__after_atomic();
                         ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                         goto out_put_pr_reg;
                 }
@@ -3273,7 +3273,7 @@ after_iport_check:
                                 initiator_str);
         if (dest_node_acl) {
                 atomic_inc(&dest_node_acl->acl_pr_ref_count);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
         }
         spin_unlock_irq(&dest_se_tpg->acl_node_lock);

@@ -3289,7 +3289,7 @@ after_iport_check:
                 pr_err("core_scsi3_nodeacl_depend_item() for"
                                 " dest_node_acl\n");
                 atomic_dec(&dest_node_acl->acl_pr_ref_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
                 dest_node_acl = NULL;
                 ret = TCM_INVALID_PARAMETER_LIST;
                 goto out;
@@ -3314,7 +3314,7 @@ after_iport_check:
         if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
                 pr_err("core_scsi3_lunacl_depend_item() failed\n");
                 atomic_dec(&dest_se_deve->pr_ref_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
                 dest_se_deve = NULL;
                 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                 goto out;
@@ -3880,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                 add_desc_len = 0;

                 atomic_inc(&pr_reg->pr_res_holders);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 spin_unlock(&pr_tmpl->registration_lock);
                 /*
                  * Determine expected length of $FABRIC_MOD specific
@@ -3894,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                                 " out of buffer: %d\n", cmd->data_length);
                         spin_lock(&pr_tmpl->registration_lock);
                         atomic_dec(&pr_reg->pr_res_holders);
-                        smp_mb__after_atomic_dec();
+                        smp_mb__after_atomic();
                         break;
                 }
                 /*
@@ -3956,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)

                 spin_lock(&pr_tmpl->registration_lock);
                 atomic_dec(&pr_reg->pr_res_holders);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
                 /*
                  * Set the ADDITIONAL DESCRIPTOR LENGTH
                  */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 789aa9eb0a1e..2179feed0d63 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -736,7 +736,7 @@ void target_qf_do_work(struct work_struct *work)
         list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                 list_del(&cmd->se_qf_node);
                 atomic_dec(&dev->dev_qf_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();

                 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                         " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1149,7 +1149,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
          * Dormant to Active status.
          */
         cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
                         cmd->se_ordered_id, cmd->sam_task_attr,
                         dev->transport->name);
@@ -1706,7 +1706,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
                 return false;
         case MSG_ORDERED_TAG:
                 atomic_inc(&dev->dev_ordered_sync);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();

                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
                         " se_ordered_id: %u\n",
@@ -1724,7 +1724,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
                  * For SIMPLE and UNTAGGED Task Attribute commands
                  */
                 atomic_inc(&dev->simple_cmds);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 break;
         }

@@ -1829,7 +1829,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)

         if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
                 atomic_dec(&dev->simple_cmds);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
                 dev->dev_cur_ordered_id++;
                 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
                         " SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1841,7 +1841,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                         cmd->se_ordered_id);
         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
                 atomic_dec(&dev->dev_ordered_sync);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();

                 dev->dev_cur_ordered_id++;
                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1900,7 +1900,7 @@ static void transport_handle_queue_full(
         spin_lock_irq(&dev->qf_cmd_lock);
         list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
         atomic_inc(&dev->dev_qf_count);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

         schedule_work(&cmd->se_dev->qf_work_queue);
@@ -2875,7 +2875,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
                 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                         cmd->transport_state |= CMD_T_ABORTED;
                         cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
-                        smp_mb__after_atomic_inc();
+                        smp_mb__after_atomic();
                         return;
                 }
         }
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 505519b10cb7..101858e245b3 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -162,7 +162,7 @@ int core_scsi3_ua_allocate(
                 spin_unlock_irq(&nacl->device_list_lock);

                 atomic_inc(&deve->ua_count);
-                smp_mb__after_atomic_inc();
+                smp_mb__after_atomic();
                 return 0;
         }
         list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -175,7 +175,7 @@ int core_scsi3_ua_allocate(
                         asc, ascq);

         atomic_inc(&deve->ua_count);
-        smp_mb__after_atomic_inc();
+        smp_mb__after_atomic();
         return 0;
 }

@@ -190,7 +190,7 @@ void core_scsi3_ua_release_all(
                 kmem_cache_free(se_ua_cache, ua);

                 atomic_dec(&deve->ua_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
         }
         spin_unlock(&deve->ua_lock);
 }
@@ -251,7 +251,7 @@ void core_scsi3_ua_for_check_condition(
                 kmem_cache_free(se_ua_cache, ua);

                 atomic_dec(&deve->ua_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
         }
         spin_unlock(&deve->ua_lock);
         spin_unlock_irq(&nacl->device_list_lock);
@@ -310,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense(
                 kmem_cache_free(se_ua_cache, ua);

                 atomic_dec(&deve->ua_count);
-                smp_mb__after_atomic_dec();
+                smp_mb__after_atomic();
         }
         spin_unlock(&deve->ua_lock);
         spin_unlock_irq(&nacl->device_list_lock);