author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-08 14:31:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-08 14:31:16 -0400
commit	3f17ea6dea8ba5668873afa54628a91aaa3fb1c0 (patch)
tree	afbeb2accd4c2199ddd705ae943995b143a0af02 /drivers/target
parent	1860e379875dfe7271c649058aeddffe5afd9d0d (diff)
parent	1a5700bc2d10cd379a795fd2bb377a190af5acd4 (diff)
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the
merge window.

* accumulated work in next: (6809 commits)
  ufs: sb mutex merge + mutex_destroy
  powerpc: update comments for generic idle conversion
  cris: update comments for generic idle conversion
  idle: remove cpu_idle() forward declarations
  nbd: zero from and len fields in NBD_CMD_DISCONNECT.
  mm: convert some level-less printks to pr_*
  MAINTAINERS: adi-buildroot-devel is moderated
  MAINTAINERS: add linux-api for review of API/ABI changes
  mm/kmemleak-test.c: use pr_fmt for logging
  fs/dlm/debug_fs.c: replace seq_printf by seq_puts
  fs/dlm/lockspace.c: convert simple_str to kstr
  fs/dlm/config.c: convert simple_str to kstr
  mm: mark remap_file_pages() syscall as deprecated
  mm: memcontrol: remove unnecessary memcg argument from soft limit functions
  mm: memcontrol: clean up memcg zoneinfo lookup
  mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
  mm/mempool.c: update the kmemleak stack trace for mempool allocations
  lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
  mm: introduce kmemleak_update_trace()
  mm/kmemleak.c: use %u to print ->checksum
  ...
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/loopback/tcm_loop.c	4
-rw-r--r--	drivers/target/target_core_alua.c	26
-rw-r--r--	drivers/target/target_core_device.c	6
-rw-r--r--	drivers/target/target_core_iblock.c	2
-rw-r--r--	drivers/target/target_core_pr.c	56
-rw-r--r--	drivers/target/target_core_transport.c	16
-rw-r--r--	drivers/target/target_core_ua.c	10
7 files changed, 60 insertions, 60 deletions
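Every drivers/target hunk in this merge makes the same mechanical substitution: the per-operation barriers smp_mb__after_atomic_inc() and smp_mb__after_atomic_dec() are replaced by the single smp_mb__after_atomic() helper brought in with the 3.16 merge window work. For orientation, a minimal before/after sketch follows; struct example_ref and the example_get_*() functions are hypothetical names used only for illustration and do not appear in the patch, while the barrier calls are the real kernel primitives involved.

/*
 * Illustrative sketch only -- not code from this patch.  The struct and
 * function names are hypothetical; the "old" variant is shown purely for
 * comparison with the pre-conversion API.
 */
#include <linux/atomic.h>

struct example_ref {
	atomic_t refcount;
};

/* Old style: a dedicated barrier helper per atomic operation. */
static inline void example_get_old(struct example_ref *ref)
{
	atomic_inc(&ref->refcount);
	smp_mb__after_atomic_inc();
}

/* New style: one helper ordering against any non-value-returning atomic. */
static inline void example_get_new(struct example_ref *ref)
{
	atomic_inc(&ref->refcount);
	smp_mb__after_atomic();
}

From the driver's point of view the change is an API rename; the intended memory ordering is unchanged.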
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c886ad1c39fb..73ab75ddaf42 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -951,7 +951,7 @@ static int tcm_loop_port_link(
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
 	atomic_inc(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	/*
 	 * Add Linux/SCSI struct scsi_device by HCTL
 	 */
@@ -986,7 +986,7 @@ static void tcm_loop_port_unlink(
 	scsi_device_put(sd);
 
 	atomic_dec(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 
 	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 63512cc01a1f..fbc5ebb5f761 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -393,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 				continue;
 
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 
 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
@@ -404,7 +404,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_dec();
+			smp_mb__after_atomic();
 			break;
 		}
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -999,7 +999,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		 * TARGET PORT GROUPS command
 		 */
 		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 
 		spin_lock_bh(&port->sep_alua_lock);
@@ -1029,7 +1029,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 
 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	/*
@@ -1063,7 +1063,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1125,7 +1125,7 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1168,7 +1168,7 @@ int core_alua_do_port_transition(
 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = local_lu_gp_mem->lu_gp;
 	atomic_inc(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
 	/*
 	 * For storage objects that are members of the 'default_lu_gp',
@@ -1185,7 +1185,7 @@ int core_alua_do_port_transition(
 		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
 						   new_state, explicit);
 		atomic_dec(&lu_gp->lu_gp_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		return rc;
 	}
 	/*
@@ -1199,7 +1199,7 @@ int core_alua_do_port_transition(
 
 		dev = lu_gp_mem->lu_gp_mem_dev;
 		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&lu_gp->lu_gp_lock);
 
 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1228,7 +1228,7 @@ int core_alua_do_port_transition(
 				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
@@ -1239,7 +1239,7 @@ int core_alua_do_port_transition(
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_dec();
+			smp_mb__after_atomic();
 			if (rc)
 				break;
 		}
@@ -1247,7 +1247,7 @@ int core_alua_do_port_transition(
 
 		spin_lock(&lu_gp->lu_gp_lock);
 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
@@ -1261,7 +1261,7 @@ int core_alua_do_port_transition(
 	}
 
 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	return rc;
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 26416c15d65c..11d26fe65bfb 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -225,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 			continue;
 
 		atomic_inc(&deve->pr_ref_count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock_irq(&nacl->device_list_lock);
 
 		return deve;
@@ -1396,7 +1396,7 @@ int core_dev_add_initiator_node_lun_acl(
 	spin_lock(&lun->lun_acl_lock);
 	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
 	atomic_inc(&lun->lun_acl_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock(&lun->lun_acl_lock);
 
 	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1430,7 +1430,7 @@ int core_dev_del_initiator_node_lun_acl(
 	spin_lock(&lun->lun_acl_lock);
 	list_del(&lacl->lacl_list);
 	atomic_dec(&lun->lun_acl_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	spin_unlock(&lun->lun_acl_lock);
 
 	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 9e0232cca92e..7e6b857c6b3f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -323,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err)
 		 * Bump the ib_bio_err_cnt and release bio.
 		 */
 		atomic_inc(&ibr->ib_bio_err_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 	}
 
 	bio_put(bio);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3013287a2aaa..df357862286e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -675,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 	spin_lock(&dev->se_port_lock);
 	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
 		atomic_inc(&port->sep_tg_pt_ref_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&dev->se_port_lock);
 
 		spin_lock_bh(&port->sep_alua_lock);
@@ -710,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 				continue;
 
 			atomic_inc(&deve_tmp->pr_ref_count);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			spin_unlock_bh(&port->sep_alua_lock);
 			/*
 			 * Grab a configfs group dependency that is released
@@ -723,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 				pr_err("core_scsi3_lunacl_depend"
 						"_item() failed\n");
 				atomic_dec(&port->sep_tg_pt_ref_cnt);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				atomic_dec(&deve_tmp->pr_ref_count);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				goto out;
 			}
 			/*
@@ -740,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 					sa_res_key, all_tg_pt, aptpl);
 			if (!pr_reg_atp) {
 				atomic_dec(&port->sep_tg_pt_ref_cnt);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				atomic_dec(&deve_tmp->pr_ref_count);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				core_scsi3_lunacl_undepend_item(deve_tmp);
 				goto out;
 			}
@@ -755,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 
 		spin_lock(&dev->se_port_lock);
 		atomic_dec(&port->sep_tg_pt_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&dev->se_port_lock);
 
@@ -1110,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
 				continue;
 			}
 			atomic_inc(&pr_reg->pr_res_holders);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			spin_unlock(&pr_tmpl->registration_lock);
 			return pr_reg;
 		}
@@ -1125,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
 			continue;
 
 		atomic_inc(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&pr_tmpl->registration_lock);
 		return pr_reg;
 	}
@@ -1155,7 +1155,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
 static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
 {
 	atomic_dec(&pr_reg->pr_res_holders);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 }
 
 static int core_scsi3_check_implicit_release(
@@ -1349,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
 			&tpg->tpg_group.cg_item);
 
 	atomic_dec(&tpg->tpg_pr_ref_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 }
 
 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1369,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 
 	if (nacl->dynamic_node_acl) {
 		atomic_dec(&nacl->acl_pr_ref_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		return;
 	}
 
@@ -1377,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 			&nacl->acl_group.cg_item);
 
 	atomic_dec(&nacl->acl_pr_ref_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 }
 
 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1408,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
 	 */
 	if (!lun_acl) {
 		atomic_dec(&se_deve->pr_ref_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		return;
 	}
 	nacl = lun_acl->se_lun_nacl;
@@ -1418,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
 			&lun_acl->se_lun_group.cg_item);
 
 	atomic_dec(&se_deve->pr_ref_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 }
 
 static sense_reason_t
@@ -1552,14 +1552,14 @@ core_scsi3_decode_spec_i_port(
 				continue;
 
 			atomic_inc(&tmp_tpg->tpg_pr_ref_count);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			spin_unlock(&dev->se_port_lock);
 
 			if (core_scsi3_tpg_depend_item(tmp_tpg)) {
 				pr_err(" core_scsi3_tpg_depend_item()"
 						" for tmp_tpg\n");
 				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 				goto out_unmap;
 			}
@@ -1573,7 +1573,7 @@ core_scsi3_decode_spec_i_port(
 					tmp_tpg, i_str);
 			if (dest_node_acl) {
 				atomic_inc(&dest_node_acl->acl_pr_ref_count);
-				smp_mb__after_atomic_inc();
+				smp_mb__after_atomic();
 			}
 			spin_unlock_irq(&tmp_tpg->acl_node_lock);
 
@@ -1587,7 +1587,7 @@ core_scsi3_decode_spec_i_port(
 				pr_err("configfs_depend_item() failed"
 						" for dest_node_acl->acl_group\n");
 				atomic_dec(&dest_node_acl->acl_pr_ref_count);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				core_scsi3_tpg_undepend_item(tmp_tpg);
 				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 				goto out_unmap;
@@ -1647,7 +1647,7 @@ core_scsi3_decode_spec_i_port(
 			pr_err("core_scsi3_lunacl_depend_item()"
 					" failed\n");
 			atomic_dec(&dest_se_deve->pr_ref_count);
-			smp_mb__after_atomic_dec();
+			smp_mb__after_atomic();
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
 			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -3168,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
 			continue;
 
 		atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&dev->se_port_lock);
 
 		if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
 			pr_err("core_scsi3_tpg_depend_item() failed"
 					" for dest_se_tpg\n");
 			atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
-			smp_mb__after_atomic_dec();
+			smp_mb__after_atomic();
 			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			goto out_put_pr_reg;
 		}
@@ -3273,7 +3273,7 @@ after_iport_check:
 				initiator_str);
 	if (dest_node_acl) {
 		atomic_inc(&dest_node_acl->acl_pr_ref_count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 	}
 	spin_unlock_irq(&dest_se_tpg->acl_node_lock);
 
@@ -3289,7 +3289,7 @@ after_iport_check:
 		pr_err("core_scsi3_nodeacl_depend_item() for"
 				" dest_node_acl\n");
 		atomic_dec(&dest_node_acl->acl_pr_ref_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		dest_node_acl = NULL;
 		ret = TCM_INVALID_PARAMETER_LIST;
 		goto out;
@@ -3314,7 +3314,7 @@ after_iport_check:
 	if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
 		pr_err("core_scsi3_lunacl_depend_item() failed\n");
 		atomic_dec(&dest_se_deve->pr_ref_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		dest_se_deve = NULL;
 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		goto out;
@@ -3880,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 		add_desc_len = 0;
 
 		atomic_inc(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&pr_tmpl->registration_lock);
 		/*
 		 * Determine expected length of $FABRIC_MOD specific
@@ -3894,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 				" out of buffer: %d\n", cmd->data_length);
 			spin_lock(&pr_tmpl->registration_lock);
 			atomic_dec(&pr_reg->pr_res_holders);
-			smp_mb__after_atomic_dec();
+			smp_mb__after_atomic();
 			break;
 		}
 		/*
@@ -3956,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 
 		spin_lock(&pr_tmpl->registration_lock);
 		atomic_dec(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		/*
 		 * Set the ADDITIONAL DESCRIPTOR LENGTH
 		 */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 789aa9eb0a1e..2179feed0d63 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -736,7 +736,7 @@ void target_qf_do_work(struct work_struct *work)
 	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
 		list_del(&cmd->se_qf_node);
 		atomic_dec(&dev->dev_qf_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 
 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1149,7 +1149,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 	 * Dormant to Active status.
 	 */
 	cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
 			cmd->se_ordered_id, cmd->sam_task_attr,
 			dev->transport->name);
@@ -1706,7 +1706,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 		return false;
 	case MSG_ORDERED_TAG:
 		atomic_inc(&dev->dev_ordered_sync);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 
 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
			" se_ordered_id: %u\n",
@@ -1724,7 +1724,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 		 * For SIMPLE and UNTAGGED Task Attribute commands
 		 */
 		atomic_inc(&dev->simple_cmds);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		break;
 	}
 
@@ -1829,7 +1829,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 
 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
 		atomic_dec(&dev->simple_cmds);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1841,7 +1841,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
 		atomic_dec(&dev->dev_ordered_sync);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1900,7 +1900,7 @@ static void transport_handle_queue_full(
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc(&dev->dev_qf_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
 
 	schedule_work(&cmd->se_dev->qf_work_queue);
@@ -2875,7 +2875,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			cmd->transport_state |= CMD_T_ABORTED;
 			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			return;
 		}
 	}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 505519b10cb7..101858e245b3 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -162,7 +162,7 @@ int core_scsi3_ua_allocate(
 			spin_unlock_irq(&nacl->device_list_lock);
 
 			atomic_inc(&deve->ua_count);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			return 0;
 		}
 		list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -175,7 +175,7 @@ int core_scsi3_ua_allocate(
 			asc, ascq);
 
 	atomic_inc(&deve->ua_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	return 0;
 }
 
@@ -190,7 +190,7 @@ void core_scsi3_ua_release_all(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 }
@@ -251,7 +251,7 @@ void core_scsi3_ua_for_check_condition(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
@@ -310,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);