author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-03-03 17:57:58 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-05-27 15:56:15 -0400
commit		7d0ae8086b828311250c6afdf800b568ac9bd693 (patch)
tree		a1bb0c6a5e66f1e48c4667fd247a41c4b9253fd2 /kernel/rcu/tree.c
parent		030bbdbf4c833bc69f502eae58498bc5572db736 (diff)
rcu: Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()
This commit moves from the old ACCESS_ONCE() API to the new READ_ONCE()
and WRITE_ONCE() APIs.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Updated to include kernel/torture.c as suggested by Jason Low. ]
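The conversion pattern is mechanical: loads become READ_ONCE(), stores become
WRITE_ONCE(), and a read-modify-write becomes a READ_ONCE() feeding a
WRITE_ONCE(). Below is a minimal userspace sketch of the pattern, assuming
simplified macro bodies; the kernel's real definitions live in
include/linux/compiler.h and, unlike ACCESS_ONCE() (which some compilers
handled unreliably for non-scalar types), also cope with non-scalar types.

	#include <stdio.h>

	/* Simplified stand-ins for the kernel macros, for illustration only. */
	#define ACCESS_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
	#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

	static unsigned long gp_flags;

	int main(void)
	{
		/* Old style: one macro for both loads and stores. */
		ACCESS_ONCE(gp_flags) = 1;
		unsigned long gf = ACCESS_ONCE(gp_flags);

		/* New style: the direction of each access is explicit, and a
		 * read-modify-write is READ_ONCE() feeding WRITE_ONCE(), as in
		 * the force_quiescent_state() hunk of the diff below. */
		WRITE_ONCE(gp_flags, READ_ONCE(gp_flags) | 2);
		printf("gp_flags = %lu (was %lu)\n", READ_ONCE(gp_flags), gf);
		return 0;
	}

Making the access direction explicit lets the compiler reject a READ_ONCE()
used as an lvalue, which the single ACCESS_ONCE() macro could not do.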
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	184
1 file changed, 92 insertions(+), 92 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8cf7304b2867..0628df155970 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -191,17 +191,17 @@ unsigned long rcutorture_vernum;
  */
 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
 {
-	return ACCESS_ONCE(rnp->qsmaskinitnext);
+	return READ_ONCE(rnp->qsmaskinitnext);
 }
 
 /*
- * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
+ * Return true if an RCU grace period is in progress. The READ_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
 }
 
 /*
@@ -278,8 +278,8 @@ static void rcu_momentary_dyntick_idle(void)
 		if (!(resched_mask & rsp->flavor_mask))
 			continue;
 		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
-		if (ACCESS_ONCE(rdp->mynode->completed) !=
-		    ACCESS_ONCE(rdp->cond_resched_completed))
+		if (READ_ONCE(rdp->mynode->completed) !=
+		    READ_ONCE(rdp->cond_resched_completed))
 			continue;
 
 		/*
@@ -491,9 +491,9 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 		break;
 	}
 	if (rsp != NULL) {
-		*flags = ACCESS_ONCE(rsp->gp_flags);
-		*gpnum = ACCESS_ONCE(rsp->gpnum);
-		*completed = ACCESS_ONCE(rsp->completed);
+		*flags = READ_ONCE(rsp->gp_flags);
+		*gpnum = READ_ONCE(rsp->gpnum);
+		*completed = READ_ONCE(rsp->completed);
 		return;
 	}
 	*flags = 0;
@@ -539,10 +539,10 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static int rcu_future_needs_gp(struct rcu_state *rsp)
 {
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
+	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
 	int *fp = &rnp->need_future_gp[idx];
 
-	return ACCESS_ONCE(*fp);
+	return READ_ONCE(*fp);
 }
 
 /*
@@ -565,7 +565,7 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1; /* Yes, this CPU has newly registered callbacks. */
 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
 		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
-		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
+		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
 				 rdp->nxtcompleted[i]))
 			return 1; /* Yes, CBs for future grace period. */
 	return 0; /* No grace period needed. */
@@ -1011,9 +1011,9 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		return 1;
 	} else {
-		if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
-			ACCESS_ONCE(rdp->gpwrap) = true;
+			WRITE_ONCE(rdp->gpwrap, true);
 		return 0;
 	}
 }
@@ -1093,12 +1093,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	if (ULONG_CMP_GE(jiffies,
 			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
 	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-		if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
-			ACCESS_ONCE(rdp->cond_resched_completed) =
-				ACCESS_ONCE(rdp->mynode->completed);
+		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+			WRITE_ONCE(rdp->cond_resched_completed,
+				   READ_ONCE(rdp->mynode->completed));
 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
-			ACCESS_ONCE(*rcrmp) =
-				ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+			WRITE_ONCE(*rcrmp,
+				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
 			resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
 			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
 		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
@@ -1119,9 +1119,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->gp_start = j;
 	smp_wmb(); /* Record start time before stall time. */
 	j1 = rcu_jiffies_till_stall_check();
-	ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
+	WRITE_ONCE(rsp->jiffies_stall, j + j1);
 	rsp->jiffies_resched = j + j1 / 2;
-	rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
 
 /*
@@ -1133,7 +1133,7 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 	unsigned long j;
 
 	j = jiffies;
-	gpa = ACCESS_ONCE(rsp->gp_activity);
+	gpa = READ_ONCE(rsp->gp_activity);
 	if (j - gpa > 2 * HZ)
 		pr_err("%s kthread starved for %ld jiffies!\n",
 		       rsp->name, j - gpa);
@@ -1173,12 +1173,13 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	/* Only let one CPU complain about others per time interval. */
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
-	ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+	WRITE_ONCE(rsp->jiffies_stall,
+		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -1212,12 +1213,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	if (ndetected) {
 		rcu_dump_cpu_stacks(rsp);
 	} else {
-		if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
-		    ACCESS_ONCE(rsp->completed) == gpnum) {
+		if (READ_ONCE(rsp->gpnum) != gpnum ||
+		    READ_ONCE(rsp->completed) == gpnum) {
 			pr_err("INFO: Stall ended before state dump start\n");
 		} else {
 			j = jiffies;
-			gpa = ACCESS_ONCE(rsp->gp_activity);
+			gpa = READ_ONCE(rsp->gp_activity);
 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 			       rsp->name, j - gpa, j, gpa,
 			       jiffies_till_next_fqs,
@@ -1262,9 +1263,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
-		ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
-				  3 * rcu_jiffies_till_stall_check() + 3;
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+		WRITE_ONCE(rsp->jiffies_stall,
+			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -1307,20 +1308,20 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
 	 * and rsp->gp_start suffice to forestall false positives.
 	 */
-	gpnum = ACCESS_ONCE(rsp->gpnum);
+	gpnum = READ_ONCE(rsp->gpnum);
 	smp_rmb(); /* Pick up ->gpnum first... */
-	js = ACCESS_ONCE(rsp->jiffies_stall);
+	js = READ_ONCE(rsp->jiffies_stall);
 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-	gps = ACCESS_ONCE(rsp->gp_start);
+	gps = READ_ONCE(rsp->gp_start);
 	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-	completed = ACCESS_ONCE(rsp->completed);
+	completed = READ_ONCE(rsp->completed);
 	if (ULONG_CMP_GE(completed, gpnum) ||
 	    ULONG_CMP_LT(j, js) ||
 	    ULONG_CMP_GE(gps, js))
 		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	if (rcu_gp_in_progress(rsp) &&
-	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
+	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
@@ -1347,7 +1348,7 @@ void rcu_cpu_stall_reset(void)
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
-		ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
+		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
 /*
@@ -1457,7 +1458,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * doing some extra useless work.
 	 */
 	if (rnp->gpnum != rnp->completed ||
-	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
+	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
 		rnp->need_future_gp[c & 0x1]++;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
@@ -1542,7 +1543,7 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 {
 	if (current == rsp->gp_kthread ||
-	    !ACCESS_ONCE(rsp->gp_flags) ||
+	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
 	wake_up(&rsp->gp_wq);
@@ -1677,7 +1678,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	/* Handle the ends of any preceding grace periods first. */
 	if (rdp->completed == rnp->completed &&
-	    !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+	    !unlikely(READ_ONCE(rdp->gpwrap))) {
 
 		/* No grace period end, so just accelerate recent callbacks. */
 		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1692,7 +1693,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
 	}
 
-	if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
 		/*
 		 * If the current grace period is waiting for this CPU,
 		 * set up to detect a quiescent state, otherwise don't
@@ -1704,7 +1705,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
 		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
 		zero_cpu_stall_ticks(rdp);
-		ACCESS_ONCE(rdp->gpwrap) = false;
+		WRITE_ONCE(rdp->gpwrap, false);
 	}
 	return ret;
 }
@@ -1717,9 +1718,9 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	local_irq_save(flags);
 	rnp = rdp->mynode;
-	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
-	     rdp->completed == ACCESS_ONCE(rnp->completed) &&
-	     !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
+	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
+	     rdp->completed == READ_ONCE(rnp->completed) &&
+	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
 		return;
@@ -1740,15 +1741,15 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	ACCESS_ONCE(rsp->gp_activity) = jiffies;
+	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();
-	if (!ACCESS_ONCE(rsp->gp_flags)) {
+	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
 		raw_spin_unlock_irq(&rnp->lock);
 		return 0;
 	}
-	ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
+	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
 
 	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
 		/*
@@ -1834,9 +1835,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		rdp = this_cpu_ptr(rsp->rda);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
-		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
+		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
 		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-			ACCESS_ONCE(rnp->completed) = rsp->completed;
+			WRITE_ONCE(rnp->completed, rsp->completed);
 		if (rnp == rdp->mynode)
 			(void)__note_gp_changes(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
@@ -1845,7 +1846,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
-		ACCESS_ONCE(rsp->gp_activity) = jiffies;
+		WRITE_ONCE(rsp->gp_activity, jiffies);
 		if (gp_init_delay > 0 &&
 		    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
 			schedule_timeout_uninterruptible(gp_init_delay);
@@ -1864,7 +1865,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 	unsigned long maxj;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	ACCESS_ONCE(rsp->gp_activity) = jiffies;
+	WRITE_ONCE(rsp->gp_activity, jiffies);
 	rsp->n_force_qs++;
 	if (fqs_state == RCU_SAVE_DYNTICK) {
 		/* Collect dyntick-idle snapshots. */
@@ -1882,11 +1883,11 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
 	}
 	/* Clear flag to prevent immediate re-entry. */
-	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		raw_spin_lock_irq(&rnp->lock);
 		smp_mb__after_unlock_lock();
-		ACCESS_ONCE(rsp->gp_flags) =
-			ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
+		WRITE_ONCE(rsp->gp_flags,
+			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
 		raw_spin_unlock_irq(&rnp->lock);
 	}
 	return fqs_state;
@@ -1903,7 +1904,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	ACCESS_ONCE(rsp->gp_activity) = jiffies;
+	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();
 	gp_duration = jiffies - rsp->gp_start;
@@ -1934,7 +1935,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		smp_mb__after_unlock_lock();
 		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
 		WARN_ON_ONCE(rnp->qsmask);
-		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
+		WRITE_ONCE(rnp->completed, rsp->gpnum);
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -1942,7 +1943,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
-		ACCESS_ONCE(rsp->gp_activity) = jiffies;
+		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);
@@ -1950,16 +1951,16 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rcu_nocb_gp_set(rnp, nocb);
 
 	/* Declare grace period done. */
-	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
+	WRITE_ONCE(rsp->completed, rsp->gpnum);
 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
 	/* Advance CBs to reduce false positives below. */
 	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
 	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
-		ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
+		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
 		trace_rcu_grace_period(rsp->name,
-				       ACCESS_ONCE(rsp->gpnum),
+				       READ_ONCE(rsp->gpnum),
 				       TPS("newreq"));
 	}
 	raw_spin_unlock_irq(&rnp->lock);
@@ -1983,20 +1984,20 @@ static int __noreturn rcu_gp_kthread(void *arg)
 	/* Handle grace-period start. */
 	for (;;) {
 		trace_rcu_grace_period(rsp->name,
-				       ACCESS_ONCE(rsp->gpnum),
+				       READ_ONCE(rsp->gpnum),
 				       TPS("reqwait"));
 		rsp->gp_state = RCU_GP_WAIT_GPS;
 		wait_event_interruptible(rsp->gp_wq,
-					 ACCESS_ONCE(rsp->gp_flags) &
+					 READ_ONCE(rsp->gp_flags) &
 					 RCU_GP_FLAG_INIT);
 		/* Locking provides needed memory barrier. */
 		if (rcu_gp_init(rsp))
 			break;
 		cond_resched_rcu_qs();
-		ACCESS_ONCE(rsp->gp_activity) = jiffies;
+		WRITE_ONCE(rsp->gp_activity, jiffies);
 		WARN_ON(signal_pending(current));
 		trace_rcu_grace_period(rsp->name,
-				       ACCESS_ONCE(rsp->gpnum),
+				       READ_ONCE(rsp->gpnum),
 				       TPS("reqwaitsig"));
 	}
 
@@ -2012,39 +2013,39 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			if (!ret)
 				rsp->jiffies_force_qs = jiffies + j;
 			trace_rcu_grace_period(rsp->name,
-					       ACCESS_ONCE(rsp->gpnum),
+					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
-					((gf = ACCESS_ONCE(rsp->gp_flags)) &
+					((gf = READ_ONCE(rsp->gp_flags)) &
 					 RCU_GP_FLAG_FQS) ||
-					(!ACCESS_ONCE(rnp->qsmask) &&
+					(!READ_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
 			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
-			if (!ACCESS_ONCE(rnp->qsmask) &&
+			if (!READ_ONCE(rnp->qsmask) &&
 			    !rcu_preempt_blocked_readers_cgp(rnp))
 				break;
 			/* If time for quiescent-state forcing, do it. */
 			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
 			    (gf & RCU_GP_FLAG_FQS)) {
 				trace_rcu_grace_period(rsp->name,
-						       ACCESS_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gpnum),
 						       TPS("fqsstart"));
 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
 				trace_rcu_grace_period(rsp->name,
-						       ACCESS_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gpnum),
 						       TPS("fqsend"));
 				cond_resched_rcu_qs();
-				ACCESS_ONCE(rsp->gp_activity) = jiffies;
+				WRITE_ONCE(rsp->gp_activity, jiffies);
 			} else {
 				/* Deal with stray signal. */
 				cond_resched_rcu_qs();
-				ACCESS_ONCE(rsp->gp_activity) = jiffies;
+				WRITE_ONCE(rsp->gp_activity, jiffies);
 				WARN_ON(signal_pending(current));
 				trace_rcu_grace_period(rsp->name,
-						       ACCESS_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gpnum),
 						       TPS("fqswaitsig"));
 			}
 			j = jiffies_till_next_fqs;
@@ -2086,8 +2087,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 	 */
 		return false;
 	}
-	ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
-	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+	WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
+	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
 			       TPS("newreq"));
 
 	/*
@@ -2359,7 +2360,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 		rsp->qlen += rdp->qlen;
 		rdp->n_cbs_orphaned += rdp->qlen;
 		rdp->qlen_lazy = 0;
-		ACCESS_ONCE(rdp->qlen) = 0;
+		WRITE_ONCE(rdp->qlen, 0);
 	}
 
 	/*
@@ -2580,7 +2581,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	/* If no callbacks are ready, just return. */
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
-		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+		trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
 				    need_resched(), is_idle_task(current),
 				    rcu_is_callbacks_kthread());
 		return;
@@ -2636,7 +2637,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
+	WRITE_ONCE(rdp->qlen, rdp->qlen - count);
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -2793,7 +2794,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	/* Funnel through hierarchy to reduce memory contention. */
 	rnp = __this_cpu_read(rsp->rda->mynode);
 	for (; rnp != NULL; rnp = rnp->parent) {
-		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
+		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
 		      !raw_spin_trylock(&rnp->fqslock);
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
@@ -2809,13 +2810,12 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
-	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return; /* Someone beat us to it. */
 	}
-	ACCESS_ONCE(rsp->gp_flags) =
-		ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
+	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 	rcu_gp_kthread_wake(rsp);
 }
@@ -2881,7 +2881,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
 	if (likely(!rsp->boost)) {
 		rcu_do_batch(rsp, rdp);
@@ -2972,7 +2972,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
 	if (debug_rcu_head_queue(head)) {
 		/* Probable double call_rcu(), so leak the callback. */
-		ACCESS_ONCE(head->func) = rcu_leak_callback;
+		WRITE_ONCE(head->func, rcu_leak_callback);
 		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
 		return;
 	}
@@ -3011,7 +3011,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		if (!likely(rdp->nxtlist))
 			init_default_callback_list(rdp);
 	}
-	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
+	WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -3450,14 +3450,14 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has another RCU grace period completed? */
-	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
+	if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
 		rdp->n_rp_gp_completed++;
 		return 1;
 	}
 
 	/* Has a new RCU grace period started? */
-	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
-	    unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
+	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+	    unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
 		rdp->n_rp_gp_started++;
 		return 1;
 	}
@@ -3564,7 +3564,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 {
 	int cpu;
 	struct rcu_data *rdp;
-	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+	unsigned long snap = READ_ONCE(rsp->n_barrier_done);
 	unsigned long snap_done;
 
 	_rcu_barrier_trace(rsp, "Begin", -1, snap);
@@ -3606,10 +3606,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/*
 	 * Increment ->n_barrier_done to avoid duplicate work. Use
-	 * ACCESS_ONCE() to prevent the compiler from speculating
+	 * WRITE_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3645,7 +3645,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 			__call_rcu(&rdp->barrier_head,
 				   rcu_barrier_callback, rsp, cpu, 0);
 			}
-		} else if (ACCESS_ONCE(rdp->qlen)) {
+		} else if (READ_ONCE(rdp->qlen)) {
 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
 					   rsp->n_barrier_done);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
@@ -3665,7 +3665,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */