author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-03-03 17:57:58 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-05-27 15:56:15 -0400
commit		7d0ae8086b828311250c6afdf800b568ac9bd693
tree		a1bb0c6a5e66f1e48c4667fd247a41c4b9253fd2	/kernel/rcu
parent		030bbdbf4c833bc69f502eae58498bc5572db736
rcu: Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()
This commit moves from the old ACCESS_ONCE() API to the new READ_ONCE()
and WRITE_ONCE() APIs.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Updated to include kernel/torture.c as suggested by Jason Low. ]
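
The conversion itself is mechanical: each ACCESS_ONCE() load becomes a READ_ONCE(), and each assignment through an ACCESS_ONCE() lvalue becomes a WRITE_ONCE() taking the stored expression as its second argument. The force_quiescent_state() hunk below shows the pattern in miniature:

	/* Old: one lvalue macro serves as both the load and the store. */
	ACCESS_ONCE(rsp->gp_flags) = ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;

	/* New: the direction of each one-time access is explicit. */
	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);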
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/rcutorture.c		|   2
-rw-r--r--	kernel/rcu/srcu.c		|  10
-rw-r--r--	kernel/rcu/tiny_plugin.h	|  12
-rw-r--r--	kernel/rcu/tree.c		| 184
-rw-r--r--	kernel/rcu/tree_plugin.h	|  93
-rw-r--r--	kernel/rcu/tree_trace.c		|   6
-rw-r--r--	kernel/rcu/update.c		|  30
7 files changed, 169 insertions, 168 deletions
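
As background, the effect of both new APIs on a machine-word-sized variable can be sketched as a plain volatile access. This is a simplification for illustration only, not the exact definitions in include/linux/compiler.h, which also cope with accesses the compiler cannot perform in a single instruction:

	/* Simplified sketch, assuming a scalar no wider than one word. */
	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

Splitting the old lvalue-style macro into separate load and store forms makes the intended direction of each access visible to both readers and the compiler, and avoids ACCESS_ONCE()'s known failure mode with compilers that drop the volatile qualifier from non-scalar accesses.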
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 8dbe27611ec3..a67ef6ff86b0 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1413,7 +1413,7 @@ static int rcu_torture_barrier_cbs(void *arg)
 	do {
 		wait_event(barrier_cbs_wq[myid],
 			   (newphase =
-			    ACCESS_ONCE(barrier_phase)) != lastphase ||
+			    READ_ONCE(barrier_phase)) != lastphase ||
 			   torture_must_stop());
 		lastphase = newphase;
 		smp_mb(); /* ensure barrier_phase load before ->call(). */
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index cad76e76b4e7..fb33d35ee0b7 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -151,7 +151,7 @@ static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
 	unsigned long t;
 
 	for_each_possible_cpu(cpu) {
-		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
+		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
 		sum += t;
 	}
 	return sum;
@@ -168,7 +168,7 @@ static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
 	unsigned long t;
 
 	for_each_possible_cpu(cpu) {
-		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
+		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
 		sum += t;
 	}
 	return sum;
@@ -265,8 +265,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
-		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
+		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
+		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
 	}
 	return sum;
 }
@@ -296,7 +296,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
 {
 	int idx;
 
-	idx = ACCESS_ONCE(sp->completed) & 0x1;
+	idx = READ_ONCE(sp->completed) & 0x1;
 	preempt_disable();
 	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index f94e209a10d6..e492a5253e0f 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -144,16 +144,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 		return;
 	rcp->ticks_this_gp++;
 	j = jiffies;
-	js = ACCESS_ONCE(rcp->jiffies_stall);
+	js = READ_ONCE(rcp->jiffies_stall);
 	if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
 		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
 		       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
 		       jiffies - rcp->gp_start, rcp->qlen);
 		dump_stack();
-		ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
-			3 * rcu_jiffies_till_stall_check() + 3;
+		WRITE_ONCE(rcp->jiffies_stall,
+			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	} else if (ULONG_CMP_GE(j, js)) {
-		ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+		WRITE_ONCE(rcp->jiffies_stall,
+			   jiffies + rcu_jiffies_till_stall_check());
 	}
 }
 
@@ -161,7 +162,8 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
 {
 	rcp->ticks_this_gp = 0;
 	rcp->gp_start = jiffies;
-	ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+	WRITE_ONCE(rcp->jiffies_stall,
+		   jiffies + rcu_jiffies_till_stall_check());
 }
 
 static void check_cpu_stalls(void)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8cf7304b2867..0628df155970 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -191,17 +191,17 @@ unsigned long rcutorture_vernum;
  */
 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
 {
-	return ACCESS_ONCE(rnp->qsmaskinitnext);
+	return READ_ONCE(rnp->qsmaskinitnext);
 }
 
 /*
- * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
+ * Return true if an RCU grace period is in progress. The READ_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
 }
 
 /*
@@ -278,8 +278,8 @@ static void rcu_momentary_dyntick_idle(void)
 		if (!(resched_mask & rsp->flavor_mask))
 			continue;
 		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
-		if (ACCESS_ONCE(rdp->mynode->completed) !=
-		    ACCESS_ONCE(rdp->cond_resched_completed))
+		if (READ_ONCE(rdp->mynode->completed) !=
+		    READ_ONCE(rdp->cond_resched_completed))
 			continue;
 
 		/*
@@ -491,9 +491,9 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 			break;
 	}
 	if (rsp != NULL) {
-		*flags = ACCESS_ONCE(rsp->gp_flags);
-		*gpnum = ACCESS_ONCE(rsp->gpnum);
-		*completed = ACCESS_ONCE(rsp->completed);
+		*flags = READ_ONCE(rsp->gp_flags);
+		*gpnum = READ_ONCE(rsp->gpnum);
+		*completed = READ_ONCE(rsp->completed);
 		return;
 	}
 	*flags = 0;
@@ -539,10 +539,10 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static int rcu_future_needs_gp(struct rcu_state *rsp)
 {
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
+	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
 	int *fp = &rnp->need_future_gp[idx];
 
-	return ACCESS_ONCE(*fp);
+	return READ_ONCE(*fp);
 }
 
 /*
@@ -565,7 +565,7 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1;  /* Yes, this CPU has newly registered callbacks. */
 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
 		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
-		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
+		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
 				 rdp->nxtcompleted[i]))
 			return 1;  /* Yes, CBs for future grace period. */
 	return 0; /* No grace period needed. */
@@ -1011,9 +1011,9 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		return 1;
 	} else {
-		if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
-			ACCESS_ONCE(rdp->gpwrap) = true;
+			WRITE_ONCE(rdp->gpwrap, true);
 		return 0;
 	}
 }
@@ -1093,12 +1093,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	if (ULONG_CMP_GE(jiffies,
 			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
 	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-		if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
-			ACCESS_ONCE(rdp->cond_resched_completed) =
-				ACCESS_ONCE(rdp->mynode->completed);
+		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+			WRITE_ONCE(rdp->cond_resched_completed,
+				   READ_ONCE(rdp->mynode->completed));
 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
-			ACCESS_ONCE(*rcrmp) =
-				ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+			WRITE_ONCE(*rcrmp,
+				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
 			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
 			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
 		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
@@ -1119,9 +1119,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->gp_start = j;
 	smp_wmb(); /* Record start time before stall time. */
 	j1 = rcu_jiffies_till_stall_check();
-	ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
+	WRITE_ONCE(rsp->jiffies_stall, j + j1);
 	rsp->jiffies_resched = j + j1 / 2;
-	rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
 
 /*
@@ -1133,7 +1133,7 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 	unsigned long j;
 
 	j = jiffies;
-	gpa = ACCESS_ONCE(rsp->gp_activity);
+	gpa = READ_ONCE(rsp->gp_activity);
 	if (j - gpa > 2 * HZ)
 		pr_err("%s kthread starved for %ld jiffies!\n",
 		       rsp->name, j - gpa);
@@ -1173,12 +1173,13 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	/* Only let one CPU complain about others per time interval. */
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
-	ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+	WRITE_ONCE(rsp->jiffies_stall,
+		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -1212,12 +1213,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	if (ndetected) {
 		rcu_dump_cpu_stacks(rsp);
 	} else {
-		if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
-		    ACCESS_ONCE(rsp->completed) == gpnum) {
+		if (READ_ONCE(rsp->gpnum) != gpnum ||
+		    READ_ONCE(rsp->completed) == gpnum) {
 			pr_err("INFO: Stall ended before state dump start\n");
 		} else {
 			j = jiffies;
-			gpa = ACCESS_ONCE(rsp->gp_activity);
+			gpa = READ_ONCE(rsp->gp_activity);
 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 			       rsp->name, j - gpa, j, gpa,
 			       jiffies_till_next_fqs,
@@ -1262,9 +1263,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
-		ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
-			3 * rcu_jiffies_till_stall_check() + 3;
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+		WRITE_ONCE(rsp->jiffies_stall,
+			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -1307,20 +1308,20 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
 	 * and rsp->gp_start suffice to forestall false positives.
 	 */
-	gpnum = ACCESS_ONCE(rsp->gpnum);
+	gpnum = READ_ONCE(rsp->gpnum);
 	smp_rmb(); /* Pick up ->gpnum first... */
-	js = ACCESS_ONCE(rsp->jiffies_stall);
+	js = READ_ONCE(rsp->jiffies_stall);
 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-	gps = ACCESS_ONCE(rsp->gp_start);
+	gps = READ_ONCE(rsp->gp_start);
 	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-	completed = ACCESS_ONCE(rsp->completed);
+	completed = READ_ONCE(rsp->completed);
 	if (ULONG_CMP_GE(completed, gpnum) ||
 	    ULONG_CMP_LT(j, js) ||
 	    ULONG_CMP_GE(gps, js))
 		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	if (rcu_gp_in_progress(rsp) &&
-	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
+	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
@@ -1347,7 +1348,7 @@ void rcu_cpu_stall_reset(void)
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
-		ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
+		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
 /*
@@ -1457,7 +1458,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * doing some extra useless work.
 	 */
 	if (rnp->gpnum != rnp->completed ||
-	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
+	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
 		rnp->need_future_gp[c & 0x1]++;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
@@ -1542,7 +1543,7 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 {
 	if (current == rsp->gp_kthread ||
-	    !ACCESS_ONCE(rsp->gp_flags) ||
+	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
 	wake_up(&rsp->gp_wq);
@@ -1677,7 +1678,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	/* Handle the ends of any preceding grace periods first. */
 	if (rdp->completed == rnp->completed &&
-	    !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+	    !unlikely(READ_ONCE(rdp->gpwrap))) {
 
 		/* No grace period end, so just accelerate recent callbacks. */
 		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1692,7 +1693,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
 	}
 
-	if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
 		/*
 		 * If the current grace period is waiting for this CPU,
 		 * set up to detect a quiescent state, otherwise don't
@@ -1704,7 +1705,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
 		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
 		zero_cpu_stall_ticks(rdp);
-		ACCESS_ONCE(rdp->gpwrap) = false;
+		WRITE_ONCE(rdp->gpwrap, false);
 	}
 	return ret;
 }
@@ -1717,9 +1718,9 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	local_irq_save(flags);
 	rnp = rdp->mynode;
-	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
-	     rdp->completed == ACCESS_ONCE(rnp->completed) &&
-	     !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
+	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
+	     rdp->completed == READ_ONCE(rnp->completed) &&
+	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
 		return;
@@ -1740,15 +1741,15 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	ACCESS_ONCE(rsp->gp_activity) = jiffies;
+	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();
-	if (!ACCESS_ONCE(rsp->gp_flags)) {
+	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
 		raw_spin_unlock_irq(&rnp->lock);
 		return 0;
 	}
-	ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
+	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
 
 	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
 		/*
@@ -1834,9 +1835,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		rdp = this_cpu_ptr(rsp->rda);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
-		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
+		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
 		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-			ACCESS_ONCE(rnp->completed) = rsp->completed;
+			WRITE_ONCE(rnp->completed, rsp->completed);
 		if (rnp == rdp->mynode)
 			(void)__note_gp_changes(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
@@ -1845,7 +1846,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
-		ACCESS_ONCE(rsp->gp_activity) = jiffies;
+		WRITE_ONCE(rsp->gp_activity, jiffies);
 		if (gp_init_delay > 0 &&
 		    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
 			schedule_timeout_uninterruptible(gp_init_delay);
@@ -1864,7 +1865,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 	unsigned long maxj;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	ACCESS_ONCE(rsp->gp_activity) = jiffies;
+	WRITE_ONCE(rsp->gp_activity, jiffies);
 	rsp->n_force_qs++;
 	if (fqs_state == RCU_SAVE_DYNTICK) {
 		/* Collect dyntick-idle snapshots. */
@@ -1882,11 +1883,11 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
 	}
 	/* Clear flag to prevent immediate re-entry. */
-	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		raw_spin_lock_irq(&rnp->lock);
 		smp_mb__after_unlock_lock();
-		ACCESS_ONCE(rsp->gp_flags) =
-			ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
+		WRITE_ONCE(rsp->gp_flags,
+			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
 		raw_spin_unlock_irq(&rnp->lock);
 	}
 	return fqs_state;
@@ -1903,7 +1904,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	ACCESS_ONCE(rsp->gp_activity) = jiffies;
+	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();
 	gp_duration = jiffies - rsp->gp_start;
@@ -1934,7 +1935,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		smp_mb__after_unlock_lock();
 		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
 		WARN_ON_ONCE(rnp->qsmask);
-		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
+		WRITE_ONCE(rnp->completed, rsp->gpnum);
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -1942,7 +1943,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
-		ACCESS_ONCE(rsp->gp_activity) = jiffies;
+		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);
@@ -1950,16 +1951,16 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rcu_nocb_gp_set(rnp, nocb);
 
 	/* Declare grace period done. */
-	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
+	WRITE_ONCE(rsp->completed, rsp->gpnum);
 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
 	/* Advance CBs to reduce false positives below. */
 	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
 	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
-		ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
+		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
 		trace_rcu_grace_period(rsp->name,
-				       ACCESS_ONCE(rsp->gpnum),
+				       READ_ONCE(rsp->gpnum),
 				       TPS("newreq"));
 	}
 	raw_spin_unlock_irq(&rnp->lock);
@@ -1983,20 +1984,20 @@ static int __noreturn rcu_gp_kthread(void *arg)
 	/* Handle grace-period start. */
 	for (;;) {
 		trace_rcu_grace_period(rsp->name,
-				       ACCESS_ONCE(rsp->gpnum),
+				       READ_ONCE(rsp->gpnum),
 				       TPS("reqwait"));
 		rsp->gp_state = RCU_GP_WAIT_GPS;
 		wait_event_interruptible(rsp->gp_wq,
-					 ACCESS_ONCE(rsp->gp_flags) &
+					 READ_ONCE(rsp->gp_flags) &
 					 RCU_GP_FLAG_INIT);
 		/* Locking provides needed memory barrier. */
 		if (rcu_gp_init(rsp))
 			break;
 		cond_resched_rcu_qs();
-		ACCESS_ONCE(rsp->gp_activity) = jiffies;
+		WRITE_ONCE(rsp->gp_activity, jiffies);
 		WARN_ON(signal_pending(current));
 		trace_rcu_grace_period(rsp->name,
-				       ACCESS_ONCE(rsp->gpnum),
+				       READ_ONCE(rsp->gpnum),
 				       TPS("reqwaitsig"));
 	}
 
@@ -2012,39 +2013,39 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			if (!ret)
 				rsp->jiffies_force_qs = jiffies + j;
 			trace_rcu_grace_period(rsp->name,
-					       ACCESS_ONCE(rsp->gpnum),
+					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
-					((gf = ACCESS_ONCE(rsp->gp_flags)) &
+					((gf = READ_ONCE(rsp->gp_flags)) &
 					 RCU_GP_FLAG_FQS) ||
-					(!ACCESS_ONCE(rnp->qsmask) &&
+					(!READ_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
 			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
-			if (!ACCESS_ONCE(rnp->qsmask) &&
+			if (!READ_ONCE(rnp->qsmask) &&
 			    !rcu_preempt_blocked_readers_cgp(rnp))
 				break;
 			/* If time for quiescent-state forcing, do it. */
 			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
 			    (gf & RCU_GP_FLAG_FQS)) {
 				trace_rcu_grace_period(rsp->name,
-						       ACCESS_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gpnum),
 						       TPS("fqsstart"));
 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
 				trace_rcu_grace_period(rsp->name,
-						       ACCESS_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gpnum),
 						       TPS("fqsend"));
 				cond_resched_rcu_qs();
-				ACCESS_ONCE(rsp->gp_activity) = jiffies;
+				WRITE_ONCE(rsp->gp_activity, jiffies);
 			} else {
 				/* Deal with stray signal. */
 				cond_resched_rcu_qs();
-				ACCESS_ONCE(rsp->gp_activity) = jiffies;
+				WRITE_ONCE(rsp->gp_activity, jiffies);
 				WARN_ON(signal_pending(current));
 				trace_rcu_grace_period(rsp->name,
-						       ACCESS_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gpnum),
 						       TPS("fqswaitsig"));
 			}
 			j = jiffies_till_next_fqs;
@@ -2086,8 +2087,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 		 */
 		return false;
 	}
-	ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
-	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+	WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
+	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
 			       TPS("newreq"));
 
 	/*
@@ -2359,7 +2360,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 	rsp->qlen += rdp->qlen;
 	rdp->n_cbs_orphaned += rdp->qlen;
 	rdp->qlen_lazy = 0;
-	ACCESS_ONCE(rdp->qlen) = 0;
+	WRITE_ONCE(rdp->qlen, 0);
 }
 
 /*
@@ -2580,7 +2581,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	/* If no callbacks are ready, just return. */
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
-		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+		trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
 				    need_resched(), is_idle_task(current),
 				    rcu_is_callbacks_kthread());
 		return;
@@ -2636,7 +2637,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
+	WRITE_ONCE(rdp->qlen, rdp->qlen - count);
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -2793,7 +2794,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	/* Funnel through hierarchy to reduce memory contention. */
 	rnp = __this_cpu_read(rsp->rda->mynode);
 	for (; rnp != NULL; rnp = rnp->parent) {
-		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
+		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
 		      !raw_spin_trylock(&rnp->fqslock);
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
@@ -2809,13 +2810,12 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
 	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
-	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 		return;  /* Someone beat us to it. */
 	}
-	ACCESS_ONCE(rsp->gp_flags) =
-		ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
+	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
 	rcu_gp_kthread_wake(rsp);
 }
@@ -2881,7 +2881,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
 	if (likely(!rsp->boost)) {
 		rcu_do_batch(rsp, rdp);
@@ -2972,7 +2972,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
 	if (debug_rcu_head_queue(head)) {
 		/* Probable double call_rcu(), so leak the callback. */
-		ACCESS_ONCE(head->func) = rcu_leak_callback;
+		WRITE_ONCE(head->func, rcu_leak_callback);
 		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
 		return;
 	}
@@ -3011,7 +3011,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		if (!likely(rdp->nxtlist))
 			init_default_callback_list(rdp);
 	}
-	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
+	WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -3450,14 +3450,14 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has another RCU grace period completed? */
-	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
+	if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
 		rdp->n_rp_gp_completed++;
 		return 1;
 	}
 
 	/* Has a new RCU grace period started? */
-	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
-	    unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
+	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+	    unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
 		rdp->n_rp_gp_started++;
 		return 1;
 	}
@@ -3564,7 +3564,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 {
 	int cpu;
 	struct rcu_data *rdp;
-	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+	unsigned long snap = READ_ONCE(rsp->n_barrier_done);
 	unsigned long snap_done;
 
 	_rcu_barrier_trace(rsp, "Begin", -1, snap);
@@ -3606,10 +3606,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/*
 	 * Increment ->n_barrier_done to avoid duplicate work.  Use
-	 * ACCESS_ONCE() to prevent the compiler from speculating
+	 * WRITE_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3645,7 +3645,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 				__call_rcu(&rdp->barrier_head,
 					   rcu_barrier_callback, rsp, cpu, 0);
 			}
-		} else if (ACCESS_ONCE(rdp->qlen)) {
+		} else if (READ_ONCE(rdp->qlen)) {
 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
 					   rsp->n_barrier_done);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
@@ -3665,7 +3665,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 8c0ec0f5a027..58b1ebdc4387 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -570,7 +570,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
 	return !rcu_preempted_readers_exp(rnp) &&
-	       ACCESS_ONCE(rnp->expmask) == 0;
+	       READ_ONCE(rnp->expmask) == 0;
 }
 
 /*
@@ -716,7 +716,7 @@ void synchronize_rcu_expedited(void)
 	int trycount = 0;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
+	snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
 	smp_mb(); /* Above access cannot bleed into critical section. */
 
 	/*
@@ -740,7 +740,7 @@ void synchronize_rcu_expedited(void)
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
 		if (ULONG_CMP_LT(snap,
-		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+		    READ_ONCE(sync_rcu_preempt_exp_count))) {
 			put_online_cpus();
 			goto mb_ret; /* Others did our work for us. */
 		}
@@ -752,7 +752,7 @@ void synchronize_rcu_expedited(void)
 			return;
 		}
 	}
-	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
 		put_online_cpus();
 		goto unlock_mb_ret; /* Others did our work for us. */
 	}
@@ -780,8 +780,7 @@ void synchronize_rcu_expedited(void)
 
 	/* Clean up and exit. */
 	smp_mb(); /* ensure expedited GP seen before counter increment. */
-	ACCESS_ONCE(sync_rcu_preempt_exp_count) =
-		sync_rcu_preempt_exp_count + 1;
+	WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
 unlock_mb_ret:
 	mutex_unlock(&sync_rcu_preempt_exp_mutex);
 mb_ret:
@@ -994,8 +993,8 @@ static int rcu_boost(struct rcu_node *rnp)
 	struct task_struct *t;
 	struct list_head *tb;
 
-	if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
-	    ACCESS_ONCE(rnp->boost_tasks) == NULL)
+	if (READ_ONCE(rnp->exp_tasks) == NULL &&
+	    READ_ONCE(rnp->boost_tasks) == NULL)
 		return 0;  /* Nothing left to boost. */
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1048,8 +1047,8 @@ static int rcu_boost(struct rcu_node *rnp)
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
-	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
+	return READ_ONCE(rnp->exp_tasks) != NULL ||
+	       READ_ONCE(rnp->boost_tasks) != NULL;
 }
 
 /*
@@ -1462,7 +1461,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 	 * callbacks not yet ready to invoke.
 	 */
 	if ((rdp->completed != rnp->completed ||
-	     unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
+	     unlikely(READ_ONCE(rdp->gpwrap))) &&
 	    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
 		note_gp_changes(rsp, rdp);
 
@@ -1534,7 +1533,7 @@ static void rcu_prepare_for_idle(void)
 	int tne;
 
 	/* Handle nohz enablement switches conservatively. */
-	tne = ACCESS_ONCE(tick_nohz_active);
+	tne = READ_ONCE(tick_nohz_active);
 	if (tne != rdtp->tick_nohz_enabled_snap) {
 		if (rcu_cpu_has_callbacks(NULL))
 			invoke_rcu_core(); /* force nohz to see update. */
@@ -1760,7 +1759,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	       atomic_read(&rdtp->dynticks) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
-	       ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
 	       fast_no_hz);
 }
 
@@ -1898,11 +1897,11 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 {
 	struct rcu_data *rdp_leader = rdp->nocb_leader;
 
-	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
+	if (!READ_ONCE(rdp_leader->nocb_kthread))
 		return;
-	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
+	if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
 		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
-		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
+		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
 		wake_up(&rdp_leader->nocb_wq);
 	}
 }
@@ -1934,14 +1933,14 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 	ret = atomic_long_read(&rdp->nocb_q_count);
 
 #ifdef CONFIG_PROVE_RCU
-	rhp = ACCESS_ONCE(rdp->nocb_head);
+	rhp = READ_ONCE(rdp->nocb_head);
 	if (!rhp)
-		rhp = ACCESS_ONCE(rdp->nocb_gp_head);
+		rhp = READ_ONCE(rdp->nocb_gp_head);
 	if (!rhp)
-		rhp = ACCESS_ONCE(rdp->nocb_follower_head);
+		rhp = READ_ONCE(rdp->nocb_follower_head);
 
 	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
-	if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp &&
+	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
 	    rcu_scheduler_fully_active) {
 		/* RCU callback enqueued before CPU first came online??? */
 		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
@@ -1975,12 +1974,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	atomic_long_add(rhcount, &rdp->nocb_q_count);
 	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
 	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
-	ACCESS_ONCE(*old_rhpp) = rhp;
+	WRITE_ONCE(*old_rhpp, rhp);
 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
 	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
 	/* If we are not being polled and there is a kthread, awaken it ... */
-	t = ACCESS_ONCE(rdp->nocb_kthread);
+	t = READ_ONCE(rdp->nocb_kthread);
 	if (rcu_nocb_poll || !t) {
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 				    TPS("WakeNotPoll"));
@@ -2118,7 +2117,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	for (;;) {
 		wait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
-			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
+			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
 		if (likely(d))
 			break;
 		WARN_ON(signal_pending(current));
@@ -2145,7 +2144,7 @@ wait_again:
 	if (!rcu_nocb_poll) {
 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
 		wait_event_interruptible(my_rdp->nocb_wq,
-				!ACCESS_ONCE(my_rdp->nocb_leader_sleep));
+				!READ_ONCE(my_rdp->nocb_leader_sleep));
 		/* Memory barrier handled by smp_mb() calls below and repoll. */
 	} else if (firsttime) {
 		firsttime = false; /* Don't drown trace log with "Poll"! */
@@ -2159,12 +2158,12 @@ wait_again:
 	 */
 	gotcbs = false;
 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
-		rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
+		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
 		if (!rdp->nocb_gp_head)
 			continue;  /* No CBs here, try next follower. */
 
 		/* Move callbacks to wait-for-GP list, which is empty. */
-		ACCESS_ONCE(rdp->nocb_head) = NULL;
+		WRITE_ONCE(rdp->nocb_head, NULL);
 		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
 		gotcbs = true;
 	}
@@ -2184,7 +2183,7 @@ wait_again: | |||
2184 | my_rdp->nocb_leader_sleep = true; | 2183 | my_rdp->nocb_leader_sleep = true; |
2185 | smp_mb(); /* Ensure _sleep true before scan. */ | 2184 | smp_mb(); /* Ensure _sleep true before scan. */ |
2186 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) | 2185 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) |
2187 | if (ACCESS_ONCE(rdp->nocb_head)) { | 2186 | if (READ_ONCE(rdp->nocb_head)) { |
2188 | /* Found CB, so short-circuit next wait. */ | 2187 | /* Found CB, so short-circuit next wait. */ |
2189 | my_rdp->nocb_leader_sleep = false; | 2188 | my_rdp->nocb_leader_sleep = false; |
2190 | break; | 2189 | break; |
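The rescan above closes a classic sleep/wakeup race: the leader first commits to sleeping, executes a full barrier, and only then re-reads each follower's ->nocb_head with READ_ONCE(), so an enqueue racing with the scan is caught either by this rescan or by the wakeup it triggers. A minimal sketch of the pattern, with __sync_synchronize() standing in for smp_mb():

	#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
	#define smp_mb()	__sync_synchronize()

	struct follower {
		void *head;			/* callbacks, set by producers */
		struct follower *next;
	};

	/* Returns nonzero if it is safe to sleep; mirrors the leader's rescan. */
	static int leader_may_sleep(struct follower *first, int *sleep_flag)
	{
		struct follower *f;

		*sleep_flag = 1;	/* commit to sleeping... */
		smp_mb();		/* ...before scanning for late arrivals. */
		for (f = first; f; f = f->next)
			if (READ_ONCE(f->head)) {
				*sleep_flag = 0;	/* found work: stay awake */
				break;
			}
		return *sleep_flag;
	}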
@@ -2205,7 +2204,7 @@ wait_again: | |||
2205 | 2204 | ||
2206 | /* Each pass through the following loop wakes a follower, if needed. */ | 2205 | /* Each pass through the following loop wakes a follower, if needed. */ |
2207 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { | 2206 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { |
2208 | if (ACCESS_ONCE(rdp->nocb_head)) | 2207 | if (READ_ONCE(rdp->nocb_head)) |
2209 | my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/ | 2208 | my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/ |
2210 | if (!rdp->nocb_gp_head) | 2209 | if (!rdp->nocb_gp_head) |
2211 | continue; /* No CBs, so no need to wake follower. */ | 2210 | continue; /* No CBs, so no need to wake follower. */ |
@@ -2241,7 +2240,7 @@ static void nocb_follower_wait(struct rcu_data *rdp) | |||
2241 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, | 2240 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, |
2242 | "FollowerSleep"); | 2241 | "FollowerSleep"); |
2243 | wait_event_interruptible(rdp->nocb_wq, | 2242 | wait_event_interruptible(rdp->nocb_wq, |
2244 | ACCESS_ONCE(rdp->nocb_follower_head)); | 2243 | READ_ONCE(rdp->nocb_follower_head)); |
2245 | } else if (firsttime) { | 2244 | } else if (firsttime) { |
2246 | /* Don't drown trace log with "Poll"! */ | 2245 | /* Don't drown trace log with "Poll"! */ |
2247 | firsttime = false; | 2246 | firsttime = false; |
@@ -2282,10 +2281,10 @@ static int rcu_nocb_kthread(void *arg) | |||
2282 | nocb_follower_wait(rdp); | 2281 | nocb_follower_wait(rdp); |
2283 | 2282 | ||
2284 | /* Pull the ready-to-invoke callbacks onto local list. */ | 2283 | /* Pull the ready-to-invoke callbacks onto local list. */ |
2285 | list = ACCESS_ONCE(rdp->nocb_follower_head); | 2284 | list = READ_ONCE(rdp->nocb_follower_head); |
2286 | BUG_ON(!list); | 2285 | BUG_ON(!list); |
2287 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); | 2286 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); |
2288 | ACCESS_ONCE(rdp->nocb_follower_head) = NULL; | 2287 | WRITE_ONCE(rdp->nocb_follower_head, NULL); |
2289 | tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); | 2288 | tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); |
2290 | 2289 | ||
2291 | /* Each pass through the following loop invokes a callback. */ | 2290 | /* Each pass through the following loop invokes a callback. */ |
@@ -2324,7 +2323,7 @@ static int rcu_nocb_kthread(void *arg) | |||
2324 | /* Is a deferred wakeup of rcu_nocb_kthread() required? */ | 2323 | /* Is a deferred wakeup of rcu_nocb_kthread() required? */ |
2325 | static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) | 2324 | static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) |
2326 | { | 2325 | { |
2327 | return ACCESS_ONCE(rdp->nocb_defer_wakeup); | 2326 | return READ_ONCE(rdp->nocb_defer_wakeup); |
2328 | } | 2327 | } |
2329 | 2328 | ||
2330 | /* Do a deferred wakeup of rcu_nocb_kthread(). */ | 2329 | /* Do a deferred wakeup of rcu_nocb_kthread(). */ |
@@ -2334,8 +2333,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) | |||
2334 | 2333 | ||
2335 | if (!rcu_nocb_need_deferred_wakeup(rdp)) | 2334 | if (!rcu_nocb_need_deferred_wakeup(rdp)) |
2336 | return; | 2335 | return; |
2337 | ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup); | 2336 | ndw = READ_ONCE(rdp->nocb_defer_wakeup); |
2338 | ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT; | 2337 | WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT); |
2339 | wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE); | 2338 | wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE); |
2340 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake")); | 2339 | trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake")); |
2341 | } | 2340 | } |
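do_nocb_deferred_wakeup() reads the wakeup reason with READ_ONCE() and clears it with WRITE_ONCE() before acting, so a request arriving while the wakeup runs simply re-arms the flag rather than being lost. A hedged sketch of the read-then-clear protocol (reason codes hypothetical):

	#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

	enum { WAKE_NOT, WAKE, WAKE_FORCE };

	static int defer_wakeup_reason;

	/* Consume a deferred-wakeup request exactly once. */
	static void do_deferred_wakeup(void (*wake)(int force))
	{
		int ndw = READ_ONCE(defer_wakeup_reason);

		if (ndw == WAKE_NOT)
			return;
		WRITE_ONCE(defer_wakeup_reason, WAKE_NOT);	/* clear before acting */
		wake(ndw == WAKE_FORCE);
	}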
@@ -2448,7 +2447,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu) | |||
2448 | t = kthread_run(rcu_nocb_kthread, rdp_spawn, | 2447 | t = kthread_run(rcu_nocb_kthread, rdp_spawn, |
2449 | "rcuo%c/%d", rsp->abbr, cpu); | 2448 | "rcuo%c/%d", rsp->abbr, cpu); |
2450 | BUG_ON(IS_ERR(t)); | 2449 | BUG_ON(IS_ERR(t)); |
2451 | ACCESS_ONCE(rdp_spawn->nocb_kthread) = t; | 2450 | WRITE_ONCE(rdp_spawn->nocb_kthread, t); |
2452 | } | 2451 | } |
2453 | 2452 | ||
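Publishing the kthread pointer with WRITE_ONCE() pairs with the READ_ONCE(rdp->nocb_kthread) checks in the enqueue path above: callers observe either NULL (and defer the wakeup) or a fully created task, never a half-written pointer. A small userspace analogue using pthreads in place of kthread_run():

	#include <pthread.h>
	#include <stddef.h>

	#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

	static pthread_t worker;
	static int worker_live;		/* 0 until the worker is published */

	static void *worker_fn(void *arg) { return arg; }

	static void spawn_worker(void)
	{
		pthread_create(&worker, NULL, worker_fn, NULL);
		WRITE_ONCE(worker_live, 1);	/* publish only after creation */
	}

	/* Fast-path callers poll the flag; 0 means "not there yet". */
	static int worker_available(void)
	{
		return READ_ONCE(worker_live);
	}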
2454 | /* | 2453 | /* |
@@ -2663,7 +2662,7 @@ static void rcu_sysidle_enter(int irq) | |||
2663 | 2662 | ||
2664 | /* Record start of fully idle period. */ | 2663 | /* Record start of fully idle period. */ |
2665 | j = jiffies; | 2664 | j = jiffies; |
2666 | ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; | 2665 | WRITE_ONCE(rdtp->dynticks_idle_jiffies, j); |
2667 | smp_mb__before_atomic(); | 2666 | smp_mb__before_atomic(); |
2668 | atomic_inc(&rdtp->dynticks_idle); | 2667 | atomic_inc(&rdtp->dynticks_idle); |
2669 | smp_mb__after_atomic(); | 2668 | smp_mb__after_atomic(); |
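rcu_sysidle_enter() stores the idle-entry timestamp with WRITE_ONCE() and only then increments ->dynticks_idle, with barriers on both sides of the atomic, so a remote observer that sees the bumped counter also sees a timestamp at least that recent. A minimal store-then-increment sketch, ignoring the full implementation's counter-parity semantics:

	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))
	#define smp_mb()	 __sync_synchronize()

	static unsigned long idle_jiffies;	/* timestamp of last idle entry */
	static int dynticks_idle;		/* flips on idle entry/exit */

	static void sysidle_enter(unsigned long now)
	{
		WRITE_ONCE(idle_jiffies, now);	/* timestamp first... */
		smp_mb();			/* ...ordered before the counter, */
		__atomic_fetch_add(&dynticks_idle, 1, __ATOMIC_RELAXED);
		smp_mb();			/* ...and visible before returning. */
	}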
@@ -2681,7 +2680,7 @@ static void rcu_sysidle_enter(int irq) | |||
2681 | */ | 2680 | */ |
2682 | void rcu_sysidle_force_exit(void) | 2681 | void rcu_sysidle_force_exit(void) |
2683 | { | 2682 | { |
2684 | int oldstate = ACCESS_ONCE(full_sysidle_state); | 2683 | int oldstate = READ_ONCE(full_sysidle_state); |
2685 | int newoldstate; | 2684 | int newoldstate; |
2686 | 2685 | ||
2687 | /* | 2686 | /* |
@@ -2794,7 +2793,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, | |||
2794 | smp_mb(); /* Read counters before timestamps. */ | 2793 | smp_mb(); /* Read counters before timestamps. */ |
2795 | 2794 | ||
2796 | /* Pick up timestamps. */ | 2795 | /* Pick up timestamps. */ |
2797 | j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies); | 2796 | j = READ_ONCE(rdtp->dynticks_idle_jiffies); |
2798 | /* If this CPU entered idle more recently, update maxj timestamp. */ | 2797 | /* If this CPU entered idle more recently, update maxj timestamp. */ |
2799 | if (ULONG_CMP_LT(*maxj, j)) | 2798 | if (ULONG_CMP_LT(*maxj, j)) |
2800 | *maxj = j; | 2799 | *maxj = j; |
@@ -2831,11 +2830,11 @@ static unsigned long rcu_sysidle_delay(void) | |||
2831 | static void rcu_sysidle(unsigned long j) | 2830 | static void rcu_sysidle(unsigned long j) |
2832 | { | 2831 | { |
2833 | /* Check the current state. */ | 2832 | /* Check the current state. */ |
2834 | switch (ACCESS_ONCE(full_sysidle_state)) { | 2833 | switch (READ_ONCE(full_sysidle_state)) { |
2835 | case RCU_SYSIDLE_NOT: | 2834 | case RCU_SYSIDLE_NOT: |
2836 | 2835 | ||
2837 | /* First time all are idle, so note a short idle period. */ | 2836 | /* First time all are idle, so note a short idle period. */ |
2838 | ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT; | 2837 | WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT); |
2839 | break; | 2838 | break; |
2840 | 2839 | ||
2841 | case RCU_SYSIDLE_SHORT: | 2840 | case RCU_SYSIDLE_SHORT: |
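full_sysidle_state is a small lock-free state machine: readers sample it with READ_ONCE(), the designated advancer moves it forward with WRITE_ONCE(), and racing exits (as in the rcu_sysidle_force_exit() hunk above) retreat via a cmpxchg loop. A sketch with a reduced, hypothetical state set:

	#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

	enum { SYSIDLE_NOT, SYSIDLE_SHORT, SYSIDLE_LONG, SYSIDLE_FULL };

	static int sysidle_state;

	/* The single designated advancer nudges the state forward. */
	static void sysidle_advance(void)
	{
		switch (READ_ONCE(sysidle_state)) {
		case SYSIDLE_NOT:
			WRITE_ONCE(sysidle_state, SYSIDLE_SHORT);
			break;
		case SYSIDLE_SHORT:
			WRITE_ONCE(sysidle_state, SYSIDLE_LONG);
			break;
		}
	}

	/* Anyone may force an exit; the CAS loop tolerates racing advances. */
	static void sysidle_force_exit(void)
	{
		int old = READ_ONCE(sysidle_state);

		while (old > SYSIDLE_SHORT) {
			int seen = __sync_val_compare_and_swap(&sysidle_state,
							       old, SYSIDLE_NOT);
			if (seen == old)
				break;
			old = seen;
		}
	}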
@@ -2873,7 +2872,7 @@ static void rcu_sysidle_cancel(void) | |||
2873 | { | 2872 | { |
2874 | smp_mb(); | 2873 | smp_mb(); |
2875 | if (full_sysidle_state > RCU_SYSIDLE_SHORT) | 2874 | if (full_sysidle_state > RCU_SYSIDLE_SHORT) |
2876 | ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT; | 2875 | WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT); |
2877 | } | 2876 | } |
2878 | 2877 | ||
2879 | /* | 2878 | /* |
@@ -2925,7 +2924,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp) | |||
2925 | smp_mb(); /* grace period precedes setting inuse. */ | 2924 | smp_mb(); /* grace period precedes setting inuse. */ |
2926 | 2925 | ||
2927 | rshp = container_of(rhp, struct rcu_sysidle_head, rh); | 2926 | rshp = container_of(rhp, struct rcu_sysidle_head, rh); |
2928 | ACCESS_ONCE(rshp->inuse) = 0; | 2927 | WRITE_ONCE(rshp->inuse, 0); |
2929 | } | 2928 | } |
2930 | 2929 | ||
2931 | /* | 2930 | /* |
@@ -2936,7 +2935,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp) | |||
2936 | bool rcu_sys_is_idle(void) | 2935 | bool rcu_sys_is_idle(void) |
2937 | { | 2936 | { |
2938 | static struct rcu_sysidle_head rsh; | 2937 | static struct rcu_sysidle_head rsh; |
2939 | int rss = ACCESS_ONCE(full_sysidle_state); | 2938 | int rss = READ_ONCE(full_sysidle_state); |
2940 | 2939 | ||
2941 | if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu)) | 2940 | if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu)) |
2942 | return false; | 2941 | return false; |
@@ -2964,7 +2963,7 @@ bool rcu_sys_is_idle(void) | |||
2964 | } | 2963 | } |
2965 | rcu_sysidle_report(rcu_state_p, isidle, maxj, false); | 2964 | rcu_sysidle_report(rcu_state_p, isidle, maxj, false); |
2966 | oldrss = rss; | 2965 | oldrss = rss; |
2967 | rss = ACCESS_ONCE(full_sysidle_state); | 2966 | rss = READ_ONCE(full_sysidle_state); |
2968 | } | 2967 | } |
2969 | } | 2968 | } |
2970 | 2969 | ||
@@ -3048,7 +3047,7 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp) | |||
3048 | #ifdef CONFIG_NO_HZ_FULL | 3047 | #ifdef CONFIG_NO_HZ_FULL |
3049 | if (tick_nohz_full_cpu(smp_processor_id()) && | 3048 | if (tick_nohz_full_cpu(smp_processor_id()) && |
3050 | (!rcu_gp_in_progress(rsp) || | 3049 | (!rcu_gp_in_progress(rsp) || |
3051 | ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ))) | 3050 | ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ))) |
3052 | return 1; | 3051 | return 1; |
3053 | #endif /* #ifdef CONFIG_NO_HZ_FULL */ | 3052 | #endif /* #ifdef CONFIG_NO_HZ_FULL */ |
3054 | return 0; | 3053 | return 0; |
@@ -3077,7 +3076,7 @@ static void rcu_bind_gp_kthread(void) | |||
3077 | static void rcu_dynticks_task_enter(void) | 3076 | static void rcu_dynticks_task_enter(void) |
3078 | { | 3077 | { |
3079 | #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) | 3078 | #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) |
3080 | ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id(); | 3079 | WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); |
3081 | #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ | 3080 | #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ |
3082 | } | 3081 | } |
3083 | 3082 | ||
@@ -3085,6 +3084,6 @@ static void rcu_dynticks_task_enter(void) | |||
3085 | static void rcu_dynticks_task_exit(void) | 3084 | static void rcu_dynticks_task_exit(void) |
3086 | { | 3085 | { |
3087 | #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) | 3086 | #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) |
3088 | ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1; | 3087 | WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); |
3089 | #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ | 3088 | #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ |
3090 | } | 3089 | } |
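The task-enter/exit hooks publish which CPU a nohz_full task went idle on; the field is read remotely by the tasks-RCU machinery (see check_holdout_task() in update.c below), so the stores use WRITE_ONCE() to rule out tearing. A trivial standalone rendering:

	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

	struct task {
		int rcu_tasks_idle_cpu;		/* -1 while not idle */
	};

	static void dynticks_task_enter(struct task *t, int cpu)
	{
		WRITE_ONCE(t->rcu_tasks_idle_cpu, cpu);
	}

	static void dynticks_task_exit(struct task *t)
	{
		WRITE_ONCE(t->rcu_tasks_idle_cpu, -1);
	}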
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c index f92361efd0f5..3ea7ffc7d5c4 100644 --- a/kernel/rcu/tree_trace.c +++ b/kernel/rcu/tree_trace.c | |||
@@ -277,7 +277,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
277 | seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", | 277 | seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", |
278 | rsp->n_force_qs, rsp->n_force_qs_ngp, | 278 | rsp->n_force_qs, rsp->n_force_qs_ngp, |
279 | rsp->n_force_qs - rsp->n_force_qs_ngp, | 279 | rsp->n_force_qs - rsp->n_force_qs_ngp, |
280 | ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen); | 280 | READ_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen); |
281 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) { | 281 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) { |
282 | if (rnp->level != level) { | 282 | if (rnp->level != level) { |
283 | seq_puts(m, "\n"); | 283 | seq_puts(m, "\n"); |
@@ -323,8 +323,8 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp) | |||
323 | struct rcu_node *rnp = &rsp->node[0]; | 323 | struct rcu_node *rnp = &rsp->node[0]; |
324 | 324 | ||
325 | raw_spin_lock_irqsave(&rnp->lock, flags); | 325 | raw_spin_lock_irqsave(&rnp->lock, flags); |
326 | completed = ACCESS_ONCE(rsp->completed); | 326 | completed = READ_ONCE(rsp->completed); |
327 | gpnum = ACCESS_ONCE(rsp->gpnum); | 327 | gpnum = READ_ONCE(rsp->gpnum); |
328 | if (completed == gpnum) | 328 | if (completed == gpnum) |
329 | gpage = 0; | 329 | gpage = 0; |
330 | else | 330 | else |
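The tracing code reads ->n_force_qs_lh with READ_ONCE() because it is updated locklessly elsewhere: the debugfs output tolerates a slightly stale value, but not a torn load. A tiny sketch of such a lockless stat snapshot:

	#include <stdio.h>

	#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

	static unsigned long n_force_qs_lh;	/* bumped locklessly on a fast path */

	static void print_stats(void)
	{
		/* One-time load: stale is fine, torn is not. */
		printf("fqlh=%lu\n", READ_ONCE(n_force_qs_lh));
	}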
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 1f133350da01..afaecb7a799a 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c | |||
@@ -150,14 +150,14 @@ void __rcu_read_unlock(void) | |||
150 | barrier(); /* critical section before exit code. */ | 150 | barrier(); /* critical section before exit code. */ |
151 | t->rcu_read_lock_nesting = INT_MIN; | 151 | t->rcu_read_lock_nesting = INT_MIN; |
152 | barrier(); /* assign before ->rcu_read_unlock_special load */ | 152 | barrier(); /* assign before ->rcu_read_unlock_special load */ |
153 | if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s))) | 153 | if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) |
154 | rcu_read_unlock_special(t); | 154 | rcu_read_unlock_special(t); |
155 | barrier(); /* ->rcu_read_unlock_special load before assign */ | 155 | barrier(); /* ->rcu_read_unlock_special load before assign */ |
156 | t->rcu_read_lock_nesting = 0; | 156 | t->rcu_read_lock_nesting = 0; |
157 | } | 157 | } |
158 | #ifdef CONFIG_PROVE_LOCKING | 158 | #ifdef CONFIG_PROVE_LOCKING |
159 | { | 159 | { |
160 | int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); | 160 | int rrln = READ_ONCE(t->rcu_read_lock_nesting); |
161 | 161 | ||
162 | WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); | 162 | WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); |
163 | } | 163 | } |
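__rcu_read_unlock() parks ->rcu_read_lock_nesting at INT_MIN around the special-case processing, so interrupt-level code sampling the counter still sees a negative value, meaning "inside a read-side critical section"; the compiler barriers pin the stores against the READ_ONCE() of ->rcu_read_unlock_special. A sketch of the barrier()-fenced sequence, with the usual inline-asm definition of barrier():

	#include <limits.h>

	#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
	#define barrier()	__asm__ __volatile__("" : : : "memory")

	static int nesting;		/* this task's read-side nesting depth */
	static int unlock_special;	/* set by irq/scheduler when work is needed */

	static void read_unlock_outermost(void (*do_special)(void))
	{
		barrier();		/* critical section before exit code. */
		nesting = INT_MIN;	/* park: negative still means "inside". */
		barrier();		/* store before the special-flag load. */
		if (READ_ONCE(unlock_special))
			do_special();
		barrier();		/* special-flag load before the reset. */
		nesting = 0;
	}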
@@ -389,17 +389,17 @@ module_param(rcu_cpu_stall_timeout, int, 0644); | |||
389 | 389 | ||
390 | int rcu_jiffies_till_stall_check(void) | 390 | int rcu_jiffies_till_stall_check(void) |
391 | { | 391 | { |
392 | int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout); | 392 | int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout); |
393 | 393 | ||
394 | /* | 394 | /* |
395 | * Limit check must be consistent with the Kconfig limits | 395 | * Limit check must be consistent with the Kconfig limits |
396 | * for CONFIG_RCU_CPU_STALL_TIMEOUT. | 396 | * for CONFIG_RCU_CPU_STALL_TIMEOUT. |
397 | */ | 397 | */ |
398 | if (till_stall_check < 3) { | 398 | if (till_stall_check < 3) { |
399 | ACCESS_ONCE(rcu_cpu_stall_timeout) = 3; | 399 | WRITE_ONCE(rcu_cpu_stall_timeout, 3); |
400 | till_stall_check = 3; | 400 | till_stall_check = 3; |
401 | } else if (till_stall_check > 300) { | 401 | } else if (till_stall_check > 300) { |
402 | ACCESS_ONCE(rcu_cpu_stall_timeout) = 300; | 402 | WRITE_ONCE(rcu_cpu_stall_timeout, 300); |
403 | till_stall_check = 300; | 403 | till_stall_check = 300; |
404 | } | 404 | } |
405 | return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; | 405 | return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; |
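rcu_cpu_stall_timeout is a writable module parameter, so the clamp writes the corrected value back with WRITE_ONCE() (sysfs writers race with this path) while computing from a locally sampled copy. A sketch of the clamp using the hunk's 3..300 second bounds and omitting the kernel's RCU_STALL_DELAY_DELTA slack (HZ is a placeholder):

	#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

	#define HZ 100			/* assumption: placeholder tick rate */

	static int stall_timeout = 21;	/* seconds; may be rewritten via sysfs */

	static int jiffies_till_stall_check(void)
	{
		int t = READ_ONCE(stall_timeout);	/* sample once, use the copy */

		if (t < 3) {
			WRITE_ONCE(stall_timeout, 3);	/* repair the shared value */
			t = 3;
		} else if (t > 300) {
			WRITE_ONCE(stall_timeout, 300);
			t = 300;
		}
		return t * HZ;
	}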
@@ -550,12 +550,12 @@ static void check_holdout_task(struct task_struct *t, | |||
550 | { | 550 | { |
551 | int cpu; | 551 | int cpu; |
552 | 552 | ||
553 | if (!ACCESS_ONCE(t->rcu_tasks_holdout) || | 553 | if (!READ_ONCE(t->rcu_tasks_holdout) || |
554 | t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) || | 554 | t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || |
555 | !ACCESS_ONCE(t->on_rq) || | 555 | !READ_ONCE(t->on_rq) || |
556 | (IS_ENABLED(CONFIG_NO_HZ_FULL) && | 556 | (IS_ENABLED(CONFIG_NO_HZ_FULL) && |
557 | !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { | 557 | !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { |
558 | ACCESS_ONCE(t->rcu_tasks_holdout) = false; | 558 | WRITE_ONCE(t->rcu_tasks_holdout, false); |
559 | list_del_init(&t->rcu_tasks_holdout_list); | 559 | list_del_init(&t->rcu_tasks_holdout_list); |
560 | put_task_struct(t); | 560 | put_task_struct(t); |
561 | return; | 561 | return; |
@@ -639,11 +639,11 @@ static int __noreturn rcu_tasks_kthread(void *arg) | |||
639 | */ | 639 | */ |
640 | rcu_read_lock(); | 640 | rcu_read_lock(); |
641 | for_each_process_thread(g, t) { | 641 | for_each_process_thread(g, t) { |
642 | if (t != current && ACCESS_ONCE(t->on_rq) && | 642 | if (t != current && READ_ONCE(t->on_rq) && |
643 | !is_idle_task(t)) { | 643 | !is_idle_task(t)) { |
644 | get_task_struct(t); | 644 | get_task_struct(t); |
645 | t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw); | 645 | t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); |
646 | ACCESS_ONCE(t->rcu_tasks_holdout) = true; | 646 | WRITE_ONCE(t->rcu_tasks_holdout, true); |
647 | list_add(&t->rcu_tasks_holdout_list, | 647 | list_add(&t->rcu_tasks_holdout_list, |
648 | &rcu_tasks_holdouts); | 648 | &rcu_tasks_holdouts); |
649 | } | 649 | } |
@@ -672,7 +672,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) | |||
672 | struct task_struct *t1; | 672 | struct task_struct *t1; |
673 | 673 | ||
674 | schedule_timeout_interruptible(HZ); | 674 | schedule_timeout_interruptible(HZ); |
675 | rtst = ACCESS_ONCE(rcu_task_stall_timeout); | 675 | rtst = READ_ONCE(rcu_task_stall_timeout); |
676 | needreport = rtst > 0 && | 676 | needreport = rtst > 0 && |
677 | time_after(jiffies, lastreport + rtst); | 677 | time_after(jiffies, lastreport + rtst); |
678 | if (needreport) | 678 | if (needreport) |
@@ -728,7 +728,7 @@ static void rcu_spawn_tasks_kthread(void) | |||
728 | static struct task_struct *rcu_tasks_kthread_ptr; | 728 | static struct task_struct *rcu_tasks_kthread_ptr; |
729 | struct task_struct *t; | 729 | struct task_struct *t; |
730 | 730 | ||
731 | if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) { | 731 | if (READ_ONCE(rcu_tasks_kthread_ptr)) { |
732 | smp_mb(); /* Ensure caller sees full kthread. */ | 732 | smp_mb(); /* Ensure caller sees full kthread. */ |
733 | return; | 733 | return; |
734 | } | 734 | } |
@@ -740,7 +740,7 @@ static void rcu_spawn_tasks_kthread(void) | |||
740 | t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); | 740 | t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); |
741 | BUG_ON(IS_ERR(t)); | 741 | BUG_ON(IS_ERR(t)); |
742 | smp_mb(); /* Ensure others see full kthread. */ | 742 | smp_mb(); /* Ensure others see full kthread. */ |
743 | ACCESS_ONCE(rcu_tasks_kthread_ptr) = t; | 743 | WRITE_ONCE(rcu_tasks_kthread_ptr, t); |
744 | mutex_unlock(&rcu_tasks_kthread_mutex); | 744 | mutex_unlock(&rcu_tasks_kthread_mutex); |
745 | } | 745 | } |
746 | 746 | ||
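rcu_spawn_tasks_kthread() is a double-checked spawn: a lockless READ_ONCE() fast path followed by the smp_mb() that ensures the caller sees the fully initialized kthread, then creation and WRITE_ONCE() publication under the mutex. A userspace sketch of the pattern with pthreads in place of kthread_run(); the recheck under the lock is an assumption, since those lines are elided from the hunk above:

	#include <pthread.h>
	#include <stddef.h>

	#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))
	#define smp_mb()	 __sync_synchronize()

	static pthread_mutex_t spawn_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_t *kthread_ptr;		/* NULL until the worker exists */
	static pthread_t kthread;

	static void *kthread_fn(void *arg) { return arg; }

	static void spawn_once(void)
	{
		if (READ_ONCE(kthread_ptr)) {	/* lockless fast path */
			smp_mb();		/* see the fully created thread */
			return;
		}
		pthread_mutex_lock(&spawn_mutex);
		if (!kthread_ptr) {		/* recheck under the lock (assumed) */
			pthread_create(&kthread, NULL, kthread_fn, NULL);
			smp_mb();		/* creation before publication */
			WRITE_ONCE(kthread_ptr, &kthread);
		}
		pthread_mutex_unlock(&spawn_mutex);
	}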