author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-05-09 18:44:42 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-07-02 15:34:22 -0400
commit		1d1fb395f6dbc07b36285bbedcf01a73b57f7cb5 (patch)
tree		450c8e5e2d69c6e8b6fd7820d6a7dc112d60325b /kernel/rcutree.c
parent		3f5d3ea64f1783f0d4ea0d35890ae3297f045a8b (diff)
rcu: Add ACCESS_ONCE() to ->qlen accesses
The _rcu_barrier() function accesses other CPUs' rcu_data structure's ->qlen field without benefit of locking. This commit therefore adds the required ACCESS_ONCE() wrappers around accesses and updates that need it. ACCESS_ONCE() is not needed when a CPU accesses its own ->qlen, or in code that cannot run while _rcu_barrier() is sampling ->qlen fields.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
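For context, here is a minimal standalone sketch of the pattern this commit applies. It is not kernel code: the struct and function names below are hypothetical stand-ins for struct rcu_data and its users; only the ACCESS_ONCE() definition matches the kernel's <linux/compiler.h> of this era.

/*
 * Sketch of a lockless counter shared between an owning CPU and a
 * remote sampler. All names except ACCESS_ONCE() are made up.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct fake_rcu_data {
	long qlen;	/* callback-queue length, sampled without a lock */
};

/* Owning CPU enqueues a callback; the counter may be read remotely. */
static void fake_enqueue(struct fake_rcu_data *rdp)
{
	/* Volatile access keeps the compiler from tearing or refetching. */
	ACCESS_ONCE(rdp->qlen)++;
}

/* Remote CPU samples the counter, as _rcu_barrier() does for ->qlen. */
static long fake_sample_qlen(struct fake_rcu_data *rdp)
{
	return ACCESS_ONCE(rdp->qlen);
}

When a CPU touches only its own counter and no remote sampler can be running, the plain access suffices, which is why the commit leaves such sites (e.g. ->qlen_lazy updates) unwrapped.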
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	|	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 81e0394e46af..89addada3e3a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1350,7 +1350,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 	rsp->qlen += rdp->qlen;
 	rdp->n_cbs_orphaned += rdp->qlen;
 	rdp->qlen_lazy = 0;
-	rdp->qlen = 0;
+	ACCESS_ONCE(rdp->qlen) = 0;
 }
 
 /*
@@ -1600,7 +1600,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	rdp->qlen -= count;
+	ACCESS_ONCE(rdp->qlen) -= count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -1889,7 +1889,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
-	rdp->qlen++;
+	ACCESS_ONCE(rdp->qlen)++;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -2423,7 +2423,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
 	init_callback_list(rdp);
 	rdp->qlen_lazy = 0;
-	rdp->qlen = 0;
+	ACCESS_ONCE(rdp->qlen) = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);