aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
author: Paul E. McKenney <paul.mckenney@linaro.org> 2012-05-29 08:18:53 -0400
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2012-07-02 15:33:22 -0400
commit7be7f0be907224445acc62b3884c892f38b7ff40 (patch)
tree9802c524d04374e6a4e5c5b9d3a0d3c9085ef2fd /kernel
parent7db74df88b52844f4e966901e2972bba725e6766 (diff)
rcu: Move rcu_barrier_mutex to rcu_state structure
In order to allow each RCU flavor to concurrently execute its rcu_barrier() function, it is necessary to move the relevant state to the rcu_state structure. This commit therefore moves the rcu_barrier_mutex global variable to a new ->barrier_mutex field in the rcu_state structure.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/rcutree.c9
-rw-r--r--kernel/rcutree.h1
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ca7d1678ac79..ff992ac24e73 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -71,6 +71,7 @@ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
71 .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \ 71 .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
72 .orphan_nxttail = &sname##_state.orphan_nxtlist, \ 72 .orphan_nxttail = &sname##_state.orphan_nxtlist, \
73 .orphan_donetail = &sname##_state.orphan_donelist, \ 73 .orphan_donetail = &sname##_state.orphan_donelist, \
74 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
74 .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \ 75 .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \
75 .n_force_qs = 0, \ 76 .n_force_qs = 0, \
76 .n_force_qs_ngp = 0, \ 77 .n_force_qs_ngp = 0, \
@@ -155,10 +156,6 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
155unsigned long rcutorture_testseq; 156unsigned long rcutorture_testseq;
156unsigned long rcutorture_vernum; 157unsigned long rcutorture_vernum;
157 158
158/* State information for rcu_barrier() and friends. */
159
160static DEFINE_MUTEX(rcu_barrier_mutex);
161
162/* 159/*
163 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s 160 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
164 * permit this function to be invoked without holding the root rcu_node 161 * permit this function to be invoked without holding the root rcu_node
@@ -2303,7 +2300,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
2303 init_rcu_head_on_stack(&rd.barrier_head); 2300 init_rcu_head_on_stack(&rd.barrier_head);
2304 2301
2305 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 2302 /* Take mutex to serialize concurrent rcu_barrier() requests. */
2306 mutex_lock(&rcu_barrier_mutex); 2303 mutex_lock(&rsp->barrier_mutex);
2307 2304
2308 smp_mb(); /* Prevent any prior operations from leaking in. */ 2305 smp_mb(); /* Prevent any prior operations from leaking in. */
2309 2306
@@ -2380,7 +2377,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
2380 wait_for_completion(&rsp->barrier_completion); 2377 wait_for_completion(&rsp->barrier_completion);
2381 2378
2382 /* Other rcu_barrier() invocations can now safely proceed. */ 2379 /* Other rcu_barrier() invocations can now safely proceed. */
2383 mutex_unlock(&rcu_barrier_mutex); 2380 mutex_unlock(&rsp->barrier_mutex);
2384 2381
2385 destroy_rcu_head_on_stack(&rd.barrier_head); 2382 destroy_rcu_head_on_stack(&rd.barrier_head);
2386} 2383}
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index d1ca4424122b..7641aec3e59c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -400,6 +400,7 @@ struct rcu_state {
400 struct task_struct *rcu_barrier_in_progress; 400 struct task_struct *rcu_barrier_in_progress;
401 /* Task doing rcu_barrier(), */ 401 /* Task doing rcu_barrier(), */
402 /* or NULL if no barrier. */ 402 /* or NULL if no barrier. */
403 struct mutex barrier_mutex; /* Guards barrier fields. */
403 atomic_t barrier_cpu_count; /* # CPUs waiting on. */ 404 atomic_t barrier_cpu_count; /* # CPUs waiting on. */
404 struct completion barrier_completion; /* Wake at barrier end. */ 405 struct completion barrier_completion; /* Wake at barrier end. */
405 raw_spinlock_t fqslock; /* Only one task forcing */ 406 raw_spinlock_t fqslock; /* Only one task forcing */