diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2009-09-29 00:50:21 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-10-05 15:02:05 -0400 |
commit | 135c8aea557cf53abe6c8847e286d01442124193 (patch) | |
tree | 82d8ca15e13548749c7770e9a1a7bc6800ebefbb /kernel/rcupdate.c | |
parent | a0b6c9a78c41dc36732d6e1e90f0f2f57b29816f (diff) |
rcu: Replace the rcu_barrier enum with pointer to call_rcu*() function
The rcu_barrier enum causes several problems:
(1) you have to define the enum somewhere, and there is no
convenient place,
(2) the difference between TREE_RCU and TREE_PREEMPT_RCU causes
problems when you need to map from rcu_barrier enum to struct
rcu_state,
(3) the switch statements are large, and
(4) TINY_RCU really needs a different rcu_barrier() than do the
treercu implementations.
So replace it with a functionally equivalent but cleaner function
pointer abstraction.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <12541998232366-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r-- | kernel/rcupdate.c | 32 |
1 file changed, 10 insertions, 22 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 4a189ea18b48..e43242274466 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -53,12 +53,6 @@ struct lockdep_map rcu_lock_map = | |||
53 | EXPORT_SYMBOL_GPL(rcu_lock_map); | 53 | EXPORT_SYMBOL_GPL(rcu_lock_map); |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | enum rcu_barrier { | ||
57 | RCU_BARRIER_STD, | ||
58 | RCU_BARRIER_BH, | ||
59 | RCU_BARRIER_SCHED, | ||
60 | }; | ||
61 | |||
62 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | 56 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; |
63 | static atomic_t rcu_barrier_cpu_count; | 57 | static atomic_t rcu_barrier_cpu_count; |
64 | static DEFINE_MUTEX(rcu_barrier_mutex); | 58 | static DEFINE_MUTEX(rcu_barrier_mutex); |
@@ -184,19 +178,12 @@ static void rcu_barrier_func(void *type) | |||
184 | { | 178 | { |
185 | int cpu = smp_processor_id(); | 179 | int cpu = smp_processor_id(); |
186 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); | 180 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); |
181 | void (*call_rcu_func)(struct rcu_head *head, | ||
182 | void (*func)(struct rcu_head *head)); | ||
187 | 183 | ||
188 | atomic_inc(&rcu_barrier_cpu_count); | 184 | atomic_inc(&rcu_barrier_cpu_count); |
189 | switch ((enum rcu_barrier)type) { | 185 | call_rcu_func = type; |
190 | case RCU_BARRIER_STD: | 186 | call_rcu_func(head, rcu_barrier_callback); |
191 | call_rcu(head, rcu_barrier_callback); | ||
192 | break; | ||
193 | case RCU_BARRIER_BH: | ||
194 | call_rcu_bh(head, rcu_barrier_callback); | ||
195 | break; | ||
196 | case RCU_BARRIER_SCHED: | ||
197 | call_rcu_sched(head, rcu_barrier_callback); | ||
198 | break; | ||
199 | } | ||
200 | } | 187 | } |
201 | 188 | ||
202 | static inline void wait_migrated_callbacks(void) | 189 | static inline void wait_migrated_callbacks(void) |
@@ -209,7 +196,8 @@ static inline void wait_migrated_callbacks(void) | |||
209 | * Orchestrate the specified type of RCU barrier, waiting for all | 196 | * Orchestrate the specified type of RCU barrier, waiting for all |
210 | * RCU callbacks of the specified type to complete. | 197 | * RCU callbacks of the specified type to complete. |
211 | */ | 198 | */ |
212 | static void _rcu_barrier(enum rcu_barrier type) | 199 | static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head, |
200 | void (*func)(struct rcu_head *head))) | ||
213 | { | 201 | { |
214 | BUG_ON(in_interrupt()); | 202 | BUG_ON(in_interrupt()); |
215 | /* Take cpucontrol mutex to protect against CPU hotplug */ | 203 | /* Take cpucontrol mutex to protect against CPU hotplug */ |
@@ -225,7 +213,7 @@ static void _rcu_barrier(enum rcu_barrier type) | |||
225 | * early. | 213 | * early. |
226 | */ | 214 | */ |
227 | atomic_set(&rcu_barrier_cpu_count, 1); | 215 | atomic_set(&rcu_barrier_cpu_count, 1); |
228 | on_each_cpu(rcu_barrier_func, (void *)type, 1); | 216 | on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); |
229 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | 217 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) |
230 | complete(&rcu_barrier_completion); | 218 | complete(&rcu_barrier_completion); |
231 | wait_for_completion(&rcu_barrier_completion); | 219 | wait_for_completion(&rcu_barrier_completion); |
@@ -238,7 +226,7 @@ static void _rcu_barrier(enum rcu_barrier type) | |||
238 | */ | 226 | */ |
239 | void rcu_barrier(void) | 227 | void rcu_barrier(void) |
240 | { | 228 | { |
241 | _rcu_barrier(RCU_BARRIER_STD); | 229 | _rcu_barrier(call_rcu); |
242 | } | 230 | } |
243 | EXPORT_SYMBOL_GPL(rcu_barrier); | 231 | EXPORT_SYMBOL_GPL(rcu_barrier); |
244 | 232 | ||
@@ -247,7 +235,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier); | |||
247 | */ | 235 | */ |
248 | void rcu_barrier_bh(void) | 236 | void rcu_barrier_bh(void) |
249 | { | 237 | { |
250 | _rcu_barrier(RCU_BARRIER_BH); | 238 | _rcu_barrier(call_rcu_bh); |
251 | } | 239 | } |
252 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | 240 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); |
253 | 241 | ||
@@ -256,7 +244,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh); | |||
256 | */ | 244 | */ |
257 | void rcu_barrier_sched(void) | 245 | void rcu_barrier_sched(void) |
258 | { | 246 | { |
259 | _rcu_barrier(RCU_BARRIER_SCHED); | 247 | _rcu_barrier(call_rcu_sched); |
260 | } | 248 | } |
261 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | 249 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); |
262 | 250 | ||