author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-05-21 14:58:36 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 15:34:23 -0400
commit     2a3fa843b555d202e682bf08c65ee1a4a93c79cf
tree       b7db689ab83f041319b80493b12a8f7ecc79ad0f /kernel/rcutree_plugin.h
parent     e9023c4061054cbf59c5288068118a4d0b152f01
rcu: Consolidate tree/tiny __rcu_read_{,un}lock() implementations
The CONFIG_TREE_PREEMPT_RCU and CONFIG_TINY_PREEMPT_RCU versions of
__rcu_read_lock() and __rcu_read_unlock() are identical, so this commit
consolidates them into kernel/rcupdate.c.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree_plugin.h')
 -rw-r--r--  kernel/rcutree_plugin.h | 47
 1 file changed, 1 insertion(+), 46 deletions(-)
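For reference, here is a sketch of the consolidated copy after the move. The destination file and the CONFIG_PREEMPT_RCU guard are inferred from the commit message rather than shown in this diff; the function bodies are exactly the tree-variant code removed below. This move is also why the diff drops the static qualifier (and the forward declaration) from rcu_read_unlock_special(): the shared __rcu_read_unlock() now has to call it from another translation unit.

/* Sketch: consolidated __rcu_read_{,un}lock() as assumed to land in
 * kernel/rcupdate.c; bodies copied from the removed tree variant below. */
#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;	/* not outermost: just count down */
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */

Readers never call these directly; the rcu_read_lock()/rcu_read_unlock() wrappers in include/linux/rcupdate.h invoke them in the preemptible-RCU configurations.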
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3e4899459f3d..4b6b17cdf66b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -78,7 +78,6 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -233,18 +232,6 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure.  If the caller needs a reliable
  * answer, it must hold the rcu_node's ->lock.
@@ -310,7 +297,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -418,38 +405,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 
 /*
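One detail of the removed __rcu_read_unlock() worth spelling out: parking ->rcu_read_lock_nesting at INT_MIN while rcu_read_unlock_special() runs keeps the counter negative for the whole special-processing window, and the barrier() calls around that store keep the compiler from reordering it against the ->rcu_read_unlock_special load, per the inline comments. The CONFIG_PROVE_LOCKING check can then distinguish the deliberate marker (a value near INT_MIN) from a counter that went negative one step at a time, which can only mean an unbalanced rcu_read_unlock(). A minimal userspace sketch of that invariant; the harness and names below are illustrative only, not kernel code:

#include <limits.h>
#include <stdio.h>

/* Illustrative stand-in for current->rcu_read_lock_nesting. */
static int nesting;

/*
 * The check from the removed CONFIG_PROVE_LOCKING block: a negative
 * value nowhere near INT_MIN can only result from ordinary decrements,
 * i.e. an rcu_read_unlock() without a matching rcu_read_lock().
 */
static void check_misnesting(void)
{
	int rrln = nesting;

	if (rrln < 0 && rrln > INT_MIN / 2)
		printf("WARN: misnested unlock, nesting = %d\n", rrln);
}

int main(void)
{
	nesting = 1;		/* one rcu_read_lock() */
	nesting--;		/* balanced rcu_read_unlock(): nesting == 0 */
	check_misnesting();	/* silent */

	nesting--;		/* one unlock too many: nesting == -1 */
	check_misnesting();	/* flagged */

	nesting = INT_MIN;	/* deliberate marker used while
				 * rcu_read_unlock_special() runs */
	check_misnesting();	/* silent: near INT_MIN is legitimate */
	return 0;
}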