author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-05-21 14:58:36 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-07-02 15:34:23 -0400
commit		2a3fa843b555d202e682bf08c65ee1a4a93c79cf
tree		b7db689ab83f041319b80493b12a8f7ecc79ad0f /kernel/rcutiny_plugin.h
parent		e9023c4061054cbf59c5288068118a4d0b152f01
rcu: Consolidate tree/tiny __rcu_read_{,un}lock() implementations
The CONFIG_TREE_PREEMPT_RCU and CONFIG_TINY_PREEMPT_RCU versions of
__rcu_read_lock() and __rcu_read_unlock() are identical, so this commit
consolidates them into kernel/rcupdate.c.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutiny_plugin.h')
-rw-r--r--	kernel/rcutiny_plugin.h	47
1 file changed, 1 insertion(+), 46 deletions(-)
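For reference, here is a sketch of the consolidated common implementation that
this commit creates (assumed to land in kernel/rcupdate.c under
CONFIG_PREEMPT_RCU; this diff shows only the rcutiny_plugin.h side). It mirrors
the identical code removed below, with the CONFIG_PROVE_LOCKING debug check
elided:

	/* Sketch only; mirrors the code removed from kernel/rcutiny_plugin.h. */
	void __rcu_read_lock(void)
	{
		current->rcu_read_lock_nesting++;
		barrier();  /* Keep the critical section after the increment. */
	}
	EXPORT_SYMBOL_GPL(__rcu_read_lock);

	void __rcu_read_unlock(void)
	{
		struct task_struct *t = current;

		barrier();  /* Keep the critical section before the decrement. */
		if (t->rcu_read_lock_nesting != 1)
			--t->rcu_read_lock_nesting;  /* Not outermost: just count down. */
		else {
			t->rcu_read_lock_nesting = INT_MIN;  /* Sentinel: outermost unlock in flight. */
			barrier();  /* Sentinel store before ->rcu_read_unlock_special load. */
			if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
				rcu_read_unlock_special(t);  /* Slow path for blocked/preempted readers. */
			barrier();  /* Slow path completes before the count is zeroed. */
			t->rcu_read_lock_nesting = 0;
		}
	}
	EXPORT_SYMBOL_GPL(__rcu_read_unlock);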
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index fc31a2d65100..a269b0da0eb6 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
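The forward declaration removed here is no longer needed: as the next hunk
shows, rcu_read_unlock_special() loses its static (and noinline) qualifiers so
that the consolidated __rcu_read_unlock(), now living in a different file, can
call it. That requires a shared extern declaration; its actual location is not
part of this diff, so the following placement is an assumption:

	/* Assumed declaration site (not shown in this diff). */
	#ifdef CONFIG_PREEMPT_RCU
	extern void rcu_read_unlock_special(struct task_struct *t);
	#endif /* #ifdef CONFIG_PREEMPT_RCU */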
@@ -527,23 +526,11 @@ void rcu_preempt_note_context_switch(void)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
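Note that noinline is dropped along with static, which is consistent with the
move: the marking existed to keep this slow path from being inlined into
__rcu_read_unlock()'s fast path within the same file, and once the only caller
sits in a separate compilation unit, ordinary (non-LTO) compilation cannot
inline it anyway.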
@@ -627,38 +614,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 /*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the rcu_preempt_ctrlblk structure, which is
  * checked elsewhere.  This is called from the scheduling-clock interrupt.
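Two details of the deleted __rcu_read_unlock() carry over to the consolidated
copy and are worth spelling out. First, the INT_MIN store marks "outermost
unlock in progress": any code that samples ->rcu_read_lock_nesting while
rcu_read_unlock_special() runs sees a large negative value rather than zero,
so the task still appears to be within a read-side critical section. Second,
the CONFIG_PROVE_LOCKING check warns only for small negative values
(rrln < 0 && rrln > INT_MIN / 2), that is, a genuine underflow from unbalanced
rcu_read_unlock() calls, while tolerating the transient sentinel near INT_MIN.

At the call-site level, these primitives back the usual read-side API. A
minimal illustrative reader follows; gp, struct foo, and do_something() are
hypothetical:

	struct foo *p;

	rcu_read_lock();		/* Maps to __rcu_read_lock() on preemptible RCU. */
	p = rcu_dereference(gp);	/* gp: hypothetical RCU-protected pointer. */
	if (p)
		do_something(p->a);	/* p remains valid for the critical section. */
	rcu_read_unlock();		/* Maps to __rcu_read_unlock(); may invoke
					 * rcu_read_unlock_special() if the task
					 * blocked while the lock was held. */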