author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-08-01 09:22:11 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-29 00:38:35 -0400
commit     6206ab9bab620fc0fbbed30ce20d145b0b3d1840
tree       b57912ba9fff6f2deba89c0178d3122bbd1fda6e
parent     6846c0c54074d47927c90eab4a805115e1ae3292
rcu: Move __rcu_read_unlock()'s barrier() within if-statement
We only need to constrain the compiler if we are actually exiting
the top-level RCU read-side critical section. This commit therefore
moves the first barrier() call in __rcu_read_unlock() to inside the
"if" statement, thus avoiding needless register flushes for inner
rcu_read_unlock() calls.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
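For illustration only (not part of the commit), here is a minimal sketch of nested read-side critical sections under CONFIG_PREEMPT_RCU; reader_fn, struct my_data, and my_global_ptr are made-up names. With this change, an inner rcu_read_unlock() simply decrements current->rcu_read_lock_nesting, and only the outermost unlock issues barrier() to keep the compiler from moving the critical section's accesses past the exit code.

#include <linux/rcupdate.h>

/* Hypothetical data structure and RCU-protected global pointer. */
struct my_data {
	int a;
};

static struct my_data __rcu *my_global_ptr;

static int reader_fn(void)
{
	struct my_data *p;
	int val = 0;

	rcu_read_lock();	/* nesting: 0 -> 1 (assuming no enclosing section) */
	rcu_read_lock();	/* nesting: 1 -> 2 (nested) */
	p = rcu_dereference(my_global_ptr);
	if (p)
		val = p->a;
	rcu_read_unlock();	/* inner: just --rcu_read_lock_nesting, no barrier() needed */
	rcu_read_unlock();	/* outermost: barrier(), then the exit/special handling */
	return val;
}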
-rw-r--r--  include/linux/rcupdate.h | 14
-rw-r--r--  kernel/rcutree_plugin.h  |  2
2 files changed, 3 insertions, 13 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index af186e260c43..2cf4226ade7e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -134,16 +134,6 @@ extern void call_rcu_sched(struct rcu_head *head,
 
 extern void synchronize_sched(void);
 
-static inline void __rcu_read_lock_bh(void)
-{
-        local_bh_disable();
-}
-
-static inline void __rcu_read_unlock_bh(void)
-{
-        local_bh_enable();
-}
-
 #ifdef CONFIG_PREEMPT_RCU
 
 extern void __rcu_read_lock(void);
@@ -686,7 +676,7 @@ static inline void rcu_read_unlock(void)
  */
 static inline void rcu_read_lock_bh(void)
 {
-        __rcu_read_lock_bh();
+        local_bh_disable();
         __acquire(RCU_BH);
         rcu_read_acquire_bh();
 }
@@ -700,7 +690,7 @@ static inline void rcu_read_unlock_bh(void)
 {
         rcu_read_release_bh();
         __release(RCU_BH);
-        __rcu_read_unlock_bh();
+        local_bh_enable();
 }
 
 /**
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 4bac5a29fb69..ed70f6bf4c31 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -415,10 +415,10 @@ void __rcu_read_unlock(void)
 {
         struct task_struct *t = current;
 
-        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
         if (t->rcu_read_lock_nesting != 1)
                 --t->rcu_read_lock_nesting;
         else {
+                barrier();  /* critical section before exit code. */
                 t->rcu_read_lock_nesting = INT_MIN;
                 barrier();  /* assign before ->rcu_read_unlock_special load */
                 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
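The rcupdate.h hunk is a related cleanup: the single-use __rcu_read_lock_bh() and __rcu_read_unlock_bh() wrappers are removed, so rcu_read_lock_bh() and rcu_read_unlock_bh() now call local_bh_disable() and local_bh_enable() directly. Callers see no difference; below is a usage sketch, reusing the hypothetical struct my_data and my_global_ptr from the sketch above (rcu_dereference_bh() is the accessor for the _bh flavor).

static int bh_reader(void)
{
	struct my_data *p;
	int val = 0;

	rcu_read_lock_bh();	/* local_bh_disable() plus sparse/lockdep annotations */
	p = rcu_dereference_bh(my_global_ptr);
	if (p)
		val = p->a;
	rcu_read_unlock_bh();	/* annotations, then local_bh_enable() */
	return val;
}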