author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-09-13 20:20:11 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-09-25 09:45:06 -0400
commit     5c173eb8bcb9c1aa888bd6d14a4cb746f3dd2420 (patch)
tree       5dd725412d2bccbdf0bcee0ac8fefc1f1a3d18c8 /kernel
parent     f9ffc31ebd38d2d74dbfe9f0b67274e99ad668f5 (diff)
rcu: Consistent rcu_is_watching() naming
The old rcu_is_cpu_idle() function is just __rcu_is_watching() with
preemption disabled.  This commit therefore renames rcu_is_cpu_idle()
to rcu_is_watching().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
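
As a quick orientation before the hunks, here is a condensed sketch of the two Tree-RCU helpers as they read after this patch. It is lifted from the kernel/rcutree.c hunk below, with the surrounding per-CPU plumbing omitted:

        /* Low-level check: low bit of the dynticks counter set means RCU is watching. */
        bool __rcu_is_watching(void)
        {
                return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
        }

        /* Same check, but safe to call with preemption enabled. */
        bool rcu_is_watching(void)
        {
                int ret;

                preempt_disable();
                ret = __rcu_is_watching();
                preempt_enable();
                return ret;
        }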
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/lockdep.c  |  4
-rw-r--r--   kernel/rcupdate.c |  2
-rw-r--r--   kernel/rcutiny.c  |  6
-rw-r--r--   kernel/rcutree.c  | 36
4 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e16c45b9ee77..4e8e14c34e42 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
-			: rcu_is_cpu_idle()
+			: !rcu_is_watching()
 				? "RCU used illegally from idle CPU!\n"
 				: "",
 	       rcu_scheduler_active, debug_locks);
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * So complain bitterly if someone does call rcu_read_lock(),
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		printk("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index b02a339836b4..3b3c0464d1eb 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -148,7 +148,7 @@ int rcu_read_lock_bh_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index b4bc61874d77..0fa061dfa55d 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -179,11 +179,11 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
 {
-	return !rcu_dynticks_nesting;
+	return rcu_dynticks_nesting;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
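In the Tiny RCU case the predicate reduces to the process-level nesting counter, so the old and the renamed functions are simple negations of each other. A minimal illustrative sketch (these wrapper names are made up for comparison and are not part of the patch):

        /* Illustration only (not in the patch): the two predicates are negations. */
        static inline bool was_rcu_cpu_idle(void)    { return rcu_dynticks_nesting == 0; }
        static inline bool now_rcu_is_watching(void) { return rcu_dynticks_nesting != 0; }
        /* For every value of rcu_dynticks_nesting: was_rcu_cpu_idle() == !now_rcu_is_watching(). */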
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1b123e179d71..981d0c15a389 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -655,34 +655,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(rcu_is_cpu_idle);
-
-/**
- * __rcu_is_watching - are RCU read-side critical sections safe?
- *
- * Return true if RCU is watching the running CPU, which means that
- * this CPU can safely enter RCU read-side critical sections.  Unlike
- * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
- * least disabled preemption.
- */
-bool __rcu_is_watching(void)
-{
-	return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
-}
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
@@ -2268,7 +2268,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
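
Because the question changes from "is this CPU idle?" to "is RCU watching this CPU?", every call site flips its sense: each former rcu_is_cpu_idle() test becomes !rcu_is_watching(). A hedged before/after sketch of that conversion pattern (the helper function below is hypothetical and not taken from the patch):

        /* Hypothetical debug check, showing the mechanical call-site conversion. */
        static void sketch_rcu_usage_check(void)
        {
                /* Before this patch: if (rcu_is_cpu_idle()) */
                if (!rcu_is_watching())
                        printk("RCU used illegally from extended quiescent state!\n");
        }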