author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2013-11-05 16:03:17 -0500
committer  Steven Rostedt <rostedt@goodmis.org>    2013-11-05 16:03:17 -0500
commit     44847da1b921f35b348ecd46df93abfbcd547355
tree       0dcc929a02b98dcf0927b9ad70083087c6d0e4ae /kernel/rcutree.c
parent     ab4ead02ec235d706d0611d8741964628291237e
parent     5c173eb8bcb9c1aa888bd6d14a4cb746f3dd2420
Merge branch 'idle.2013.09.25a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into HEAD
Need to use Paul McKenney's "rcu_is_watching()" changes to fix a perf/ftrace bug.
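A minimal sketch of the usage pattern this merge enables on the perf/ftrace side, assuming a hypothetical callback my_trace_handler(); illustrative only, not code from this merge:

#include <linux/rcupdate.h>

static void my_trace_handler(void *data)
{
	/*
	 * If RCU is not watching this CPU (for example, the callback fired
	 * from the idle loop), RCU read-side critical sections are unsafe,
	 * so bail out rather than calling rcu_read_lock().
	 */
	if (!rcu_is_watching())
		return;

	rcu_read_lock();
	/* ... safely dereference RCU-protected tracing data here ... */
	rcu_read_unlock();
}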
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--    kernel/rcutree.c    23
1 file changed, 18 insertions, 5 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 32618b3fe4e6..981d0c15a389 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -655,21 +655,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections. Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
@@ -2255,7 +2268,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
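As the first hunk's kerneldoc notes, __rcu_is_watching() requires the caller to have at least disabled preemption, while rcu_is_watching() handles that itself. A minimal sketch of the distinction, using hypothetical helper names; illustrative only:

/* Hypothetical caller already running with preemption off, e.g. in
 * hard interrupt context: the underscore variant can be used directly. */
static bool my_irq_context_rcu_safe(void)
{
	return __rcu_is_watching();
}

/* Hypothetical caller in ordinary process context: let rcu_is_watching()
 * disable and re-enable preemption around the per-CPU read. */
static bool my_process_context_rcu_safe(void)
{
	return rcu_is_watching();
}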