diff options
author | Paul E. McKenney <paul.mckenney@linaro.org> | 2012-03-15 15:16:26 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2012-05-01 11:22:50 -0400 |
commit | f511fc624642f0bb8cf65aaa28979737514d4746 (patch) | |
tree | db1395c5d33da33c8ea6c82f6e5b7346cc5beb91 /kernel/rcutree_plugin.h | |
parent | 79b9a75fb703b6a2670e46b9dc495af5bc7029b3 (diff) |
rcu: Ensure that RCU_FAST_NO_HZ timers expire on correct CPU
Timers are subject to migration, which can lead to the following
system-hang scenario when CONFIG_RCU_FAST_NO_HZ=y:
1. CPU 0 executes synchronize_rcu(), which posts an RCU callback.
2. CPU 0 then goes idle. It cannot immediately invoke the callback,
but there is nothing RCU needs from it, so it enters dyntick-idle
mode after posting a timer.
3. The timer gets migrated to CPU 1.
4. CPU 0 never wakes up, so the synchronize_rcu() never returns, so
the system hangs.
This commit fixes this problem by using mod_timer_pinned(), as suggested
by Peter Zijlstra, to ensure that the timer is actually posted on the
running CPU.
Reported-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- | kernel/rcutree_plugin.h | 13 |
1 files changed, 8 insertions, 5 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index ad61da79b311..d01e26df55a1 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -2110,6 +2110,8 @@ static void rcu_cleanup_after_idle(int cpu) | |||
2110 | */ | 2110 | */ |
2111 | static void rcu_prepare_for_idle(int cpu) | 2111 | static void rcu_prepare_for_idle(int cpu) |
2112 | { | 2112 | { |
2113 | struct timer_list *tp; | ||
2114 | |||
2113 | /* | 2115 | /* |
2114 | * If this is an idle re-entry, for example, due to use of | 2116 | * If this is an idle re-entry, for example, due to use of |
2115 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle | 2117 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle |
@@ -2121,9 +2123,10 @@ static void rcu_prepare_for_idle(int cpu) | |||
2121 | if (!per_cpu(rcu_idle_first_pass, cpu) && | 2123 | if (!per_cpu(rcu_idle_first_pass, cpu) && |
2122 | (per_cpu(rcu_nonlazy_posted, cpu) == | 2124 | (per_cpu(rcu_nonlazy_posted, cpu) == |
2123 | per_cpu(rcu_nonlazy_posted_snap, cpu))) { | 2125 | per_cpu(rcu_nonlazy_posted_snap, cpu))) { |
2124 | if (rcu_cpu_has_callbacks(cpu)) | 2126 | if (rcu_cpu_has_callbacks(cpu)) { |
2125 | mod_timer(&per_cpu(rcu_idle_gp_timer, cpu), | 2127 | tp = &per_cpu(rcu_idle_gp_timer, cpu); |
2126 | per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2128 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); |
2129 | } | ||
2127 | return; | 2130 | return; |
2128 | } | 2131 | } |
2129 | per_cpu(rcu_idle_first_pass, cpu) = 0; | 2132 | per_cpu(rcu_idle_first_pass, cpu) = 0; |
@@ -2167,8 +2170,8 @@ static void rcu_prepare_for_idle(int cpu) | |||
2167 | else | 2170 | else |
2168 | per_cpu(rcu_idle_gp_timer_expires, cpu) = | 2171 | per_cpu(rcu_idle_gp_timer_expires, cpu) = |
2169 | jiffies + RCU_IDLE_LAZY_GP_DELAY; | 2172 | jiffies + RCU_IDLE_LAZY_GP_DELAY; |
2170 | mod_timer(&per_cpu(rcu_idle_gp_timer, cpu), | 2173 | tp = &per_cpu(rcu_idle_gp_timer, cpu); |
2171 | per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2174 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); |
2172 | per_cpu(rcu_nonlazy_posted_snap, cpu) = | 2175 | per_cpu(rcu_nonlazy_posted_snap, cpu) = |
2173 | per_cpu(rcu_nonlazy_posted, cpu); | 2176 | per_cpu(rcu_nonlazy_posted, cpu); |
2174 | return; /* Nothing more to do immediately. */ | 2177 | return; /* Nothing more to do immediately. */ |