path: root/kernel/cpu.c
author     Ingo Molnar <mingo@kernel.org>    2014-10-30 02:37:37 -0400
committer  Ingo Molnar <mingo@kernel.org>    2014-10-30 02:37:37 -0400
commit     21ee24bf5b43ecaeec43a7d5c61edb3cd7f847bf (patch)
tree       f240ce442d7ced99390761d267f37815883827f7 /kernel/cpu.c
parent     5631b8fba640a4ab2f8a954f63a603fa34eda96b (diff)
parent     d7e29933969e5ca7c112ce1368a07911f4485dc2 (diff)
Merge branch 'urgent-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/urgent
Pull two RCU fixes from Paul E. McKenney:

 " - Complete the work of commit dd56af42bd82 (rcu: Eliminate deadlock
     between CPU hotplug and expedited grace periods), which was intended
     to allow synchronize_sched_expedited() to be safely used when holding
     locks acquired by CPU-hotplug notifiers. This commit makes the
     put_online_cpus() avoid the deadlock instead of just handling the
     get_online_cpus().

   - Complete the work of commit 35ce7f29a44a (rcu: Create rcuo kthreads
     only for onlined CPUs), which was intended to allow RCU to avoid
     allocating unneeded kthreads on systems where the firmware says that
     there are more CPUs than are really present. This commit makes
     rcu_barrier() aware of the mismatch, so that it doesn't hang waiting
     for non-existent CPUs. "

Signed-off-by: Ingo Molnar <mingo@kernel.org>
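For context, the calling pattern the first fix is meant to make safe looks roughly like the hypothetical driver below. The mydrv_* names are illustrative only (notifier registration and error handling omitted), and the claim that the expedited grace period brackets its work with the CPU-hotplug reference is taken from the commit message above, not from this diff:

/* Hypothetical sketch, not part of the patch. */
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(mydrv_lock);

/* CPU-hotplug notifier: takes the driver lock while a hotplug
 * operation is in progress. */
static int mydrv_cpu_notify(struct notifier_block *nb,
                            unsigned long action, void *hcpu)
{
        mutex_lock(&mydrv_lock);
        /* ... adjust per-CPU driver state ... */
        mutex_unlock(&mydrv_lock);
        return NOTIFY_OK;
}

static void mydrv_update(void)
{
        mutex_lock(&mydrv_lock);        /* same lock as the notifier */
        /*
         * The expedited grace period takes and drops the CPU-hotplug
         * reference internally; with this merge its put_online_cpus()
         * no longer blocks on cpu_hotplug.lock while a hotplug
         * operation is under way, so holding mydrv_lock here is safe.
         */
        synchronize_sched_expedited();
        mutex_unlock(&mydrv_lock);
}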
Diffstat (limited to 'kernel/cpu.c')
 kernel/cpu.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 356450f09c1f..90a3d017b90c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ static struct {
 	 * an ongoing cpu hotplug operation.
 	 */
 	int refcount;
+	/* And allows lockless put_online_cpus(). */
+	atomic_t puts_pending;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -113,7 +115,11 @@ void put_online_cpus(void)
 {
 	if (cpu_hotplug.active_writer == current)
 		return;
-	mutex_lock(&cpu_hotplug.lock);
+	if (!mutex_trylock(&cpu_hotplug.lock)) {
+		atomic_inc(&cpu_hotplug.puts_pending);
+		cpuhp_lock_release();
+		return;
+	}
 
 	if (WARN_ON(!cpu_hotplug.refcount))
 		cpu_hotplug.refcount++; /* try to fix things up */
@@ -155,6 +161,12 @@ void cpu_hotplug_begin(void)
 	cpuhp_lock_acquire();
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
+		if (atomic_read(&cpu_hotplug.puts_pending)) {
+			int delta;
+
+			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
+			cpu_hotplug.refcount -= delta;
+		}
 		if (likely(!cpu_hotplug.refcount))
 			break;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
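A self-contained userspace model of the scheme the diff introduces may help make it concrete. It is only an analogy: a pthread mutex and C11 atomics stand in for cpu_hotplug.lock, refcount and puts_pending, and the writer wakeup and lockdep annotations are left out. A reader that cannot take the lock defers its reference drop to an atomic counter, and the writer folds the deferred drops back into the refcount before testing it for zero:

/* Userspace model only; illustrative, not kernel code. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;                   /* protected by lock */
static atomic_int puts_pending;        /* lockless deferred puts */

static void get_ref(void)
{
        pthread_mutex_lock(&lock);
        refcount++;
        pthread_mutex_unlock(&lock);
}

static void put_ref(void)
{
        if (pthread_mutex_trylock(&lock) != 0) {
                /* Lock contended: record the put instead of blocking,
                 * mirroring the new put_online_cpus() fast-out path. */
                atomic_fetch_add(&puts_pending, 1);
                return;
        }
        refcount--;                    /* writer wakeup omitted in this model */
        pthread_mutex_unlock(&lock);
}

static void writer_begin(void)         /* cpu_hotplug_begin() analogue */
{
        for (;;) {
                pthread_mutex_lock(&lock);
                /* Fold deferred puts back in before testing for zero. */
                refcount -= atomic_exchange(&puts_pending, 0);
                if (refcount == 0)
                        return;        /* exclusive section entered, lock held */
                pthread_mutex_unlock(&lock);
                sched_yield();         /* the kernel sleeps here instead */
        }
}

static void writer_end(void)
{
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        get_ref();
        pthread_mutex_lock(&lock);     /* simulate the lock being held elsewhere */
        put_ref();                     /* trylock fails, put is deferred */
        pthread_mutex_unlock(&lock);
        writer_begin();                /* reconciles puts_pending, sees refcount == 0 */
        writer_end();
        printf("refcount=%d pending=%d\n", refcount, atomic_load(&puts_pending));
        return 0;
}

Built with cc -pthread, this prints refcount=0 pending=0: the deferred decrement is reconciled by the writer rather than by the blocked reader.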