diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-10-22 13:00:05 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-10-23 10:51:17 -0400 |
commit | b2c4623dcd07af4b8ae3b56ae5f879e281c7b4f8 (patch) | |
tree | 8a0aaf80f950011fafb6743d42457a22412ae6dd | |
parent | f114040e3ea6e07372334ade75d1ee0775c355e1 (diff) |
rcu: More on deadlock between CPU hotplug and expedited grace periods
Commit dd56af42bd82 (rcu: Eliminate deadlock between CPU hotplug and
expedited grace periods) was incomplete. Although it did eliminate
deadlocks involving synchronize_sched_expedited()'s acquisition of
cpu_hotplug.lock via get_online_cpus(), it did nothing about the similar
deadlock involving acquisition of this same lock via put_online_cpus().
This deadlock became apparent with testing involving hibernation.
This commit therefore changes put_online_cpus() acquisition of this lock
to be conditional, and increments a new cpu_hotplug.puts_pending field
in case of acquisition failure. Then cpu_hotplug_begin() checks for this
new field being non-zero, and applies any changes to cpu_hotplug.refcount.
Reported-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Tested-by: Borislav Petkov <bp@suse.de>
-rw-r--r-- | kernel/cpu.c | 14 |
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c index 356450f09c1f..90a3d017b90c 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -64,6 +64,8 @@ static struct { | |||
64 | * an ongoing cpu hotplug operation. | 64 | * an ongoing cpu hotplug operation. |
65 | */ | 65 | */ |
66 | int refcount; | 66 | int refcount; |
67 | /* And allows lockless put_online_cpus(). */ | ||
68 | atomic_t puts_pending; | ||
67 | 69 | ||
68 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
69 | struct lockdep_map dep_map; | 71 | struct lockdep_map dep_map; |
@@ -113,7 +115,11 @@ void put_online_cpus(void) | |||
113 | { | 115 | { |
114 | if (cpu_hotplug.active_writer == current) | 116 | if (cpu_hotplug.active_writer == current) |
115 | return; | 117 | return; |
116 | mutex_lock(&cpu_hotplug.lock); | 118 | if (!mutex_trylock(&cpu_hotplug.lock)) { |
119 | atomic_inc(&cpu_hotplug.puts_pending); | ||
120 | cpuhp_lock_release(); | ||
121 | return; | ||
122 | } | ||
117 | 123 | ||
118 | if (WARN_ON(!cpu_hotplug.refcount)) | 124 | if (WARN_ON(!cpu_hotplug.refcount)) |
119 | cpu_hotplug.refcount++; /* try to fix things up */ | 125 | cpu_hotplug.refcount++; /* try to fix things up */ |
@@ -155,6 +161,12 @@ void cpu_hotplug_begin(void) | |||
155 | cpuhp_lock_acquire(); | 161 | cpuhp_lock_acquire(); |
156 | for (;;) { | 162 | for (;;) { |
157 | mutex_lock(&cpu_hotplug.lock); | 163 | mutex_lock(&cpu_hotplug.lock); |
164 | if (atomic_read(&cpu_hotplug.puts_pending)) { | ||
165 | int delta; | ||
166 | |||
167 | delta = atomic_xchg(&cpu_hotplug.puts_pending, 0); | ||
168 | cpu_hotplug.refcount -= delta; | ||
169 | } | ||
158 | if (likely(!cpu_hotplug.refcount)) | 170 | if (likely(!cpu_hotplug.refcount)) |
159 | break; | 171 | break; |
160 | __set_current_state(TASK_UNINTERRUPTIBLE); | 172 | __set_current_state(TASK_UNINTERRUPTIBLE); |