| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-10-22 17:51:49 -0400 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-11-03 22:21:01 -0500 |
| commit | 62db99f4783ea34531fc344bd8c539d5d186f24d (patch) | |
| tree | 71b90af82e70ea33e05455bf1e6d93cec77ea79e | |
| parent | 8fa7845df539105a8962c3173c866483da74ff6d (diff) | |
cpu: Avoid puts_pending overflow
A long string of get_online_cpus() calls, each followed by a
put_online_cpus() that fails to acquire cpu_hotplug.lock, can result in
overflow of the cpu_hotplug.puts_pending counter. Although this is
perhaps improbable, a system with absolutely no CPU-hotplug operations
will have an arbitrarily long time in which this overflow could occur.
This commit therefore adds overflow checks to get_online_cpus() and
try_get_online_cpus().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
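To make the folding logic concrete, here is a self-contained sketch in plain C11 (not kernel code) of the idea the patch introduces: puts that were deferred because the lock was busy accumulate in a pending counter, and that counter is periodically exchanged to zero and subtracted from the refcount before it can grow without bound. The struct, the `_sketch` names, and the main() driver are hypothetical stand-ins for the kernel's cpu_hotplug state, used only to illustrate the mechanism.

```c
/*
 * Standalone illustration of the counter-folding scheme.  Readers fold
 * only when a large backlog (65536) has built up, so the common path
 * stays cheap; the hotplug writer folds unconditionally (threshold 1)
 * so the refcount is exact before it is tested.
 */
#include <stdatomic.h>
#include <stdio.h>

static struct {
	atomic_int puts_pending; /* puts deferred because the lock was busy */
	int refcount;            /* read-side hold count (lock-protected in the kernel) */
} hotplug_sketch;

/* Fold deferred puts into refcount once at least @max have accumulated. */
static void apply_puts_pending_sketch(int max)
{
	int delta;

	if (atomic_load(&hotplug_sketch.puts_pending) >= max) {
		delta = atomic_exchange(&hotplug_sketch.puts_pending, 0);
		hotplug_sketch.refcount -= delta;
	}
}

int main(void)
{
	/* Simulate many get/put pairs in which every put was deferred. */
	for (int i = 0; i < 100000; i++) {
		hotplug_sketch.refcount++;                         /* "get" */
		atomic_fetch_add(&hotplug_sketch.puts_pending, 1); /* deferred "put" */
		apply_puts_pending_sketch(65536);                  /* reader-side fold */
	}
	apply_puts_pending_sketch(1);                              /* writer-side fold */

	printf("refcount after folding: %d\n", hotplug_sketch.refcount);
	printf("puts still pending:     %d\n",
	       atomic_load(&hotplug_sketch.puts_pending));
	return 0;
}
```

Because the backlog is drained whenever it reaches the reader-side threshold, puts_pending presumably stays far below the point where an int could wrap, which is the overflow the commit message describes.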
-rw-r--r-- | kernel/cpu.c | 19 |
1 file changed, 13 insertions, 6 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 90a3d017b90c..5d220234b3ca 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -86,6 +86,16 @@ static struct {
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
+static void apply_puts_pending(int max)
+{
+        int delta;
+
+        if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
+                delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
+                cpu_hotplug.refcount -= delta;
+        }
+}
+
 void get_online_cpus(void)
 {
         might_sleep();
@@ -93,6 +103,7 @@ void get_online_cpus(void)
                 return;
         cpuhp_lock_acquire_read();
         mutex_lock(&cpu_hotplug.lock);
+        apply_puts_pending(65536);
         cpu_hotplug.refcount++;
         mutex_unlock(&cpu_hotplug.lock);
 }
@@ -105,6 +116,7 @@ bool try_get_online_cpus(void)
         if (!mutex_trylock(&cpu_hotplug.lock))
                 return false;
         cpuhp_lock_acquire_tryread();
+        apply_puts_pending(65536);
         cpu_hotplug.refcount++;
         mutex_unlock(&cpu_hotplug.lock);
         return true;
@@ -161,12 +173,7 @@ void cpu_hotplug_begin(void)
         cpuhp_lock_acquire();
         for (;;) {
                 mutex_lock(&cpu_hotplug.lock);
-                if (atomic_read(&cpu_hotplug.puts_pending)) {
-                        int delta;
-
-                        delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
-                        cpu_hotplug.refcount -= delta;
-                }
+                apply_puts_pending(1);
                 if (likely(!cpu_hotplug.refcount))
                         break;
                 __set_current_state(TASK_UNINTERRUPTIBLE);