author     Venki Pallipadi <venkatesh.pallipadi@intel.com>    2008-04-10 12:49:58 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-04-10 18:38:29 -0400
commit     783e391b7b5b273cd20856d8f6f4878da8ec31b3 (patch)
tree       897e8db7c6eaf3763c2f1ef9aa099f2691d76400 /arch
parent     a1e58bbdc969c3fe60addca7f2729779d22a83c1 (diff)
x86: Simplify cpu_idle_wait
This patch also resolves boot-time hangs reported at:
http://lkml.org/lkml/2008/2/23/263
http://bugzilla.kernel.org/show_bug.cgi?id=10093
The bug caused a 10-15 second hang during boot, once every few reboots,
on certain laptops.
Earlier, commit 40d6a146629b98d8e322b6f9332b182c7cbff3df added an
smp_call_function() to cpu_idle_wait() to kick CPUs sitting in tickless
idle. Looking at the cpu_idle_wait() code at that time, it seemed
over-engineered for a rarely used path (changing the idle handler).
Below is a simplified version of cpu_idle_wait(), which just issues a
dummy smp_call_function() to all CPUs, forcing them out of the old idle
handler and into the new one. It also eliminates the cpu_idle_wait()
bookkeeping from the idle loop itself.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
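
[Editorial note: to make the calling convention concrete, here is a minimal
sketch, not part of the patch, of how a pm_idle switch is expected to look
per the comment block added in the diff below. my_new_idle() and
switch_idle_handler() are hypothetical names; safe_halt() is used only as a
plausible handler body.]

    extern void (*pm_idle)(void);
    extern void cpu_idle_wait(void);

    /* Hypothetical replacement idle routine (illustration only). */
    static void my_new_idle(void)
    {
            safe_halt();    /* enable interrupts and halt until the next one */
    }

    static void switch_idle_handler(void)
    {
            pm_idle = my_new_idle;  /* publish the new handler first ...     */
            cpu_idle_wait();        /* ... then wait until no CPU can still  */
                                    /* be running the old one                */
    }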
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/process_32.c | 47
-rw-r--r--  arch/x86/kernel/process_64.c | 47
2 files changed, 22 insertions, 72 deletions
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index be3c7a299f02..43930e73f657 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -82,7 +82,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  */
 void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 void disable_hlt(void)
 {
@@ -190,9 +189,6 @@ void cpu_idle(void)
 		while (!need_resched()) {
 			void (*idle)(void);
 
-			if (__get_cpu_var(cpu_idle_state))
-				__get_cpu_var(cpu_idle_state) = 0;
-
 			check_pgt_cache();
 			rmb();
 			idle = pm_idle;
@@ -220,40 +216,19 @@ static void do_nothing(void *unused)
 {
 }
 
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
 void cpu_idle_wait(void)
 {
-	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map, tmp = current->cpus_allowed;
-
-	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-	put_cpu();
-
-	cpus_clear(map);
-	for_each_online_cpu(cpu) {
-		per_cpu(cpu_idle_state, cpu) = 1;
-		cpu_set(cpu, map);
-	}
-
-	__get_cpu_var(cpu_idle_state) = 0;
-
-	wmb();
-	do {
-		ssleep(1);
-		for_each_online_cpu(cpu) {
-			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-				cpu_clear(cpu, map);
-		}
-		cpus_and(map, map, cpu_online_map);
-		/*
-		 * We waited 1 sec, if a CPU still did not call idle
-		 * it may be because it is in idle and not waking up
-		 * because it has nothing to do.
-		 * Give all the remaining CPUS a kick.
-		 */
-		smp_call_function_mask(map, do_nothing, NULL, 0);
-	} while (!cpus_empty(map));
-
-	set_cpus_allowed(current, tmp);
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 0, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
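
[Editorial note: why a single smp_mb() plus one round of cross-CPU calls
suffices may not be obvious. The following is a userspace analogy, not
kernel code: the writer publishes the new handler pointer before the kick,
and the reader reloads the pointer on every loop iteration, mirroring the
"rmb(); idle = pm_idle;" reload in cpu_idle() above. Build with
gcc -std=c11 -pthread.]

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static void old_handler(void) { /* stands in for the old pm_idle */ }
    static void new_handler(void) { /* stands in for the new pm_idle */ }

    static _Atomic(void (*)(void)) handler = old_handler;
    static atomic_int stop;

    static void *idle_loop(void *unused)
    {
            (void)unused;
            while (!atomic_load(&stop)) {
                    /* Like the idle loop: re-read the handler on every
                     * iteration, so a published update is picked up on
                     * the next pass. */
                    void (*idle)(void) = atomic_load(&handler);
                    idle();
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, idle_loop, NULL);

            /* Like cpu_idle_wait(): publish first; the seq_cst store plays
             * the role of smp_mb(). The kernel additionally sends an IPI
             * because a CPU in tickless idle may be asleep inside its
             * handler; this busy loop never sleeps, so no kick is needed. */
            atomic_store(&handler, new_handler);

            atomic_store(&stop, 1);
            pthread_join(t, NULL);
            puts("idle handler switched");
            return 0;
    }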
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3baf9b9f4c87..46c4c546b499 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -63,7 +63,6 @@ EXPORT_SYMBOL(boot_option_idle_override);
  */
 void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
@@ -173,9 +172,6 @@ void cpu_idle(void)
 		while (!need_resched()) {
 			void (*idle)(void);
 
-			if (__get_cpu_var(cpu_idle_state))
-				__get_cpu_var(cpu_idle_state) = 0;
-
 			rmb();
 			idle = pm_idle;
 			if (!idle)
@@ -207,40 +203,19 @@ static void do_nothing(void *unused)
 {
 }
 
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
 void cpu_idle_wait(void)
 {
-	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map, tmp = current->cpus_allowed;
-
-	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-	put_cpu();
-
-	cpus_clear(map);
-	for_each_online_cpu(cpu) {
-		per_cpu(cpu_idle_state, cpu) = 1;
-		cpu_set(cpu, map);
-	}
-
-	__get_cpu_var(cpu_idle_state) = 0;
-
-	wmb();
-	do {
-		ssleep(1);
-		for_each_online_cpu(cpu) {
-			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-				cpu_clear(cpu, map);
-		}
-		cpus_and(map, map, cpu_online_map);
-		/*
-		 * We waited 1 sec, if a CPU still did not call idle
-		 * it may be because it is in idle and not waking up
-		 * because it has nothing to do.
-		 * Give all the remaining CPUS a kick.
-		 */
-		smp_call_function_mask(map, do_nothing, 0, 0);
-	} while (!cpus_empty(map));
-
-	set_cpus_allowed(current, tmp);
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 0, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
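
[Editorial note on the arguments at the new call site: in kernels of this
vintage, smp_call_function() took four parameters, as the call in the diff
shows; the third one (named nonatomic or retry in different trees, and
ignored on x86) was dropped from the API in later kernels. Approximately:]

    /* Approximate x86 prototype at the time of this commit; the third
     * parameter was removed from the API in later kernels. */
    int smp_call_function(void (*func)(void *info), void *info,
                          int nonatomic, int wait);

[With wait = 1, the caller blocks until do_nothing() has completed on every
other online CPU, so by the time cpu_idle_wait() returns, each CPU has been
interrupted out of its idle handler and will reload pm_idle, after rmb(), on
its next pass through the idle loop.]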