about | summary | refs | log | tree | commit | diff | stats
path: root/arch/ia64/kernel/process.c
diff options
context:
space:
mode:
author    Tony Luck <tony.luck@intel.com>  2008-02-08 14:53:09 -0500
committer Tony Luck <tony.luck@intel.com>  2008-02-08 15:01:40 -0500
commit    427639354ff346710012b53e1ceed5e3f3200e0c (patch)
tree      679d7a4a997ae270e2d646f47582f235a1a51526 /arch/ia64/kernel/process.c
parent    aa91a2e90044b88228bdb0620e771f2ea7798804 (diff)
[IA64] Simplify cpu_idle_wait
This is just Venki's patch [*] for x86 ported to ia64.

[*] http://marc.info/?l=linux-kernel&m=120249201318159&w=2

Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel/process.c')
-rw-r--r--  arch/ia64/kernel/process.c  44
1 file changed, 15 insertions(+), 29 deletions(-)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index be6c6f7be027..49937a383b23 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -52,7 +52,6 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
@@ -254,33 +253,23 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-void cpu_idle_wait(void)
+static void do_nothing(void *unused)
 {
-	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map;
-	cpumask_t tmp = current->cpus_allowed;
-
-	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-	put_cpu();
-
-	cpus_clear(map);
-	for_each_online_cpu(cpu) {
-		per_cpu(cpu_idle_state, cpu) = 1;
-		cpu_set(cpu, map);
-	}
-
-	__get_cpu_var(cpu_idle_state) = 0;
+}
 
-	wmb();
-	do {
-		ssleep(1);
-		for_each_online_cpu(cpu) {
-			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-				cpu_clear(cpu, map);
-		}
-		cpus_and(map, map, cpu_online_map);
-	} while (!cpus_empty(map));
-	set_cpus_allowed(current, tmp);
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 0, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -308,9 +297,6 @@ cpu_idle (void)
 #ifdef CONFIG_SMP
 	min_xtp();
 #endif
-	if (__get_cpu_var(cpu_idle_state))
-		__get_cpu_var(cpu_idle_state) = 0;
-
 	rmb();
 	if (mark_idle)
 		(*mark_idle)(1);