author	Zwane Mwaikambo <zwane@arm.linux.org.uk>	2005-03-30 23:40:00 -0500
committer	Tony Luck <tony.luck@intel.com>	2005-05-03 16:40:18 -0400
commit	7d5f9c0f10255000ca007fb03773c6b825c2b9ce (patch)
tree	5f53e41ef4cbe5ced2c5884ee5dde8df1fcaa509 /arch/ia64
parent	de7548d0e202263bb6bfd7574a7889e85a691937 (diff)
[IA64] reduce cacheline bouncing in cpu_idle_wait
Andi noted that during normal runtime cpu_idle_map is bounced around a lot, and occasionally at a higher frequency than the timer interrupt wakeup which we normally exit pm_idle from. So switch to a percpu variable.

I didn't move things to the slow path because it would involve adding scheduler code to wake up the idle thread on the cpus we're waiting for.

Signed-off-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
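The effect is easy to demonstrate outside the kernel. Below is a minimal userspace C sketch of the same handshake, with invented names (struct idle_flag, idle_loop, NCPUS); it illustrates the pattern and is not kernel code. The waiter sets one cache-line-padded flag per "cpu" and afterwards only reads; each idle loop clears only its own flag, so no cache line is written by more than one CPU, where the old scheme had every idle CPU writing the single shared cpu_idle_map.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

/* One flag per "cpu", padded out to a 64-byte cache line.  The padding
 * stands in for the per-CPU placement that
 * DEFINE_PER_CPU(unsigned int, cpu_idle_state) provides in the patch. */
struct idle_flag {
	atomic_int set;
	char pad[64 - sizeof(atomic_int)];
} flags[NCPUS];

static atomic_int stop;

/* Analogue of the cpu_idle() side: clear only our own flag. */
static void *idle_loop(void *arg)
{
	struct idle_flag *f = &flags[(long)arg];

	while (!atomic_load(&stop)) {
		if (atomic_load(&f->set))
			atomic_store(&f->set, 0);
		usleep(1000);
	}
	return NULL;
}

/* Analogue of cpu_idle_wait(): mark every cpu, then poll (reads only)
 * until each one has cleared its own flag. */
int main(void)
{
	pthread_t t[NCPUS];
	long i;

	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, idle_loop, (void *)i);

	for (i = 0; i < NCPUS; i++)
		atomic_store(&flags[i].set, 1);
	for (i = 0; i < NCPUS; i++)
		while (atomic_load(&flags[i].set))
			usleep(1000);
	puts("all cpus have passed through their idle loop");

	atomic_store(&stop, 1);
	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}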
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/process.c	41
1 file changed, 26 insertions, 15 deletions
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7c43aea5f7f7..c0140f4235e4 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -50,7 +50,7 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
-static cpumask_t cpu_idle_map;
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
@@ -223,20 +223,31 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-
 void cpu_idle_wait(void)
 {
-	int cpu;
-	cpumask_t map;
+	unsigned int cpu, this_cpu = get_cpu();
+	cpumask_t map;
+
+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+	put_cpu();
 
-	for_each_online_cpu(cpu)
-		cpu_set(cpu, cpu_idle_map);
+	cpus_clear(map);
+	for_each_online_cpu(cpu) {
+		per_cpu(cpu_idle_state, cpu) = 1;
+		cpu_set(cpu, map);
+	}
+
+	__get_cpu_var(cpu_idle_state) = 0;
 
-	wmb();
-	do {
-		ssleep(1);
-		cpus_and(map, cpu_idle_map, cpu_online_map);
-	} while (!cpus_empty(map));
+	wmb();
+	do {
+		ssleep(1);
+		for_each_online_cpu(cpu) {
+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+				cpu_clear(cpu, map);
+		}
+		cpus_and(map, map, cpu_online_map);
+	} while (!cpus_empty(map));
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -244,7 +255,6 @@ void __attribute__((noreturn))
 cpu_idle (void)
 {
 	void (*mark_idle)(int) = ia64_mark_idle;
-	int cpu = smp_processor_id();
 
 	/* endless idle loop with no priority at all */
 	while (1) {
@@ -255,12 +265,13 @@ cpu_idle (void)
 		while (!need_resched()) {
 			void (*idle)(void);
 
+			if (__get_cpu_var(cpu_idle_state))
+				__get_cpu_var(cpu_idle_state) = 0;
+
+			rmb();
 			if (mark_idle)
 				(*mark_idle)(1);
 
-			if (cpu_isset(cpu, cpu_idle_map))
-				cpu_clear(cpu, cpu_idle_map);
-			rmb();
 			idle = pm_idle;
 			if (!idle)
 				idle = default_idle;
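For context on why the function exists at all: cpu_idle_wait() lets code that swaps the pm_idle hook wait until every online CPU has gone around its idle loop once and therefore can no longer be executing the old handler. A sketch of that calling pattern follows; set_idle_handler and new_idle are hypothetical names, not from this patch.

/* Hypothetical caller, for illustration: install a new idle handler,
 * then use cpu_idle_wait() to guarantee no CPU is still inside the
 * old one before tearing it down. */
static void set_idle_handler(void (*new_idle)(void))
{
	pm_idle = new_idle;	/* idle CPUs pick this up on their next pass */
	cpu_idle_wait();	/* returns once every online CPU has cleared
				 * its cpu_idle_state flag in the idle loop */
}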