author     Jeff Garzik <jgarzik@pobox.com>  2005-11-10 04:12:10 -0500
committer  Jeff Garzik <jgarzik@pobox.com>  2005-11-10 04:12:10 -0500
commit     2f67bdb23d74a6c6fd4f98f64239c5c34d1833cc (patch)
tree       fe533abe3e7c400848647b95e4806f5125c654c3 /arch/sparc64/kernel/process.c
parent     d40d9d29c020f8466c96f8e3ad4b7c014ff1085d (diff)
parent     3b44f137b9a846c5452d9e6e1271b79b1dbcc942 (diff)
Merge branch 'master'
Diffstat (limited to 'arch/sparc64/kernel/process.c')
-rw-r--r--   arch/sparc64/kernel/process.c   24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7d10b0397091..02f9dec1d459 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -74,7 +74,9 @@ void cpu_idle(void)
 		while (!need_resched())
 			barrier();
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 		check_pgt_cache();
 	}
 }
@@ -83,21 +85,31 @@ void cpu_idle(void)
 
 /*
  * the idle loop on a UltraMultiPenguin...
+ *
+ * TIF_POLLING_NRFLAG is set because we do not sleep the cpu
+ * inside of the idler task, so an interrupt is not needed
+ * to get a clean fast response.
+ *
+ * XXX Reverify this assumption... -DaveM
+ *
+ * Addendum: We do want it to do something for the signal
+ *           delivery case, we detect that by just seeing
+ *           if we are trying to send this to an idler or not.
  */
-#define idle_me_harder()	(cpu_data(smp_processor_id()).idle_volume += 1)
-#define unidle_me()		(cpu_data(smp_processor_id()).idle_volume = 0)
 void cpu_idle(void)
 {
+	cpuinfo_sparc *cpuinfo = &local_cpu_data();
 	set_thread_flag(TIF_POLLING_NRFLAG);
+
 	while(1) {
 		if (need_resched()) {
-			unidle_me();
-			clear_thread_flag(TIF_POLLING_NRFLAG);
+			cpuinfo->idle_volume = 0;
+			preempt_enable_no_resched();
 			schedule();
-			set_thread_flag(TIF_POLLING_NRFLAG);
+			preempt_disable();
 			check_pgt_cache();
 		}
-		idle_me_harder();
+		cpuinfo->idle_volume++;
 
 		/* The store ordering is so that IRQ handlers on
 		 * other cpus see our increasing idleness for the buddy
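For readers who only want the shape of the change: the patched cpu_idle() keeps TIF_POLLING_NRFLAG set the whole time, drops the idle_me_harder()/unidle_me() macros in favour of touching cpuinfo->idle_volume directly, and brackets schedule() with preempt_enable_no_resched()/preempt_disable() instead of toggling the polling flag. The sketch below models that control flow as a self-contained userland C program; need_resched, schedule, the preempt calls, check_pgt_cache and the cpuinfo structure here are stubs standing in for the kernel primitives, not the real <linux/sched.h>/<linux/preempt.h> APIs, so treat it purely as an illustration of the pattern the hunks above adopt.

/* Minimal userland model of the reworked sparc64 idle loop.
 * Every function below is a stub standing in for a kernel primitive;
 * none of this is the real kernel API.
 */
#include <stdio.h>

struct cpuinfo_model { unsigned long idle_volume; };

static struct cpuinfo_model cpuinfo;   /* stand-in for local_cpu_data() */
static int resched_pending;            /* stand-in for the need_resched() condition */

static int  need_resched(void)              { return resched_pending; }
static void preempt_enable_no_resched(void) { /* kernel: allow preemption, skip resched check */ }
static void preempt_disable(void)           { /* kernel: forbid preemption again */ }
static void schedule(void)                  { resched_pending = 0; /* pretend we switched tasks */ }
static void check_pgt_cache(void)           { /* kernel: trim page-table caches while idle */ }

/* Mirrors the structure of the patched cpu_idle(): reset the idle
 * counter and reschedule when asked, otherwise keep counting up so
 * other CPUs can observe how idle this one is. */
static void idle_loop_model(int iterations)
{
	while (iterations-- > 0) {
		if (need_resched()) {
			cpuinfo.idle_volume = 0;
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
			check_pgt_cache();
		}
		cpuinfo.idle_volume++;
	}
}

int main(void)
{
	resched_pending = 1;          /* simulate one pending reschedule */
	idle_loop_model(5);
	printf("idle_volume = %lu\n", cpuinfo.idle_volume);
	return 0;
}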
