Diffstat (limited to 'arch/sparc64')

 arch/sparc64/kernel/process.c | 20 ++++++++++++++------
 arch/sparc64/kernel/smp.c     | 13 +------------
 2 files changed, 15 insertions(+), 18 deletions(-)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 2f89206e008f..02f9dec1d459 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -85,23 +85,31 @@ void cpu_idle(void)
 
 /*
  * the idle loop on a UltraMultiPenguin...
+ *
+ * TIF_POLLING_NRFLAG is set because we do not sleep the cpu
+ * inside of the idler task, so an interrupt is not needed
+ * to get a clean fast response.
+ *
+ * XXX Reverify this assumption... -DaveM
+ *
+ * Addendum: We do want it to do something for the signal
+ *           delivery case, we detect that by just seeing
+ *           if we are trying to send this to an idler or not.
  */
-#define idle_me_harder()	(cpu_data(smp_processor_id()).idle_volume += 1)
-#define unidle_me()		(cpu_data(smp_processor_id()).idle_volume = 0)
 void cpu_idle(void)
 {
+	cpuinfo_sparc *cpuinfo = &local_cpu_data();
 	set_thread_flag(TIF_POLLING_NRFLAG);
+
 	while(1) {
 		if (need_resched()) {
-			unidle_me();
-			clear_thread_flag(TIF_POLLING_NRFLAG);
+			cpuinfo->idle_volume = 0;
 			preempt_enable_no_resched();
 			schedule();
 			preempt_disable();
-			set_thread_flag(TIF_POLLING_NRFLAG);
 			check_pgt_cache();
 		}
-		idle_me_harder();
+		cpuinfo->idle_volume++;
 
 		/* The store ordering is so that IRQ handlers on
 		 * other cpus see our increasing idleness for the buddy
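Taken together, the process.c hunk leaves the idle loop reading roughly as the sketch below, assembled from the hunk above. The store-ordering barrier and buddy-cpu bookkeeping that follow the increment fall outside the hunk and are elided here:

void cpu_idle(void)
{
	cpuinfo_sparc *cpuinfo = &local_cpu_data();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while(1) {
		if (need_resched()) {
			/* Someone needs the cpu: reset our idle counter
			 * and reschedule.  Note TIF_POLLING_NRFLAG is no
			 * longer cleared and re-set around schedule().
			 */
			cpuinfo->idle_volume = 0;
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
			check_pgt_cache();
		}
		cpuinfo->idle_volume++;

		/* ... idleness accounting visible to the buddy cpu
		 * continues past the end of the hunk ... */
	}
}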
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8aca4b1dc04e..797a65493fb8 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1152,20 +1152,9 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	       (bogosum/(5000/HZ))%100);
 }
 
-/* This needn't do anything as we do not sleep the cpu
- * inside of the idler task, so an interrupt is not needed
- * to get a clean fast response.
- *
- * XXX Reverify this assumption... -DaveM
- *
- * Addendum: We do want it to do something for the signal
- *           delivery case, we detect that by just seeing
- *           if we are trying to send this to an idler or not.
- */
 void smp_send_reschedule(int cpu)
 {
-	if (cpu_data(cpu).idle_volume == 0)
-		smp_receive_signal(cpu);
+	smp_receive_signal(cpu);
 }
 
 /* This is a nop because we capture all other cpus
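On the smp.c side, the net effect is that smp_send_reschedule() no longer gates on the target cpu's idle_volume; the patch reduces it to an unconditional cross-call:

void smp_send_reschedule(int cpu)
{
	/* Always poke the target cpu; the old "is it idling?"
	 * check (cpu_data(cpu).idle_volume == 0) is gone.
	 */
	smp_receive_signal(cpu);
}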