Diffstat (limited to 'arch/ia64/kernel/process.c')
-rw-r--r--	arch/ia64/kernel/process.c	77
1 file changed, 44 insertions(+), 33 deletions(-)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 91293388dd29..ebb71f3d6d19 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * 04/11/17 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
  */
 #define __KERNEL_SYSCALLS__	/* see <asm/unistd.h> */
 #include <linux/config.h>
@@ -49,7 +50,7 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
-static cpumask_t cpu_idle_map;
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
@@ -172,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
 	ia64_do_signal(oldset, scr, in_syscall);
 }
 
 static int pal_halt = 1;
+static int can_do_pal_halt = 1;
+
 static int __init nohalt_setup(char * str)
 {
 	pal_halt = 0;
@@ -180,16 +183,20 @@ static int __init nohalt_setup(char * str)
 }
 __setup("nohalt", nohalt_setup);
 
+void
+update_pal_halt_status(int status)
+{
+	can_do_pal_halt = pal_halt && status;
+}
+
 /*
  * We use this if we don't have any better idle routine..
  */
 void
 default_idle (void)
 {
-	unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
-
 	while (!need_resched())
-		if (pal_halt && !pmu_active)
+		if (can_do_pal_halt)
 			safe_halt();
 		else
 			cpu_relax();
@@ -200,27 +207,20 @@ default_idle (void)
 static inline void play_dead(void)
 {
 	extern void ia64_cpu_local_tick (void);
+	unsigned int this_cpu = smp_processor_id();
+
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 
-	/* We shouldn't have to disable interrupts while dead, but
-	 * some interrupts just don't seem to go away, and this makes
-	 * it "work" for testing purposes. */
 	max_xtp();
 	local_irq_disable();
-	/* Death loop */
-	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-		cpu_relax();
-
+	idle_task_exit();
+	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
 	/*
-	 * Enable timer interrupts from now on
-	 * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+	 * The above is a point of no-return, the processor is
+	 * expected to be in SAL loop now.
 	 */
-	local_flush_tlb_all();
-	cpu_set(smp_processor_id(), cpu_online_map);
-	wmb();
-	ia64_cpu_local_tick ();
-	local_irq_enable();
+	BUG();
 }
 #else
 static inline void play_dead(void)
@@ -229,20 +229,31 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-
 void cpu_idle_wait(void)
 {
-	int cpu;
+	unsigned int cpu, this_cpu = get_cpu();
 	cpumask_t map;
 
-	for_each_online_cpu(cpu)
-		cpu_set(cpu, cpu_idle_map);
+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+	put_cpu();
 
-	wmb();
-	do {
-		ssleep(1);
-		cpus_and(map, cpu_idle_map, cpu_online_map);
-	} while (!cpus_empty(map));
+	cpus_clear(map);
+	for_each_online_cpu(cpu) {
+		per_cpu(cpu_idle_state, cpu) = 1;
+		cpu_set(cpu, map);
+	}
+
+	__get_cpu_var(cpu_idle_state) = 0;
+
+	wmb();
+	do {
+		ssleep(1);
+		for_each_online_cpu(cpu) {
+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+				cpu_clear(cpu, map);
+		}
+		cpus_and(map, map, cpu_online_map);
+	} while (!cpus_empty(map));
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -250,7 +261,6 @@ void __attribute__((noreturn))
 cpu_idle (void)
 {
 	void (*mark_idle)(int) = ia64_mark_idle;
-	int cpu = smp_processor_id();
 
 	/* endless idle loop with no priority at all */
 	while (1) {
@@ -261,12 +271,13 @@ cpu_idle (void)
 		while (!need_resched()) {
 			void (*idle)(void);
 
+			if (__get_cpu_var(cpu_idle_state))
+				__get_cpu_var(cpu_idle_state) = 0;
+
+			rmb();
 			if (mark_idle)
 				(*mark_idle)(1);
 
-			if (cpu_isset(cpu, cpu_idle_map))
-				cpu_clear(cpu, cpu_idle_map);
-			rmb();
 			idle = pm_idle;
 			if (!idle)
 				idle = default_idle;
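
The cpu_idle_wait() rewrite above replaces the shared cpu_idle_map cpumask with a per-CPU handshake: the waiter sets a cpu_idle_state flag for every online CPU, and each CPU's idle loop clears its own flag, so once the map drains the caller knows every CPU has passed through the idle loop since the request was posted (and, via the wmb()/rmb() pairing, has observed the new pm_idle pointer). The following user-space sketch illustrates the same handshake pattern with pthreads and C11 atomics; the names (idle_state, idle_loop, idle_wait, NTHREADS) are hypothetical stand-ins for the per-CPU machinery, not kernel APIs.

/*
 * Sketch of the cpu_idle_wait() handshake in user space.
 * The waiter posts a per-thread flag; each "idle loop" clears its
 * own flag, proving it has cycled at least once since the post.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 4

static atomic_int idle_state[NTHREADS];	/* per-"cpu" flag */
static atomic_int stop;

static void *idle_loop(void *arg)	/* plays the role of cpu_idle() */
{
	int me = *(int *)arg;

	while (!atomic_load(&stop)) {
		/* clear our flag: we have been through the idle loop */
		if (atomic_load(&idle_state[me]))
			atomic_store(&idle_state[me], 0);
		usleep(1000);		/* stand-in for safe_halt()/cpu_relax() */
	}
	return NULL;
}

static void idle_wait(void)		/* plays the role of cpu_idle_wait() */
{
	int cpu, pending;

	for (cpu = 0; cpu < NTHREADS; cpu++)
		atomic_store(&idle_state[cpu], 1);

	do {				/* poll until every flag is clear */
		usleep(10000);		/* stand-in for ssleep(1) */
		pending = 0;
		for (cpu = 0; cpu < NTHREADS; cpu++)
			pending |= atomic_load(&idle_state[cpu]);
	} while (pending);
}

int main(void)
{
	pthread_t t[NTHREADS];
	int ids[NTHREADS];

	for (int i = 0; i < NTHREADS; i++) {
		ids[i] = i;
		pthread_create(&t[i], NULL, idle_loop, &ids[i]);
	}
	idle_wait();
	printf("all idle threads have cycled once\n");
	atomic_store(&stop, 1);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Built with something like cc -std=c11 -pthread, idle_wait() returns only after every idle thread has cycled at least once, which is exactly the guarantee the kernel needs before it can assume no CPU is still executing a stale pm_idle routine.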