author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ppc64/kernel/smp.c
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ppc64/kernel/smp.c')
-rw-r--r--  arch/ppc64/kernel/smp.c  622
1 file changed, 622 insertions, 0 deletions
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
new file mode 100644
index 000000000000..1c92da3e4525
--- /dev/null
+++ b/arch/ppc64/kernel/smp.c
@@ -0,0 +1,622 @@
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/abs_addr.h>

#include "mpic.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
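
/*
 * Example: with DEBUG defined, DBG("cpu %d up\n", cpu) expands to
 * udbg_printf("cpu %d up\n", cpu); with DEBUG undefined (as above)
 * the call expands to nothing and generates no code.
 */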

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

#ifdef CONFIG_PPC_MULTIPLATFORM
void smp_mpic_message_pass(int target, int msg)
{
        /* make sure we're sending something that translates to an IPI */
        if ( msg > 0x3 ){
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
                       smp_processor_id(), msg);
                return;
        }
        switch ( target )
        {
        case MSG_ALL:
                mpic_send_ipi(msg, 0xffffffff);
                break;
        case MSG_ALL_BUT_SELF:
                mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
                break;
        default:
                mpic_send_ipi(msg, 1 << target);
                break;
        }
}
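
/*
 * Mask arithmetic example: on CPU 2, the MSG_ALL_BUT_SELF case
 * computes 0xffffffff & ~(1 << 2) = 0xfffffffb, i.e. every bit
 * set except bit 2, so the IPI reaches all CPUs but the sender.
 */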

int __init smp_mpic_probe(void)
{
        int nr_cpus;

        DBG("smp_mpic_probe()...\n");

        nr_cpus = cpus_weight(cpu_possible_map);

        DBG("nr_cpus: %d\n", nr_cpus);

        if (nr_cpus > 1)
                mpic_request_ipis();

        return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
        mpic_setup_this_cpu();
}

void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        mb();
}
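
/*
 * The secondary's side of this handshake (a C-level sketch; the
 * real spin loop lives in the early assembly entry code) amounts
 * to:
 *
 *        while (!paca[nr].cpu_start)
 *                barrier();
 *
 * after which the secondary falls through to secondary_start.
 */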

#endif /* CONFIG_PPC_MULTIPLATFORM */

static void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        unsigned long offset = tb_ticks_per_jiffy / max_cpus;
        unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

        for_each_cpu(i) {
                if (i != boot_cpuid) {
                        paca[i].next_jiffy_update_tb =
                                previous_tb + offset;
                        previous_tb = paca[i].next_jiffy_update_tb;
                }
        }
}
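
/*
 * Worked example: with max_cpus = 4 and a hypothetical
 * tb_ticks_per_jiffy of 1200000, offset = 300000 timebase ticks,
 * so the non-boot CPUs take their per-jiffy timer work at
 * previous_tb + 300000, + 600000 and + 900000 instead of all
 * landing on the same tick as the boot CPU.
 */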

void smp_message_recv(int msg, struct pt_regs *regs)
{
        switch(msg) {
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
#if 0
        case PPC_MSG_MIGRATE_TASK:
                /* spare */
                break;
#endif
#ifdef CONFIG_DEBUGGER
        case PPC_MSG_DEBUGGER_BREAK:
                debugger_ipi(regs);
                break;
#endif
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

void smp_send_reschedule(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;

/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))
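
/*
 * Timeout arithmetic: 1UL << (30 + 3) = 2^33 = 8589934592 loop
 * iterations; at very roughly one iteration per cycle on a 1GHz
 * CPU that is ~8.6 seconds, hence "at least 8 seconds" above.
 */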

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                       int wait)
{
        struct call_data_struct data;
        int ret = -1, cpus;
        unsigned long timeout;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        /* Must grab online cpu count with preempt disabled, otherwise
         * it can change. */
        cpus = num_online_cpus() - 1;
        if (!cpus) {
                ret = 0;
                goto out;
        }

        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

        /* Wait for response */
        timeout = SMP_CALL_TIMEOUT;
        while (atomic_read(&data.started) != cpus) {
                HMT_low();
                if (--timeout == 0) {
                        printk("smp_call_function on cpu %d: other cpus not "
                               "responding (%d)\n", smp_processor_id(),
                               atomic_read(&data.started));
                        debugger(NULL);
                        goto out;
                }
        }

        if (wait) {
                timeout = SMP_CALL_TIMEOUT;
                while (atomic_read(&data.finished) != cpus) {
                        HMT_low();
                        if (--timeout == 0) {
                                printk("smp_call_function on cpu %d: other "
                                       "cpus not finishing (%d/%d)\n",
                                       smp_processor_id(),
                                       atomic_read(&data.finished),
                                       atomic_read(&data.started));
                                debugger(NULL);
                                goto out;
                        }
                }
        }

        ret = 0;

out:
        call_data = NULL;
        HMT_medium();
        spin_unlock(&call_lock);
        return ret;
}

EXPORT_SYMBOL(smp_call_function);
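
/*
 * Hypothetical caller sketch (names invented for illustration):
 * run a fast, non-blocking function on every other CPU from
 * process context and wait for it to finish everywhere.
 */
#if 0
static void example_remote_op(void *info)
{
        /* runs in interrupt context on each CPU; must not block */
}

static void example_caller(void)
{
        if (smp_call_function(example_remote_op, NULL, 0, 1) != 0)
                printk("example: remote cpus did not respond\n");
}
#endif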

void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        if (!wait)
                smp_mb__before_atomic_inc();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait) {
                smp_mb__before_atomic_inc();
                atomic_inc(&call_data->finished);
        }
}

extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        paca[cpu].__current = p;
        current_set[cpu] = p->thread_info;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

#ifndef CONFIG_PPC_ISERIES
        paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

        /*
         * Should update do_gtod.stamp_xsec.
         * For now we leave it, which means the time can be some
         * number of msecs off until someone does a settimeofday().
         */
        do_gtod.varp->tb_orig_stamp = tb_last_stamp;
        systemcfg->tb_orig_stamp = tb_last_stamp;
#endif

        max_cpus = smp_ops->probe();

        smp_space_timers(max_cpus);

        for_each_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);

        paca[boot_cpuid].__current = current;
        current_set[boot_cpuid] = current->thread_info;
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        systemcfg->processorCount--;
        cpu_clear(cpu, cpu_online_map);
        fixup_irqs(cpu_online_map);
        return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
        /* Do the normal bootup if we haven't
         * already bootstrapped. */
        if (system_state != SYSTEM_RUNNING)
                return -ENOSYS;

        /* get the target out of its holding state */
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        wmb();

        while (!cpu_online(cpu))
                cpu_relax();

        fixup_irqs(cpu_online_map);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();

        flush_tlb_pending();
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}
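
/*
 * Handshake summary: the dying CPU publishes CPU_DEAD and then
 * spins in generic_mach_cpu_die(); generic_cpu_die() polls for
 * CPU_DEAD for up to 100 * 100ms = 10 seconds; a later
 * generic_cpu_enable() writes CPU_UP_PREPARE, which releases the
 * spin and lets the CPU put itself back in cpu_online_map.
 */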
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}

int __devinit __cpu_up(unsigned int cpu)
{
        int c;

        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
                return -EINVAL;

        paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

        if (!cpu_has_feature(CPU_FTR_SLB)) {
                void *tmp;

                /* maximum of 48 CPUs on machines with a segment table */
                if (cpu >= 48)
                        BUG();

                tmp = &stab_array[PAGE_SIZE * cpu];
                memset(tmp, 0, PAGE_SIZE);
                paca[cpu].stab_addr = (unsigned long)tmp;
                paca[cpu].stab_real = virt_to_abs(tmp);
        }

        /* Make sure the callin-map entry is 0 (it can be left over
         * from a previous CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * Wait to see if the cpu made a callin (is actually up).
         * Use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case. Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--) {
                        msleep(200);
                }
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}
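
/*
 * Callin wait arithmetic: at boot, 5000 iterations of udelay(100)
 * give the secondary roughly 0.5 seconds to set cpu_callin_map;
 * in the hotplug case, 25 * msleep(200) = 5 seconds, matching the
 * "Wait five seconds" comment above.
 */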


/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(paca[cpu].default_decr);
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        local_irq_enable();

        cpu_idle();
        return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed(current, old_mask);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        if (smp_ops->cpu_disable)
                return smp_ops->cpu_disable();

        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
#endif