commit 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (tags: Linux-2.6.12-rc2, v2.6.12-rc2)
tree   0bba044c4ce775e45a88a51686b5d9f90697ea9d (arch/parisc/kernel/smp.c)
Author:    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
Committer: Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/parisc/kernel/smp.c')

-rw-r--r--  arch/parisc/kernel/smp.c | 723
1 file changed, 723 insertions(+), 0 deletions(-)
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
new file mode 100644
index 000000000000..bcc7e83f5142
--- /dev/null
+++ b/arch/parisc/kernel/smp.c
@@ -0,0 +1,723 @@
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#undef ENTRY_SYS_CPUS	/* syscall support for iCOD-like functionality */

#include <linux/autoconf.h>

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>	/* for flush_tlb_all() proto/macro */

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#define kDEBUG 0

DEFINE_SPINLOCK(smp_lock);

volatile struct task_struct *smp_init_current_idle_task;

static volatile int cpu_now_booting = 0;	/* track which CPU is booting */

static int parisc_max_cpus = 1;
/* Online cpus are the ones that we've managed to bring up completely;
 * possible cpus are all valid cpus;
 * present cpus are all detected cpus.
 *
 * On startup we bring up the "possible" cpus. Since we discover
 * CPUs later, we add them as hotplug, so the possible cpu mask is
 * empty in the beginning.
 * (An illustrative usage sketch for these masks follows the
 * EXPORT_SYMBOL lines below.)
 */

cpumask_t cpu_online_map = CPU_MASK_NONE;	/* Bitmap of online CPUs */
cpumask_t cpu_possible_map = CPU_MASK_ALL;	/* Bitmap of possible CPUs */

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
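
/*
 * Illustrative sketch, not part of the original commit: how code
 * typically consumes these masks.  cpu_online() tests membership, and a
 * bounded loop over NR_CPUS walks every CPU ID, exactly as
 * send_IPI_allbutself() does further down.  The function name is
 * hypothetical and the block is guarded by "#if 0" so it never compiles.
 */
#if 0
static void example_count_online_cpus(void)
{
	int i, n = 0;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))	/* bit set once smp_cpu_init() completes */
			n++;

	printk(KERN_INFO "SMP: %d CPUs online (NR_CPUS=%d)\n", n, NR_CPUS);
}
#endif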


struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;

enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};
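
/*
 * Illustrative note, not part of the original commit: each message type
 * above is used as a bit position in cpuinfo_parisc.pending_ipi, so
 * several IPI types can be coalesced behind one hardware interrupt:
 *
 *	p->pending_ipi |= 1 << op;	(set under p->lock in ipi_send())
 *	which = ffz(~ops);		(lowest set bit, in ipi_interrupt())
 *	ops &= ~(1 << which);		(cleared once that op is handled)
 */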


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
** *May* need this "hook" to register IPI handler
** once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{

	/* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
	/* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif

#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
	cpu_data[this_cpu].state = STATE_STOPPED;
	mark_bh(IPI_BH);
#else
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for (;;)
		;
#endif
}


irqreturn_t
ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spin_lock_irqsave(&(p->lock), flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(&(p->lock), flags);

		mb();	/* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			switch (which) {
			case IPI_RESCHEDULE:
#if (kDEBUG >= 100)
				printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_RESCHEDULE);
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
#if (kDEBUG >= 100)
				printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CALL_FUNC);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec((atomic_t *)&data->unstarted_count);

					/* At this point, *data can't
					 * be relied upon.
					 */

					(*func)(info);

					/* Notify the sending CPU that the
					 * task is done.
					 */
					mb();
					if (wait)
						atomic_dec((atomic_t *)&data->unfinished_count);
				}
				break;

			case IPI_CPU_START:
#if (kDEBUG >= 100)
				printk(KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
				p->state = STATE_RUNNING;
#endif
				break;

			case IPI_CPU_STOP:
#if (kDEBUG >= 100)
				printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
				halt_processor();
#endif
				break;

			case IPI_CPU_TEST:
#if (kDEBUG >= 100)
				printk(KERN_DEBUG "CPU%d is alive!\n", this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_TEST);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				ops &= ~(1 << which);
				return IRQ_NONE;
			} /* Switch */
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}


static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &cpu_data[cpu];
	unsigned long flags;

	spin_lock_irqsave(&(p->lock), flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
	spin_unlock_irqrestore(&(p->lock), flags);
}


static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i) && i != smp_processor_id())
			send_IPI_single(i, op);
	}
}


inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
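
/*
 * Illustrative sketch, not part of the original commit: pinging every
 * other online CPU with the diagnostic IPI.  With kDEBUG >= 100 each
 * receiver logs "CPU%d is alive!" from ipi_interrupt() above.  The
 * function name is hypothetical; the block is kept under "#if 0".
 */
#if 0
static void example_ping_other_cpus(void)
{
	send_IPI_allbutself(IPI_CPU_TEST);
}
#endif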


/**
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.  (An illustrative caller follows the
 * EXPORT_SYMBOL below.)
 */

int
smp_call_function(void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	unsigned long timeout;
	static DEFINE_SPINLOCK(lock);
	int retries = 0;

	if (num_online_cpus() < 2)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, num_online_cpus() - 1);
	atomic_set(&data.unfinished_count, num_online_cpus() - 1);

	if (retry) {
		spin_lock(&lock);
		while (smp_call_function_data != 0)
			barrier();
	} else {
		spin_lock(&lock);
		if (smp_call_function_data) {
			spin_unlock(&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock(&lock);

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(IPI_CALL_FUNC);

retry:
	/* Wait for response */
	timeout = jiffies + HZ;
	while ((atomic_read(&data.unstarted_count) > 0) &&
	       time_before(jiffies, timeout))
		barrier();

	if (atomic_read(&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
		       smp_processor_id(), ++retries);
		goto retry;
	}
	/* We either got one or timed out.  Release the lock */

	mb();
	smp_call_function_data = NULL;

	while (wait && atomic_read(&data.unfinished_count) > 0)
		barrier();

	return 0;
}

EXPORT_SYMBOL(smp_call_function);
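
/*
 * Illustrative caller, not part of the original commit: the callback
 * runs inside ipi_interrupt() on every other online CPU, so it must be
 * fast, non-blocking and IRQ-safe.  Both function names below are
 * hypothetical; the block is kept under "#if 0" so it never compiles.
 */
#if 0
static void example_hit_counter(void *info)
{
	atomic_inc((atomic_t *)info);	/* trivial, IRQ-safe work */
}

static void example_run_everywhere(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* retry=1: spin until any in-flight call completes;
	 * wait=1: don't return before every other CPU ran the callback. */
	if (smp_call_function(example_hit_counter, &hits, 1, 1) == 0)
		printk(KERN_INFO "SMP: callback ran on %d other CPUs\n",
		       atomic_read(&hits));
}
#endif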

/*
 * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
 * as we want to ensure all TLB's flushed before proceeding.
 */

extern void flush_tlb_all_local(void);

void
smp_flush_tlb_all(void)
{
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
}


void
smp_do_timer(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct cpuinfo_parisc *data = &cpu_data[cpu];

	if (!--data->prof_counter) {
		data->prof_counter = data->prof_multiplier;
		update_process_times(user_mode(regs));
	}
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);	/* arch/parisc/kernel/setup.c */
	extern void init_IRQ(void);	/* arch/parisc/kernel/irq.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_test_and_set(cpunum, cpu_online_map))
	{
		extern void machine_halt(void);	/* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();	/* make sure no IRQ's are enabled or pending */
}


/*
 * Slaves start using C here.  Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(void)
{
	int slave_id = cpu_now_booting;
#if 0
	void *istack;
#endif

	smp_cpu_init(slave_id);

#if 0	/* NOT WORKING YET - see entry.S */
	istack = (void *)__get_free_pages(GFP_KERNEL, ISTACK_ORDER);
	if (istack == NULL) {
		printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n", slave_id);
		BUG();
	}
	mtctl(istack, 31);
#endif

	flush_cache_all_local();	/* start with known state */
	flush_tlb_all_local();

	local_irq_enable();	/* Interrupts have been off until now */

	cpu_idle();	/* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
int __init smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	long timeout;

	/*
	 * Create an idle task for this CPU.  Note the address we'd give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.  We need to pull the just created idle task
	 * off the run queue and stuff it into the init_tasks[] array.
	 * Sheesh . . .
	 */

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	idle->thread_info->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
#if (kDEBUG >= 100)
	printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
	cpu_data[cpuid].state = STATE_RUNNING;
#endif
	return 0;
}
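
/*
 * Illustrative summary, not part of the original commit, of the
 * monarch/slave handshake implemented above:
 *
 *   1. monarch publishes the idle task in smp_init_current_idle_task
 *      and the logical CPU number in cpu_now_booting;
 *   2. monarch writes to the slave's hpa via gsc_writel(), raising the
 *      rendezvous interrupt (EIR{0}) that PDC is spinning on;
 *   3. slave enters smp_slave_stext -> smp_callin() -> smp_cpu_init(),
 *      which sets its bit in cpu_online_map;
 *   4. monarch polls cpu_online(cpuid) for up to 10000 * 100 us (~1 s)
 *      and declares the CPU stuck if the bit never appears.
 */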

void __devinit smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = cpu_data[0].cpuid;	/* CPU ID of BSP */

#ifdef ENTRY_SYS_CPUS
	cpu_data[0].state = STATE_RUNNING;
#endif

	/* Setup BSP mappings */
	printk("SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	cpu_set(bootstrap_processor, cpu_online_map);
	cpu_set(bootstrap_processor, cpu_present_map);
}



/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPU's until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}


void smp_cpus_done(unsigned int cpu_max)
{
	return;
}


int __devinit __cpu_up(unsigned int cpu)
{
	if (cpu != 0 && cpu < parisc_max_cpus)
		smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}


#ifdef ENTRY_SYS_CPUS
/* Code goes along with:
** entry.s: ENTRY_NAME(sys_cpus)  / * 215, for cpu stat * /
*/
int sys_cpus(int argc, char **argv)
{
	int i, j = 0;
	extern int current_pid(int cpu);

	if (argc > 2) {
		printk("sys_cpus: Only one argument supported\n");
		return -1;
	}
	if (argc == 1) {

#ifdef DUMP_MORE_STATE
		for (i = 0; i < NR_CPUS; i++) {
			int cpus_per_line = 4;
			if (cpu_online(i)) {
				if (j++ % cpus_per_line)
					printk(" %3d", i);
				else
					printk("\n %3d", i);
			}
		}
		printk("\n");
#else
		printk("\n 0\n");
#endif
	} else if ((argc == 2) && !(strcmp(argv[1], "-l"))) {
		printk("\nCPUSTATE  TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
		for (i = 0; i < NR_CPUS; i++) {
			if (!cpu_online(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch (cpu_data[i].state) {
				case STATE_RENDEZVOUS:
					printk("RENDEZVS ");
					break;
				case STATE_RUNNING:
					printk((current_pid(i) != 0) ? "RUNNING " : "IDLING ");
					break;
				case STATE_STOPPED:
					printk("STOPPED ");
					break;
				case STATE_HALTED:
					printk("HALTED ");
					break;
				default:
					printk("%08x?", cpu_data[i].state);
					break;
				}
				if (cpu_online(i)) {
					printk(" %4d", current_pid(i));
				}
				printk(" %6d", cpu_number_map(i));
				printk(" %5d", i);
				printk(" 0x%lx\n", cpu_data[i].hpa);
			}
		}
#else
		printk("\n%s %4d 0 0 --------",
			(current->pid) ? "RUNNING " : "IDLING  ", current->pid);
#endif
	} else if ((argc == 2) && !(strcmp(argv[1], "-s"))) {
#ifdef DUMP_MORE_STATE
		printk("\nCPUSTATE   CPUID\n");
		for (i = 0; i < NR_CPUS; i++) {
			if (!cpu_online(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch (cpu_data[i].state) {
				case STATE_RENDEZVOUS:
					printk("RENDEZVS");
					break;
				case STATE_RUNNING:
					printk((current_pid(i) != 0) ? "RUNNING " : "IDLING");
					break;
				case STATE_STOPPED:
					printk("STOPPED ");
					break;
				case STATE_HALTED:
					printk("HALTED ");
					break;
				default:
					break;
				}
				printk(" %5d\n", i);
			}
		}
#else
		printk("\n%s CPU0", (current->pid == 0) ? "RUNNING " : "IDLING ");
#endif
	} else {
		printk("sys_cpus: Unknown request\n");
		return -1;
	}
	return 0;
}
#endif /* ENTRY_SYS_CPUS */

#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif