author	Paul Mackerras <paulus@samba.org>	2005-11-04 18:33:55 -0500
committer	Paul Mackerras <paulus@samba.org>	2005-11-04 18:33:55 -0500
commit	5ad570786158e327a1c5d32dd3d66f26d8de6340 (patch)
tree	0b4aafe469c72e5887ed0379d62a0ee390db3160 /arch/powerpc
parent	c3df69cd854551cf70e9c63aa509c26621084f60 (diff)
powerpc: Merge smp.c and smp.h
This also moves setup_cpu_maps to setup-common.c (calling it
smp_setup_cpu_maps) and uses it on both 32-bit and 64-bit.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')

-rw-r--r--	arch/powerpc/kernel/Makefile	4
-rw-r--r--	arch/powerpc/kernel/setup-common.c	119
-rw-r--r--	arch/powerpc/kernel/setup_32.c	2
-rw-r--r--	arch/powerpc/kernel/setup_64.c	120
-rw-r--r--	arch/powerpc/kernel/smp.c	565
-rw-r--r--	arch/powerpc/platforms/chrp/smp.c	39

6 files changed, 692 insertions, 157 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 631149ea93db..b3ae2993efb8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_PPC_OF) += prom_init.o
 obj-$(CONFIG_MODULES) += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT) += btext.o
 obj-$(CONFIG_6xx) += idle_6xx.o
+obj-$(CONFIG_SMP) += smp.o
 
 ifeq ($(CONFIG_PPC_ISERIES),y)
 $(obj)/head_64.o: $(obj)/lparmap.s
@@ -49,8 +50,9 @@ endif
 
 else
 # stuff used from here for ARCH=ppc or ARCH=ppc64
+smpobj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
-	setup-common.o
+	setup-common.o $(smpobj-y)
 
 
 endif
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 14ebe3bc48c3..d43fa8c0e5ac 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -170,12 +170,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PPC64 /* XXX for now */
 	pvr = per_cpu(pvr, cpu_id);
 #else
-	pvr = cpu_data[cpu_id].pvr;
-#endif
-#else
 	pvr = mfspr(SPRN_PVR);
 #endif
 	maj = (pvr >> 8) & 0xFF;
@@ -408,3 +404,118 @@ static int __init set_preferred_console(void)
 }
 console_initcall(set_preferred_console);
 #endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_SMP
+
+/**
+ * setup_cpu_maps - initialize the following cpu maps:
+ *                  cpu_possible_map
+ *                  cpu_present_map
+ *                  cpu_sibling_map
+ *
+ * Having the possible map set up early allows us to restrict allocations
+ * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
+ *
+ * We do not initialize the online map here; cpus set their own bits in
+ * cpu_online_map as they come up.
+ *
+ * This function is valid only for Open Firmware systems.  finish_device_tree
+ * must be called before using this.
+ *
+ * While we're here, we may as well set the "physical" cpu ids in the paca.
+ */
+void __init smp_setup_cpu_maps(void)
+{
+	struct device_node *dn = NULL;
+	int cpu = 0;
+	int swap_cpuid = 0;
+
+	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+		int *intserv;
+		int j, len = sizeof(u32), nthreads = 1;
+
+		intserv = (int *)get_property(dn, "ibm,ppc-interrupt-server#s",
+					      &len);
+		if (intserv)
+			nthreads = len / sizeof(int);
+		else {
+			intserv = (int *) get_property(dn, "reg", NULL);
+			if (!intserv)
+				intserv = &cpu;	/* assume logical == phys */
+		}
+
+		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+			cpu_set(cpu, cpu_present_map);
+			set_hard_smp_processor_id(cpu, intserv[j]);
+
+			if (intserv[j] == boot_cpuid_phys)
+				swap_cpuid = cpu;
+			cpu_set(cpu, cpu_possible_map);
+			cpu++;
+		}
+	}
+
+	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
+	 * boot cpu is logical 0.
+	 */
+	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
+		u32 tmp;
+		tmp = get_hard_smp_processor_id(0);
+		set_hard_smp_processor_id(0, boot_cpuid_phys);
+		set_hard_smp_processor_id(swap_cpuid, tmp);
+	}
+
+#ifdef CONFIG_PPC64
+	/*
+	 * On pSeries LPAR, we need to know how many cpus
+	 * could possibly be added to this partition.
+	 */
+	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
+	    (dn = of_find_node_by_path("/rtas"))) {
+		int num_addr_cell, num_size_cell, maxcpus;
+		unsigned int *ireg;
+
+		num_addr_cell = prom_n_addr_cells(dn);
+		num_size_cell = prom_n_size_cells(dn);
+
+		ireg = (unsigned int *)
+			get_property(dn, "ibm,lrdr-capacity", NULL);
+
+		if (!ireg)
+			goto out;
+
+		maxcpus = ireg[num_addr_cell + num_size_cell];
+
+		/* Double maxcpus for processors which have SMT capability */
+		if (cpu_has_feature(CPU_FTR_SMT))
+			maxcpus *= 2;
+
+		if (maxcpus > NR_CPUS) {
+			printk(KERN_WARNING
+			       "Partition configured for %d cpus, "
+			       "operating system maximum is %d.\n",
+			       maxcpus, NR_CPUS);
+			maxcpus = NR_CPUS;
+		} else
+			printk(KERN_INFO "Partition configured for %d cpus.\n",
+			       maxcpus);
+
+		for (cpu = 0; cpu < maxcpus; cpu++)
+			cpu_set(cpu, cpu_possible_map);
+	out:
+		of_node_put(dn);
+	}
+
+	/*
+	 * Do the sibling map; assume only two threads per processor.
+	 */
+	for_each_cpu(cpu) {
+		cpu_set(cpu, cpu_sibling_map[cpu]);
+		if (cpu_has_feature(CPU_FTR_SMT))
+			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
+	}
+
+	systemcfg->processorCount = num_present_cpus();
+#endif /* CONFIG_PPC64 */
+}
+#endif /* CONFIG_SMP */
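Two details of the smp_setup_cpu_maps() added above are easy to miss: the hard-id swap that guarantees the boot CPU ends up as logical 0, and the "cpu ^ 0x1" trick that pairs SMT siblings (two threads per core, adjacent logical ids). The following is a minimal standalone sketch of both ideas in plain user-space C; the array and the physical ids are hypothetical, and this is an illustration, not kernel code:

	#include <stdio.h>

	#define NR_CPUS 8

	static unsigned int hard_id[NR_CPUS];	/* logical -> physical map */

	int main(void)
	{
		unsigned int boot_cpuid_phys = 5;	/* assume firmware booted us on phys 5 */
		int swap_cpuid = 0, cpu;

		/* Pretend the device tree enumerated physical ids 0..7 in order. */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			hard_id[cpu] = cpu;
			if (hard_id[cpu] == boot_cpuid_phys)
				swap_cpuid = cpu;
		}

		/* Swap so the boot cpu is logical 0, as the kernel code does. */
		if (boot_cpuid_phys != hard_id[0]) {
			unsigned int tmp = hard_id[0];
			hard_id[0] = boot_cpuid_phys;
			hard_id[swap_cpuid] = tmp;
		}

		/* Sibling pairing: flipping the low bit of the logical id moves
		 * between the even and odd thread of the same core. */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			printf("logical %d -> phys %u, SMT sibling %d\n",
			       cpu, hard_id[cpu], cpu ^ 0x1);
		return 0;
	}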
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9680ae99b084..b45eedbb4b3a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -288,6 +288,8 @@ void __init setup_arch(char **cmdline_p)
 	unflatten_device_tree();
 	finish_device_tree();
 
+	smp_setup_cpu_maps();
+
 #ifdef CONFIG_BOOTX_TEXT
 	init_boot_display();
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 275d86ddd612..6b52cce872be 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -181,114 +181,8 @@ static int __init early_smt_enabled(char *p)
 }
 early_param("smt-enabled", early_smt_enabled);
 
-/**
- * setup_cpu_maps - initialize the following cpu maps:
- *                  cpu_possible_map
- *                  cpu_present_map
- *                  cpu_sibling_map
- *
- * Having the possible map set up early allows us to restrict allocations
- * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
- *
- * We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
- *
- * This function is valid only for Open Firmware systems.  finish_device_tree
- * must be called before using this.
- *
- * While we're here, we may as well set the "physical" cpu ids in the paca.
- */
-static void __init setup_cpu_maps(void)
-{
-	struct device_node *dn = NULL;
-	int cpu = 0;
-	int swap_cpuid = 0;
-
-	check_smt_enabled();
-
-	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
-		u32 *intserv;
-		int j, len = sizeof(u32), nthreads;
-
-		intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
-					      &len);
-		if (!intserv)
-			intserv = (u32 *)get_property(dn, "reg", NULL);
-
-		nthreads = len / sizeof(u32);
-
-		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
-			cpu_set(cpu, cpu_present_map);
-			set_hard_smp_processor_id(cpu, intserv[j]);
-
-			if (intserv[j] == boot_cpuid_phys)
-				swap_cpuid = cpu;
-			cpu_set(cpu, cpu_possible_map);
-			cpu++;
-		}
-	}
-
-	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
-	 * boot cpu is logical 0.
-	 */
-	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
-		u32 tmp;
-		tmp = get_hard_smp_processor_id(0);
-		set_hard_smp_processor_id(0, boot_cpuid_phys);
-		set_hard_smp_processor_id(swap_cpuid, tmp);
-	}
-
-	/*
-	 * On pSeries LPAR, we need to know how many cpus
-	 * could possibly be added to this partition.
-	 */
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
-	    (dn = of_find_node_by_path("/rtas"))) {
-		int num_addr_cell, num_size_cell, maxcpus;
-		unsigned int *ireg;
-
-		num_addr_cell = prom_n_addr_cells(dn);
-		num_size_cell = prom_n_size_cells(dn);
-
-		ireg = (unsigned int *)
-			get_property(dn, "ibm,lrdr-capacity", NULL);
-
-		if (!ireg)
-			goto out;
-
-		maxcpus = ireg[num_addr_cell + num_size_cell];
-
-		/* Double maxcpus for processors which have SMT capability */
-		if (cpu_has_feature(CPU_FTR_SMT))
-			maxcpus *= 2;
-
-		if (maxcpus > NR_CPUS) {
-			printk(KERN_WARNING
-			       "Partition configured for %d cpus, "
-			       "operating system maximum is %d.\n",
-			       maxcpus, NR_CPUS);
-			maxcpus = NR_CPUS;
-		} else
-			printk(KERN_INFO "Partition configured for %d cpus.\n",
-			       maxcpus);
-
-		for (cpu = 0; cpu < maxcpus; cpu++)
-			cpu_set(cpu, cpu_possible_map);
- out:
-		of_node_put(dn);
-	}
-
-	/*
-	 * Do the sibling map; assume only two threads per processor.
-	 */
-	for_each_cpu(cpu) {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
-		if (cpu_has_feature(CPU_FTR_SMT))
-			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
-	}
-
-	systemcfg->processorCount = num_present_cpus();
-}
+#else
+#define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
 extern struct machdep_calls pSeries_md;
@@ -417,6 +311,8 @@ void smp_release_cpus(void)
 
 	DBG(" <- smp_release_cpus()\n");
 }
+#else
+#define smp_release_cpus()
 #endif /* CONFIG_SMP || CONFIG_KEXEC */
 
 /*
@@ -608,17 +504,13 @@ void __init setup_system(void)
 
 	parse_early_param();
 
-#ifdef CONFIG_SMP
-	/*
-	 * iSeries has already initialized the cpu maps at this point.
-	 */
-	setup_cpu_maps();
+	check_smt_enabled();
+	smp_setup_cpu_maps();
 
 	/* Release secondary cpus out of their spinloops at 0x60 now that
 	 * we can map physical -> logical CPU ids
 	 */
 	smp_release_cpus();
-#endif
 
 	printk("Starting Linux PPC64 %s\n", system_utsname.version);
 
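Note the pattern the two "#else" additions above rely on: when CONFIG_SMP is off, check_smt_enabled() and smp_release_cpus() become empty macros, so setup_system() can call them unconditionally instead of wrapping the call sites in #ifdef CONFIG_SMP. A hedged standalone illustration of the idiom (hypothetical names, not the kernel headers themselves):

	#include <stdio.h>

	/* #define CONFIG_SMP 1 */	/* toggle to compare both builds */

	#ifdef CONFIG_SMP
	static void check_smt_enabled(void) { puts("checking smt"); }
	static void smp_release_cpus(void)  { puts("releasing cpus"); }
	#else
	#define check_smt_enabled()	/* expands to nothing on UP builds */
	#define smp_release_cpus()	/* expands to nothing on UP builds */
	#endif

	int main(void)
	{
		/* Identical call sites compile in both configurations. */
		check_smt_enabled();
		smp_release_cpus();
		return 0;
	}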
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
new file mode 100644
index 000000000000..1794a694a928
--- /dev/null
+++ b/arch/powerpc/kernel/smp.c
@@ -0,0 +1,565 @@
+/*
+ * SMP support for ppc.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
+ * deal of code from the sparc and intel versions.
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ *
+ * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <linux/err.h>
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/xmon.h>
+#include <asm/machdep.h>
+#include <asm/cputable.h>
+#include <asm/system.h>
+#include <asm/mpic.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
+int smp_hw_index[NR_CPUS];
+struct thread_info *secondary_ti;
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
+cpumask_t cpu_online_map = CPU_MASK_NONE;
+cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_possible_map);
+
+/* SMP operations for this machine */
+struct smp_ops_t *smp_ops;
+
+static volatile unsigned int cpu_callin_map[NR_CPUS];
+
+void smp_call_function_interrupt(void);
+
+int smt_enabled_at_boot = 1;
+
+#ifdef CONFIG_MPIC
+int __init smp_mpic_probe(void)
+{
+	int nr_cpus;
+
+	DBG("smp_mpic_probe()...\n");
+
+	nr_cpus = cpus_weight(cpu_possible_map);
+
+	DBG("nr_cpus: %d\n", nr_cpus);
+
+	if (nr_cpus > 1)
+		mpic_request_ipis();
+
+	return nr_cpus;
+}
+
+void __devinit smp_mpic_setup_cpu(int cpu)
+{
+	mpic_setup_this_cpu();
+}
+#endif /* CONFIG_MPIC */
+
+#ifdef CONFIG_PPC64
+void __devinit smp_generic_kick_cpu(int nr)
+{
+	BUG_ON(nr < 0 || nr >= NR_CPUS);
+
+	/*
+	 * The processor is currently spinning, waiting for the
+	 * cpu_start field to become non-zero.  After we set cpu_start,
+	 * the processor will continue on to secondary_start.
+	 */
+	paca[nr].cpu_start = 1;
+	smp_mb();
+}
+#endif
+
+void smp_message_recv(int msg, struct pt_regs *regs)
+{
+	switch(msg) {
+	case PPC_MSG_CALL_FUNCTION:
+		smp_call_function_interrupt();
+		break;
+	case PPC_MSG_RESCHEDULE:
+		/* XXX Do we have to do this? */
+		set_need_resched();
+		break;
+#ifdef CONFIG_DEBUGGER
+	case PPC_MSG_DEBUGGER_BREAK:
+		debugger_ipi(regs);
+		break;
+#endif
+	default:
+		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
+		       smp_processor_id(), msg);
+		break;
+	}
+}
+
+void smp_send_reschedule(int cpu)
+{
+	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+}
+
+#ifdef CONFIG_DEBUGGER
+void smp_send_debugger_break(int cpu)
+{
+	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+}
+#endif
+
+static void stop_this_cpu(void *dummy)
+{
+	local_irq_disable();
+	while (1)
+		;
+}
+
+void smp_send_stop(void)
+{
+	smp_call_function(stop_this_cpu, NULL, 1, 0);
+}
+
+/*
+ * Structure and data for smp_call_function().  This is designed to minimise
+ * static memory requirements.  It also looks cleaner.
+ * Stolen from the i386 version.
+ */
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
+
+static struct call_data_struct {
+	void (*func) (void *info);
+	void *info;
+	atomic_t started;
+	atomic_t finished;
+	int wait;
+} *call_data;
+
+/* delay of at least 8 seconds */
+#define SMP_CALL_TIMEOUT	8
+
+/*
+ * This function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ *
+ * [SUMMARY] Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <nonatomic> currently unused.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+		       int wait)
+{
+	struct call_data_struct data;
+	int ret = -1, cpus;
+	u64 timeout;
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	spin_lock(&call_lock);
+	/* Must grab online cpu count with preempt disabled, otherwise
+	 * it can change. */
+	cpus = num_online_cpus() - 1;
+	if (!cpus) {
+		ret = 0;
+		goto out;
+	}
+
+	call_data = &data;
+	smp_wmb();
+	/* Send a message to all other CPUs and wait for them to respond */
+	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
+
+	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus) {
+		HMT_low();
+		if (get_tb() >= timeout) {
+			printk("smp_call_function on cpu %d: other cpus not "
+			       "responding (%d)\n", smp_processor_id(),
+			       atomic_read(&data.started));
+			debugger(NULL);
+			goto out;
+		}
+	}
+
+	if (wait) {
+		while (atomic_read(&data.finished) != cpus) {
+			HMT_low();
+			if (get_tb() >= timeout) {
+				printk("smp_call_function on cpu %d: other "
+				       "cpus not finishing (%d/%d)\n",
+				       smp_processor_id(),
+				       atomic_read(&data.finished),
+				       atomic_read(&data.started));
+				debugger(NULL);
+				goto out;
+			}
+		}
+	}
+
+	ret = 0;
+
+ out:
+	call_data = NULL;
+	HMT_medium();
+	spin_unlock(&call_lock);
+	return ret;
+}
+
+EXPORT_SYMBOL(smp_call_function);
+
+void smp_call_function_interrupt(void)
+{
+	void (*func) (void *info);
+	void *info;
+	int wait;
+
+	/* call_data will be NULL if the sender timed out while
+	 * waiting on us to receive the call.
+	 */
+	if (!call_data)
+		return;
+
+	func = call_data->func;
+	info = call_data->info;
+	wait = call_data->wait;
+
+	if (!wait)
+		smp_mb__before_atomic_inc();
+
+	/*
+	 * Notify initiating CPU that I've grabbed the data and am
+	 * about to execute the function
+	 */
+	atomic_inc(&call_data->started);
+	/*
+	 * At this point the info structure may be out of scope unless wait==1
+	 */
+	(*func)(info);
+	if (wait) {
+		smp_mb__before_atomic_inc();
+		atomic_inc(&call_data->finished);
+	}
+}
+
+extern struct gettimeofday_struct do_gtod;
+
+struct thread_info *current_set[NR_CPUS];
+
+DECLARE_PER_CPU(unsigned int, pvr);
+
+static void __devinit smp_store_cpu_info(int id)
+{
+	per_cpu(pvr, id) = mfspr(SPRN_PVR);
+}
+
+static void __init smp_create_idle(unsigned int cpu)
+{
+	struct task_struct *p;
+
+	/* create a process for the processor */
+	p = fork_idle(cpu);
+	if (IS_ERR(p))
+		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = p;
+#endif
+	current_set[cpu] = p->thread_info;
+	p->thread_info->cpu = cpu;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int cpu;
+
+	DBG("smp_prepare_cpus\n");
+
+	/*
+	 * setup_cpu may need to be called on the boot cpu.  We haven't
+	 * spun any cpus up yet, but let's be paranoid.
+	 */
+	BUG_ON(boot_cpuid != smp_processor_id());
+
+	/* Fixup boot cpu */
+	smp_store_cpu_info(boot_cpuid);
+	cpu_callin_map[boot_cpuid] = 1;
+
+	max_cpus = smp_ops->probe();
+
+	smp_space_timers(max_cpus);
+
+	for_each_cpu(cpu)
+		if (cpu != boot_cpuid)
+			smp_create_idle(cpu);
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+	BUG_ON(smp_processor_id() != boot_cpuid);
+
+	cpu_set(boot_cpuid, cpu_online_map);
+#ifdef CONFIG_PPC64
+	paca[boot_cpuid].__current = current;
+#endif
+	current_set[boot_cpuid] = current->thread_info;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* State of each CPU during hotplug phases */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
+int generic_cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (cpu == boot_cpuid)
+		return -EBUSY;
+
+	systemcfg->processorCount--;
+	cpu_clear(cpu, cpu_online_map);
+	fixup_irqs(cpu_online_map);
+	return 0;
+}
+
+int generic_cpu_enable(unsigned int cpu)
+{
+	/* Do the normal bootup if we haven't
+	 * already bootstrapped. */
+	if (system_state != SYSTEM_RUNNING)
+		return -ENOSYS;
+
+	/* get the target out of its holding state */
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	smp_wmb();
+
+	while (!cpu_online(cpu))
+		cpu_relax();
+
+	fixup_irqs(cpu_online_map);
+	/* counter the irq disable in fixup_irqs */
+	local_irq_enable();
+	return 0;
+}
+
+void generic_cpu_die(unsigned int cpu)
+{
+	int i;
+
+	for (i = 0; i < 100; i++) {
+		smp_rmb();
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+			return;
+		msleep(100);
+	}
+	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
+}
+
+void generic_mach_cpu_die(void)
+{
+	unsigned int cpu;
+
+	local_irq_disable();
+	cpu = smp_processor_id();
+	printk(KERN_DEBUG "CPU%d offline\n", cpu);
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	smp_wmb();
+	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+		cpu_relax();
+
+	flush_tlb_pending();
+	cpu_set(cpu, cpu_online_map);
+	local_irq_enable();
+}
+#endif
+
+static int __devinit cpu_enable(unsigned int cpu)
+{
+	if (smp_ops->cpu_enable)
+		return smp_ops->cpu_enable(cpu);
+
+	return -ENOSYS;
+}
+
+int __devinit __cpu_up(unsigned int cpu)
+{
+	int c;
+
+	secondary_ti = current_set[cpu];
+	if (!cpu_enable(cpu))
+		return 0;
+
+	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
+		return -EINVAL;
+
+#ifdef CONFIG_PPC64
+	paca[cpu].default_decr = tb_ticks_per_jiffy;
+#endif
+
+	/* Make sure callin-map entry is 0 (can be left over from a
+	 * CPU hotplug).
+	 */
+	cpu_callin_map[cpu] = 0;
+
+	/* The information for processor bringup must
+	 * be written out to main store before we release
+	 * the processor.
+	 */
+	smp_mb();
+
+	/* wake up cpus */
+	DBG("smp: kicking cpu %d\n", cpu);
+	smp_ops->kick_cpu(cpu);
+
+	/*
+	 * wait to see if the cpu made a callin (is actually up).
+	 * use this value that I found through experimentation.
+	 * -- Cort
+	 */
+	if (system_state < SYSTEM_RUNNING)
+		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
+			udelay(100);
+#ifdef CONFIG_HOTPLUG_CPU
+	else
+		/*
+		 * CPUs can take much longer to come up in the
+		 * hotplug case.  Wait five seconds.
+		 */
+		for (c = 25; c && !cpu_callin_map[cpu]; c--) {
+			msleep(200);
+		}
+#endif
+
+	if (!cpu_callin_map[cpu]) {
+		printk("Processor %u is stuck.\n", cpu);
+		return -ENOENT;
+	}
+
+	printk("Processor %u found.\n", cpu);
+
+	if (smp_ops->give_timebase)
+		smp_ops->give_timebase();
+
+	/* Wait until cpu puts itself in the online map */
+	while (!cpu_online(cpu))
+		cpu_relax();
+
+	return 0;
+}
+
+
+/* Activate a secondary processor. */
+int __devinit start_secondary(void *unused)
+{
+	unsigned int cpu = smp_processor_id();
+
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+
+	smp_store_cpu_info(cpu);
+	set_dec(tb_ticks_per_jiffy);
+	cpu_callin_map[cpu] = 1;
+
+	smp_ops->setup_cpu(cpu);
+	if (smp_ops->take_timebase)
+		smp_ops->take_timebase();
+
+	spin_lock(&call_lock);
+	cpu_set(cpu, cpu_online_map);
+	spin_unlock(&call_lock);
+
+	local_irq_enable();
+
+	cpu_idle();
+	return 0;
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	cpumask_t old_mask;
+
+	/* We want the setup_cpu() here to be called from CPU 0, but our
+	 * init thread may have been "borrowed" by another CPU in the
+	 * meantime, so we pin ourselves to CPU 0 for a short while.
+	 */
+	old_mask = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
+
+	smp_ops->setup_cpu(boot_cpuid);
+
+	set_cpus_allowed(current, old_mask);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpu_disable(void)
+{
+	if (smp_ops->cpu_disable)
+		return smp_ops->cpu_disable();
+
+	return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	if (smp_ops->cpu_die)
+		smp_ops->cpu_die(cpu);
+}
+#endif
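For context, this is how a hypothetical caller would be expected to use the smp_call_function() added above, going only by its documented contract (interrupts enabled, not from interrupt context, func fast and non-blocking, stack data safe when wait=1). A sketch against this patch's API, not code from the patch itself:

	#include <linux/kernel.h>
	#include <linux/smp.h>
	#include <asm/atomic.h>

	/* Runs on every other online CPU via the CALL_FUNCTION IPI. */
	static void count_me(void *info)
	{
		atomic_inc((atomic_t *)info);
	}

	static void example_caller(void)
	{
		atomic_t responders = ATOMIC_INIT(0);

		/* nonatomic is unused; wait=1 blocks until every remote CPU
		 * has finished count_me(), which is why &responders may
		 * safely live on this stack.  Returns 0 on success. */
		if (smp_call_function(count_me, &responders, 0, 1) == 0)
			printk("remote cpus that ran count_me: %d\n",
			       atomic_read(&responders));
	}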
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 31ee49c25014..bb2315997d45 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -35,43 +35,6 @@
 #include <asm/smp.h>
 #include <asm/mpic.h>
 
-extern unsigned long smp_chrp_cpu_nr;
-
-static int __init smp_chrp_probe(void)
-{
-	struct device_node *cpus = NULL;
-	unsigned int *reg;
-	int reglen;
-	int ncpus = 0;
-	int cpuid;
-	unsigned int phys;
-
-	/* Count CPUs in the device-tree */
-	cpuid = 1;	/* the boot cpu is logical cpu 0 */
-	while ((cpus = of_find_node_by_type(cpus, "cpu")) != NULL) {
-		phys = ncpus;
-		reg = (unsigned int *) get_property(cpus, "reg", &reglen);
-		if (reg && reglen >= sizeof(unsigned int))
-			/* hmmm, not having a reg property would be bad */
-			phys = *reg;
-		if (phys != boot_cpuid_phys) {
-			set_hard_smp_processor_id(cpuid, phys);
-			++cpuid;
-		}
-		++ncpus;
-	}
-
-	printk(KERN_INFO "CHRP SMP probe found %d cpus\n", ncpus);
-
-	/* Nothing more to do if less than 2 of them */
-	if (ncpus <= 1)
-		return 1;
-
-	mpic_request_ipis();
-
-	return ncpus;
-}
-
 static void __devinit smp_chrp_kick_cpu(int nr)
 {
 	*(unsigned long *)KERNELBASE = nr;
@@ -114,7 +77,7 @@ void __devinit smp_chrp_take_timebase(void)
 /* CHRP with openpic */
 struct smp_ops_t chrp_smp_ops = {
 	.message_pass = smp_mpic_message_pass,
-	.probe = smp_chrp_probe,
+	.probe = smp_mpic_probe,
 	.kick_cpu = smp_chrp_kick_cpu,
 	.setup_cpu = smp_chrp_setup_cpu,
 	.give_timebase = smp_chrp_give_timebase,