Diffstat (limited to 'arch/mips/kernel/smp.c')
-rw-r--r--  arch/mips/kernel/smp.c  54
1 file changed, 46 insertions(+), 8 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 63989e9df4f9..9d41dab90a80 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -37,7 +37,6 @@
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -53,9 +52,46 @@ int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-extern void __init calibrate_delay(void);
 extern void cpu_idle(void);
 
+/* Number of TCs (or siblings in Intel speak) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* representing the TCs (or siblings in Intel speak) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+        int i;
+
+        cpu_set(cpu, cpu_sibling_setup_map);
+
+        if (smp_num_siblings > 1) {
+                for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                        if (cpu_data[cpu].core == cpu_data[i].core) {
+                                cpu_set(i, cpu_sibling_map[cpu]);
+                                cpu_set(cpu, cpu_sibling_map[i]);
+                        }
+                }
+        } else
+                cpu_set(cpu, cpu_sibling_map[cpu]);
+}
+
+struct plat_smp_ops *mp_ops;
+
+__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+{
+        if (ops)
+                printk(KERN_WARNING "Overriding previous set SMP ops\n");
+
+        mp_ops = ops;
+}
+
 /*
  * First C code run on the secondary CPUs after being started up by
  * the master.
@@ -72,7 +108,7 @@ asmlinkage __cpuinit void start_secondary(void)
         cpu_report();
         per_cpu_trap_init();
         mips_clockevent_init();
-        prom_init_secondary();
+        mp_ops->init_secondary();
 
         /*
          * XXX parity protection should be folded in here when it's converted
@@ -84,7 +120,8 @@ asmlinkage __cpuinit void start_secondary(void)
         cpu = smp_processor_id();
         cpu_data[cpu].udelay_val = loops_per_jiffy;
 
-        prom_smp_finish();
+        mp_ops->smp_finish();
+        set_cpu_sibling_map(cpu);
 
         cpu_set(cpu, cpu_callin_map);
 
@@ -155,7 +192,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
         smp_mb();
 
         /* Send a message to all other CPUs and wait for them to respond */
-        core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+        mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 
         /* Wait for response */
         /* FIXME: lock-up detection, backtrace on lock-up */
@@ -249,7 +286,7 @@ void smp_send_stop(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-        prom_cpus_done();
+        mp_ops->cpus_done();
 }
 
 /* called from main before smp_init() */
@@ -257,7 +294,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
         init_new_context(current, &init_mm);
         current_thread_info()->cpu = 0;
-        plat_prepare_cpus(max_cpus);
+        mp_ops->prepare_cpus(max_cpus);
+        set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
         cpu_present_map = cpu_possible_map;
 #endif
@@ -295,7 +333,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
         if (IS_ERR(idle))
                 panic(KERN_ERR "Fork failed for CPU %d", cpu);
 
-        prom_boot_secondary(cpu, idle);
+        mp_ops->boot_secondary(cpu, idle);
 
         /*
          * Trust is futile. We should really have timeouts ...