author		Ralf Baechle <ralf@linux-mips.org>	2007-11-19 07:23:51 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2008-01-29 05:14:57 -0500
commit		87353d8ac39c52784da605ecbe965ecdfad609ad (patch)
tree		c95ce7cbe9b099c21cab71a195621801b04bc05a /arch/mips/kernel
parent		19388fb092d89e179575bd0b44f51b57e175edf5 (diff)
[MIPS] SMP: Call platform methods via ops structure.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
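
This patch replaces the fixed-name platform hooks that the generic MIPS SMP code used to call directly (prom_boot_secondary(), prom_init_secondary(), prom_smp_finish(), prom_cpus_done(), plat_smp_setup(), plat_prepare_cpus(), core_send_ipi()) with indirect calls through a per-platform struct plat_smp_ops, registered once at boot. A minimal sketch of the pattern follows; the member signatures are inferred from the vsmp_smp_ops initializer and call sites in the diff below, while the actual declaration lives in the new <asm/smp-ops.h> header, which is outside the arch/mips/kernel diffstat shown here:

struct plat_smp_ops {
	void (*send_ipi_single)(int cpu, unsigned int action);
	void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
	void (*init_secondary)(void);
	void (*smp_finish)(void);
	void (*cpus_done)(void);
	void (*boot_secondary)(int cpu, struct task_struct *idle);
	void (*smp_setup)(void);
	void (*prepare_cpus)(unsigned int max_cpus);
};

/* Generic code keeps one pointer to the active implementation and
 * dispatches through it, e.g. mp_ops->boot_secondary(cpu, idle)
 * where it previously called prom_boot_secondary(cpu, idle). */
extern struct plat_smp_ops *mp_ops;
extern void register_smp_ops(struct plat_smp_ops *ops);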
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--	arch/mips/kernel/mips-mt.c	|   1
-rw-r--r--	arch/mips/kernel/setup.c	|   3
-rw-r--r--	arch/mips/kernel/smp-mt.c	| 193
-rw-r--r--	arch/mips/kernel/smp.c		|  23
-rw-r--r--	arch/mips/kernel/smtc-proc.c	|   1
-rw-r--r--	arch/mips/kernel/smtc.c		|   1
6 files changed, 123 insertions, 99 deletions
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 3d6b1ec1f328..640fb0cc6e39 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -17,7 +17,6 @@
 #include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/mipsmtregs.h>
 #include <asm/r4kcache.h>
 #include <asm/cacheflush.h>
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 7b4418dd5857..269c252d956f 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -29,6 +29,7 @@
 #include <asm/cpu.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/smp-ops.h>
 #include <asm/system.h>
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
 	arch_mem_init(cmdline_p);
 
 	resource_init();
-#ifdef CONFIG_SMP
 	plat_smp_setup();
-#endif
 }
 
 static int __init fpu_disable(char *s)
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2ab0b7eeaa7e..89e6f6aa5166 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -215,72 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
 	write_tc_c0_tchalt(TCHALT_H);
 }
 
-/*
- * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
- */
-void __init plat_smp_setup(void)
-{
-	unsigned int mvpconf0, ntc, tc, ncpu = 0;
-	unsigned int nvpe;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* If we have an FPU, enroll ourselves in the FPU-full mask */
-	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-	if (!cpu_has_mipsmt)
-		return;
-
-	/* disable MT so we can configure */
-	dvpe();
-	dmt();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	mvpconf0 = read_c0_mvpconf0();
-	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
-
-	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-	smp_num_siblings = nvpe;
-
-	/* we'll always have more TC's than VPE's, so loop setting everything
-	   to a sensible state */
-	for (tc = 0; tc <= ntc; tc++) {
-		settc(tc);
-
-		smp_tc_init(tc, mvpconf0);
-		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
-	}
-
-	/* Release config state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/* We'll wait until starting the secondaries before starting MVPE */
-
-	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
-}
-
-void __init plat_prepare_cpus(unsigned int max_cpus)
-{
-	mips_mt_set_cpuoptions();
-
-	/* set up ipi interrupts */
-	if (cpu_has_vint) {
-		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-	}
-
-	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
-	setup_irq(cpu_ipi_resched_irq, &irq_resched);
-	setup_irq(cpu_ipi_call_irq, &irq_call);
-
-	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+static void vsmp_send_ipi_single(int cpu, unsigned int action)
+{
+	int i;
+	unsigned long flags;
+	int vpflags;
+
+	local_irq_save(flags);
+
+	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
+
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		i = C_SW1;
+		break;
+
+	case SMP_RESCHEDULE_YOURSELF:
+	default:
+		i = C_SW0;
+		break;
+	}
+
+	/* 1:1 mapping of vpe and tc... */
+	settc(cpu);
+	write_vpe_c0_cause(read_vpe_c0_cause() | i);
+	evpe(vpflags);
+
+	local_irq_restore(flags);
+}
+
+static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		vsmp_send_ipi_single(i, action);
+}
+
+static void __cpuinit vsmp_init_secondary(void)
+{
+	/* Enable per-cpu interrupts */
+
+	/* This is Malta specific: IPI,performance and timer inetrrupts */
+	write_c0_status((read_c0_status() & ~ST0_IM ) |
+			(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+}
+
+static void __cpuinit vsmp_smp_finish(void)
+{
+	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	local_irq_enable();
+}
+
+static void vsmp_cpus_done(void)
+{
 }
 
 /*
@@ -291,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
  * (unsigned long)idle->thread_info the gp
  * assumes a 1:1 mapping of TC => VPE
  */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
 {
 	struct thread_info *gp = task_thread_info(idle);
 	dvpe();
@@ -325,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
 	evpe(EVPE_ENABLE);
 }
 
-void __cpuinit prom_init_secondary(void)
-{
-	/* Enable per-cpu interrupts */
-
-	/* This is Malta specific: IPI,performance and timer inetrrupts */
-	write_c0_status((read_c0_status() & ~ST0_IM ) |
-			(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
-}
-
-void __cpuinit prom_smp_finish(void)
-{
-	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* If we have an FPU, enroll ourselves in the FPU-full mask */
-	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
-	local_irq_enable();
-}
-
-void prom_cpus_done(void)
-{
-}
-
-void core_send_ipi(int cpu, unsigned int action)
-{
-	int i;
-	unsigned long flags;
-	int vpflags;
-
-	local_irq_save(flags);
-
-	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
-
-	switch (action) {
-	case SMP_CALL_FUNCTION:
-		i = C_SW1;
-		break;
-
-	case SMP_RESCHEDULE_YOURSELF:
-	default:
-		i = C_SW0;
-		break;
-	}
-
-	/* 1:1 mapping of vpe and tc... */
-	settc(cpu);
-	write_vpe_c0_cause(read_vpe_c0_cause() | i);
-	evpe(vpflags);
-
-	local_irq_restore(flags);
-}
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPU's are in a sensible state before we boot any of the
+ * secondarys
+ */
+static void __init vsmp_smp_setup(void)
+{
+	unsigned int mvpconf0, ntc, tc, ncpu = 0;
+	unsigned int nvpe;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+	if (!cpu_has_mipsmt)
+		return;
+
+	/* disable MT so we can configure */
+	dvpe();
+	dmt();
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	mvpconf0 = read_c0_mvpconf0();
+	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
+
+	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	smp_num_siblings = nvpe;
+
+	/* we'll always have more TC's than VPE's, so loop setting everything
+	   to a sensible state */
+	for (tc = 0; tc <= ntc; tc++) {
+		settc(tc);
+
+		smp_tc_init(tc, mvpconf0);
+		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+	}
+
+	/* Release config state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	/* We'll wait until starting the secondaries before starting MVPE */
+
+	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+static void __init vsmp_prepare_cpus(unsigned int max_cpus)
+{
+	mips_mt_set_cpuoptions();
+
+	/* set up ipi interrupts */
+	if (cpu_has_vint) {
+		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+	}
+
+	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+
+	setup_irq(cpu_ipi_resched_irq, &irq_resched);
+	setup_irq(cpu_ipi_call_irq, &irq_call);
+
+	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
+	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+}
+
+struct plat_smp_ops vsmp_smp_ops = {
+	.send_ipi_single	= vsmp_send_ipi_single,
+	.send_ipi_mask		= vsmp_send_ipi_mask,
+	.init_secondary		= vsmp_init_secondary,
+	.smp_finish		= vsmp_smp_finish,
+	.cpus_done		= vsmp_cpus_done,
+	.boot_secondary		= vsmp_boot_secondary,
+	.smp_setup		= vsmp_smp_setup,
+	.prepare_cpus		= vsmp_prepare_cpus,
+};
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 335be9bcf0dc..1e5dfc28294a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -37,7 +37,6 @@
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -84,6 +83,16 @@ static inline void set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, cpu_sibling_map[cpu]);
 }
 
+struct plat_smp_ops *mp_ops;
+
+__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+{
+	if (ops)
+		printk(KERN_WARNING "Overriding previous set SMP ops\n");
+
+	mp_ops = ops;
+}
+
 /*
  * First C code run on the secondary CPUs after being started up by
  * the master.
@@ -100,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_report();
 	per_cpu_trap_init();
 	mips_clockevent_init();
-	prom_init_secondary();
+	mp_ops->init_secondary();
 
 	/*
 	 * XXX parity protection should be folded in here when it's converted
@@ -112,7 +121,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
-	prom_smp_finish();
+	mp_ops->smp_finish();
 	set_cpu_sibling_map(cpu);
 
 	cpu_set(cpu, cpu_callin_map);
@@ -184,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
 	smp_mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
 	/* FIXME: lock-up detection, backtrace on lock-up */
@@ -278,7 +287,7 @@ void smp_send_stop(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	prom_cpus_done();
+	mp_ops->cpus_done();
 }
 
 /* called from main before smp_init() */
@@ -286,7 +295,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
-	plat_prepare_cpus(max_cpus);
+	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
 	cpu_present_map = cpu_possible_map;
@@ -325,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	if (IS_ERR(idle))
 		panic(KERN_ERR "Fork failed for CPU %d", cpu);
 
-	prom_boot_secondary(cpu, idle);
+	mp_ops->boot_secondary(cpu, idle);
 
 	/*
 	 * Trust is futile. We should really have timeouts ...
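
Since the diffstat is limited to arch/mips/kernel, the platform-side call sites of register_smp_ops() are not visible here; presumably each board's early setup now selects its SMP implementation along these lines (illustrative sketch only; plat_setup_smp() is a hypothetical name, while vsmp_smp_ops is the structure exported by smp-mt.c above):

extern struct plat_smp_ops vsmp_smp_ops;

/* Hypothetical platform hook: pick the MT/VSMP implementation. */
void __init plat_setup_smp(void)
{
	register_smp_ops(&vsmp_smp_ops);
}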
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index 6f3709996172..fe256559c997 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -14,7 +14,6 @@
 #include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
 #include <linux/proc_fs.h>
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 9c92d42996cb..85f700e58131 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -16,7 +16,6 @@
 #include <asm/hazards.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
 #include <asm/time.h>