Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc/Kconfig                 |  9
-rw-r--r--  arch/ppc/kernel/head.S           | 28
-rw-r--r--  arch/ppc/kernel/idle.c           |  6
-rw-r--r--  arch/ppc/kernel/smp.c            | 44
-rw-r--r--  arch/ppc/platforms/pmac_sleep.S  |  2
-rw-r--r--  arch/ppc/platforms/pmac_smp.c    | 85
6 files changed, 113 insertions(+), 61 deletions(-)
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index e3f1ce33e642..347ea284140b 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -265,6 +265,15 @@ config PPC601_SYNC_FIX
 
           If in doubt, say Y here.
 
+config HOTPLUG_CPU
+        bool "Support for enabling/disabling CPUs"
+        depends on SMP && HOTPLUG && EXPERIMENTAL && PPC_PMAC
+        ---help---
+          Say Y here to be able to disable and re-enable individual
+          CPUs at runtime on SMP machines.
+
+          Say N if you are unsure.
+
 source arch/ppc/platforms/4xx/Kconfig
 source arch/ppc/platforms/85xx/Kconfig
 
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 55daf1210f32..1960fb8c259c 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -1023,23 +1023,21 @@ __secondary_start_gemini:
         andc    r4,r4,r3
         mtspr   SPRN_HID0,r4
         sync
-        bl      gemini_prom_init
         b       __secondary_start
 #endif /* CONFIG_GEMINI */
-        .globl  __secondary_start_psurge
-__secondary_start_psurge:
-        li      r24,1           /* cpu # */
-        b       __secondary_start_psurge99
-        .globl  __secondary_start_psurge2
-__secondary_start_psurge2:
-        li      r24,2           /* cpu # */
-        b       __secondary_start_psurge99
-        .globl  __secondary_start_psurge3
-__secondary_start_psurge3:
-        li      r24,3           /* cpu # */
-        b       __secondary_start_psurge99
-__secondary_start_psurge99:
-        /* we come in here with IR=0 and DR=1, and DBAT 0
+
+        .globl  __secondary_start_pmac_0
+__secondary_start_pmac_0:
+        /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+        li      r24,0
+        b       1f
+        li      r24,1
+        b       1f
+        li      r24,2
+        b       1f
+        li      r24,3
+1:
+        /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
            set to map the 0xf0000000 - 0xffffffff region */
         mfmsr   r0
         rlwinm  r0,r0,0,28,26   /* clear DR (0x10) */
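
The hunk above replaces the three per-CPU entry symbols with a single __secondary_start_pmac_0 whose leading entries are fixed 8-byte slots (one li plus one b, 4 bytes each), so a caller can reach the slot for CPU nr at base + nr*8, and the fake reset vector set up later in pmac_smp.c is 0x48000002 (branch absolute) plus that offset from KERNELBASE. A standalone, user-space sketch of just that arithmetic; SECONDARY_START_PMAC_0 is a made-up link address, while KERNELBASE and the branch encoding are taken from the patch:

#include <stdio.h>

#define KERNELBASE             0xc0000000UL  /* 32-bit ppc kernel base */
#define SECONDARY_START_PMAC_0 0xc0003000UL  /* hypothetical link address */

int main(void)
{
        for (int nr = 0; nr < 4; nr++) {
                /* each "li r24,N; b 1f" pair is 2 * 4 bytes = 8 bytes */
                unsigned long entry = SECONDARY_START_PMAC_0 + nr * 8;
                /* fake reset vector: 0x48000002 is "ba" (branch absolute) */
                unsigned long insn = 0x48000002UL + (entry - KERNELBASE);
                printf("cpu %d: entry 0x%lx, reset-vector insn 0x%08lx\n",
                       nr, entry, insn);
        }
        return 0;
}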
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index 53547b6de45b..fba29c876b62 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -22,6 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/sysctl.h>
+#include <linux/cpu.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -35,6 +36,7 @@
 void default_idle(void)
 {
         void (*powersave)(void);
+        int cpu = smp_processor_id();
 
         powersave = ppc_md.power_save;
 
@@ -44,7 +46,7 @@ void default_idle(void)
 #ifdef CONFIG_SMP
         else {
                 set_thread_flag(TIF_POLLING_NRFLAG);
-                while (!need_resched())
+                while (!need_resched() && !cpu_is_offline(cpu))
                         barrier();
                 clear_thread_flag(TIF_POLLING_NRFLAG);
         }
@@ -52,6 +54,8 @@ void default_idle(void)
         }
         if (need_resched())
                 schedule();
+        if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+                cpu_die();
 }
 
 /*
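
With these changes default_idle() also leaves its polling loop once the CPU has been marked offline and, while the system is running, calls cpu_die(). A minimal user-space model of that exit path, with pthreads and C11 atomics standing in for the real CPU and flags; only the flag names echo the patch, everything else is illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int need_resched_flag;
static atomic_int cpu_offline_flag;

static void *idle_loop(void *arg)
{
        (void)arg;
        /* while (!need_resched() && !cpu_is_offline(cpu)) barrier(); */
        while (!atomic_load(&need_resched_flag) &&
               !atomic_load(&cpu_offline_flag))
                ;                        /* spin, like the barrier() poll */
        if (atomic_load(&cpu_offline_flag))
                puts("idle loop: cpu offline, would call cpu_die()");
        else
                puts("idle loop: work pending, would call schedule()");
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, idle_loop, NULL);
        sleep(1);
        atomic_store(&cpu_offline_flag, 1);  /* simulates __cpu_disable() */
        pthread_join(t, NULL);
        return 0;
}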
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index e70b587b9e51..726fe7ce1747 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -45,6 +45,7 @@ cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 int smp_hw_index[NR_CPUS];
 struct thread_info *secondary_ti;
+static struct task_struct *idle_tasks[NR_CPUS];
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
@@ -286,7 +287,8 @@ static void __devinit smp_store_cpu_info(int id)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-        int num_cpus, i;
+        int num_cpus, i, cpu;
+        struct task_struct *p;
 
         /* Fixup boot cpu */
         smp_store_cpu_info(smp_processor_id());
@@ -308,6 +310,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
         if (smp_ops->space_timers)
                 smp_ops->space_timers(num_cpus);
+
+        for_each_cpu(cpu) {
+                if (cpu == smp_processor_id())
+                        continue;
+                /* create a process for the processor */
+                p = fork_idle(cpu);
+                if (IS_ERR(p))
+                        panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+                p->thread_info->cpu = cpu;
+                idle_tasks[cpu] = p;
+        }
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -334,12 +347,17 @@ int __devinit start_secondary(void *unused)
         set_dec(tb_ticks_per_jiffy);
         cpu_callin_map[cpu] = 1;
 
-        printk("CPU %i done callin...\n", cpu);
+        printk("CPU %d done callin...\n", cpu);
         smp_ops->setup_cpu(cpu);
-        printk("CPU %i done setup...\n", cpu);
-        local_irq_enable();
+        printk("CPU %d done setup...\n", cpu);
         smp_ops->take_timebase();
-        printk("CPU %i done timebase take...\n", cpu);
+        printk("CPU %d done timebase take...\n", cpu);
+
+        spin_lock(&call_lock);
+        cpu_set(cpu, cpu_online_map);
+        spin_unlock(&call_lock);
+
+        local_irq_enable();
 
         cpu_idle();
         return 0;
@@ -347,17 +365,11 @@ int __devinit start_secondary(void *unused)
 
 int __cpu_up(unsigned int cpu)
 {
-        struct task_struct *p;
         char buf[32];
         int c;
 
-        /* create a process for the processor */
-        /* only regs.msr is actually used, and 0 is OK for it */
-        p = fork_idle(cpu);
-        if (IS_ERR(p))
-                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-        secondary_ti = p->thread_info;
-        p->thread_info->cpu = cpu;
+        secondary_ti = idle_tasks[cpu]->thread_info;
+        mb();
 
         /*
          * There was a cache flush loop here to flush the cache
@@ -389,7 +401,11 @@ int __cpu_up(unsigned int cpu)
         printk("Processor %d found.\n", cpu);
 
         smp_ops->give_timebase();
-        cpu_set(cpu, cpu_online_map);
+
+        /* Wait until cpu puts itself in the online map */
+        while (!cpu_online(cpu))
+                cpu_relax();
+
         return 0;
 }
 
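
The net effect of the smp.c changes is that the idle tasks are forked up front in smp_prepare_cpus(), and the online handshake is inverted: start_secondary() marks the new CPU online (under call_lock) and __cpu_up() spins with cpu_relax() until that bit appears. A minimal user-space model of the handshake, with an atomic flag standing in for cpu_online_map; the names are illustrative, not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpu_online;            /* models the cpu_online_map bit */

static void *start_secondary(void *arg)
{
        (void)arg;
        puts("secondary: callin, setup, timebase done");
        atomic_store(&cpu_online, 1);    /* cpu_set(cpu, cpu_online_map) */
        puts("secondary: entering idle loop");
        return NULL;
}

int main(void)                           /* plays the __cpu_up() side */
{
        pthread_t t;
        pthread_create(&t, NULL, start_secondary, NULL);
        /* Wait until cpu puts itself in the online map */
        while (!atomic_load(&cpu_online))
                ;                        /* cpu_relax() in the kernel */
        puts("boot cpu: secondary is online");
        pthread_join(t, NULL);
        return 0;
}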
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S
index 8d67adc76925..88419c77ac43 100644
--- a/arch/ppc/platforms/pmac_sleep.S
+++ b/arch/ppc/platforms/pmac_sleep.S
@@ -161,6 +161,8 @@ _GLOBAL(low_sleep_handler)
         addi    r3,r3,sleep_storage@l
         stw     r5,0(r3)
 
+        .globl  low_cpu_die
+low_cpu_die:
         /* Flush & disable all caches */
         bl      flush_disable_caches
 
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c
index 8e049dab4e63..794a23994b82 100644
--- a/arch/ppc/platforms/pmac_smp.c
+++ b/arch/ppc/platforms/pmac_smp.c
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/hardirq.h>
+#include <linux/cpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
@@ -55,9 +56,7 @@
  * Powersurge (old powermac SMP) support.
  */
 
-extern void __secondary_start_psurge(void);
-extern void __secondary_start_psurge2(void);    /* Temporary horrible hack */
-extern void __secondary_start_psurge3(void);    /* Temporary horrible hack */
+extern void __secondary_start_pmac_0(void);
 
 /* Addresses for powersurge registers */
 #define HAMMERHEAD_BASE         0xf8000000
@@ -119,7 +118,7 @@ static volatile int sec_tb_reset = 0;
 static unsigned int pri_tb_hi, pri_tb_lo;
 static unsigned int pri_tb_stamp;
 
-static void __init core99_init_caches(int cpu)
+static void __devinit core99_init_caches(int cpu)
 {
         if (!cpu_has_feature(CPU_FTR_L2CR))
                 return;
@@ -346,7 +345,7 @@ static int __init smp_psurge_probe(void)
 
 static void __init smp_psurge_kick_cpu(int nr)
 {
-        void (*start)(void) = __secondary_start_psurge;
+        unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
         unsigned long a;
 
         /* may need to flush here if secondary bats aren't setup */
@@ -356,17 +355,7 @@ static void __init smp_psurge_kick_cpu(int nr)
 
         if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
 
-        /* setup entry point of secondary processor */
-        switch (nr) {
-        case 2:
-                start = __secondary_start_psurge2;
-                break;
-        case 3:
-                start = __secondary_start_psurge3;
-                break;
-        }
-
-        out_be32(psurge_start, __pa(start));
+        out_be32(psurge_start, start);
         mb();
 
         psurge_set_ipi(nr);
@@ -500,14 +489,14 @@ static int __init smp_core99_probe(void)
         return ncpus;
 }
 
-static void __init smp_core99_kick_cpu(int nr)
+static void __devinit smp_core99_kick_cpu(int nr)
 {
         unsigned long save_vector, new_vector;
         unsigned long flags;
 
         volatile unsigned long *vector
                  = ((volatile unsigned long *)(KERNELBASE+0x100));
-        if (nr < 1 || nr > 3)
+        if (nr < 0 || nr > 3)
                 return;
         if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
 
@@ -518,19 +507,9 @@ static void __init smp_core99_kick_cpu(int nr)
         save_vector = *vector;
 
         /* Setup fake reset vector that does
-         *   b __secondary_start_psurge - KERNELBASE
+         *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
          */
-        switch(nr) {
-        case 1:
-                new_vector = (unsigned long)__secondary_start_psurge;
-                break;
-        case 2:
-                new_vector = (unsigned long)__secondary_start_psurge2;
-                break;
-        case 3:
-                new_vector = (unsigned long)__secondary_start_psurge3;
-                break;
-        }
+        new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
         *vector = 0x48000002 + new_vector - KERNELBASE;
 
         /* flush data cache and inval instruction cache */
@@ -554,7 +533,7 @@ static void __init smp_core99_kick_cpu(int nr)
         if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
 }
 
-static void __init smp_core99_setup_cpu(int cpu_nr)
+static void __devinit smp_core99_setup_cpu(int cpu_nr)
 {
         /* Setup L2/L3 */
         if (cpu_nr != 0)
@@ -668,3 +647,47 @@ struct smp_ops_t core99_smp_ops __pmacdata = {
         .give_timebase  = smp_core99_give_timebase,
         .take_timebase  = smp_core99_take_timebase,
 };
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int __cpu_disable(void)
+{
+        cpu_clear(smp_processor_id(), cpu_online_map);
+
+        /* XXX reset cpu affinity here */
+        openpic_set_priority(0xf);
+        asm volatile("mtdec %0" : : "r" (0x7fffffff));
+        mb();
+        udelay(20);
+        asm volatile("mtdec %0" : : "r" (0x7fffffff));
+        return 0;
+}
+
+extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
+static int cpu_dead[NR_CPUS];
+
+void cpu_die(void)
+{
+        local_irq_disable();
+        cpu_dead[smp_processor_id()] = 1;
+        mb();
+        low_cpu_die();
+}
+
+void __cpu_die(unsigned int cpu)
+{
+        int timeout;
+
+        timeout = 1000;
+        while (!cpu_dead[cpu]) {
+                if (--timeout == 0) {
+                        printk("CPU %u refused to die!\n", cpu);
+                        break;
+                }
+                msleep(1);
+        }
+        cpu_callin_map[cpu] = 0;
+        cpu_dead[cpu] = 0;
+}
+
+#endif
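
The hotplug path added above works in two halves: __cpu_disable() drops the CPU from cpu_online_map and quiesces its interrupts, the idle loop then calls cpu_die(), which raises cpu_dead[] and enters low_cpu_die(); __cpu_die() on a surviving CPU polls cpu_dead[] for up to a second before giving up. A user-space model of that final handshake; threads and usleep() are stand-ins (usleep(1000) for msleep(1)), and only the flag and timeout shape come from the patch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_CPUS 4

static atomic_int cpu_dead[NR_CPUS];

static void *dying_cpu(void *arg)        /* models cpu_die() on the victim */
{
        int cpu = *(int *)arg;
        atomic_store(&cpu_dead[cpu], 1); /* cpu_dead[smp_processor_id()] = 1 */
        return NULL;                     /* low_cpu_die() never returns */
}

static void wait_for_cpu_death(int cpu)  /* models __cpu_die() */
{
        int timeout = 1000;
        while (!atomic_load(&cpu_dead[cpu])) {
                if (--timeout == 0) {
                        printf("CPU %d refused to die!\n", cpu);
                        return;
                }
                usleep(1000);            /* msleep(1) */
        }
        atomic_store(&cpu_dead[cpu], 0);
        printf("CPU %d is dead\n", cpu);
}

int main(void)
{
        int cpu = 1;
        pthread_t t;
        pthread_create(&t, NULL, dying_cpu, &cpu);
        wait_for_cpu_death(cpu);
        pthread_join(t, NULL);
        return 0;
}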