Diffstat (limited to 'arch')
 arch/powerpc/include/asm/machdep.h              |   3
 arch/powerpc/include/asm/smp.h                  |   5
 arch/powerpc/kernel/head_32.S                   |   9
 arch/powerpc/kernel/head_64.S                   |   7
 arch/powerpc/kernel/idle_power4.S               |  21
 arch/powerpc/kernel/irq.c                       |   3
 arch/powerpc/kernel/smp.c                       | 153
 arch/powerpc/kernel/time.c                      |  15
 arch/powerpc/platforms/powermac/pmac.h          |   1
 arch/powerpc/platforms/powermac/setup.c         |  56
 arch/powerpc/platforms/powermac/smp.c           | 154
 arch/powerpc/platforms/pseries/offline_states.h |   2
 12 files changed, 238 insertions(+), 191 deletions(-)
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index fe56a23e1ff0..e4f01915fbb0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -35,9 +35,9 @@ struct smp_ops_t {
 	int (*probe)(void);
 	void (*kick_cpu)(int nr);
 	void (*setup_cpu)(int nr);
+	void (*bringup_done)(void);
 	void (*take_timebase)(void);
 	void (*give_timebase)(void);
-	int (*cpu_enable)(unsigned int nr);
 	int (*cpu_disable)(void);
 	void (*cpu_die)(unsigned int nr);
 	int (*cpu_bootable)(unsigned int nr);
@@ -267,7 +267,6 @@ struct machdep_calls {
 
 extern void e500_idle(void);
 extern void power4_idle(void);
-extern void power4_cpu_offline_powersave(void);
 extern void ppc6xx_idle(void);
 extern void book3e_idle(void);
 
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 66e237bbe15f..a902a0d3ae0d 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -36,15 +36,16 @@ extern void cpu_die(void);
 
 extern void smp_send_debugger_break(int cpu);
 extern void smp_message_recv(int);
+extern void start_secondary_resume(void);
 
 DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(const struct cpumask *map);
+extern void migrate_irqs(void);
 int generic_cpu_disable(void);
-int generic_cpu_enable(unsigned int cpu);
 void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
+void generic_set_cpu_dead(unsigned int cpu);
 #endif
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f4..c5c24beb8387 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -890,6 +890,15 @@ __secondary_start:
 	mtspr	SPRN_SRR1,r4
 	SYNC
 	RFI
+
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r3,0
+	stw	r3,0(r1)	/* Zero the stack frame pointer */
+	bl	start_secondary
+	b	.
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23df7c85..271140b38b6f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start)
 	add	r13,r13,r4		/* for this processor.		*/
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
 
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
+
 	/* Create a temp kernel stack for use before relocation is on.	*/
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709eeedc..ba3195478600 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	isync
 	b	1b
 
-_GLOBAL(power4_cpu_offline_powersave)
-	/* Go to NAP now */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1			/* hard-disable interrupts */
-	li	r0,1
-	li	r6,0
-	stb	r0,PACAHARDIRQEN(r13)	/* we'll hard-enable shortly */
-	stb	r6,PACASOFTIRQEN(r13)	/* soft-disable irqs */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsrd	r7
-	isync
-	blr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 63625e0650b5..f621b7d2d869 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 981360509172..cbdbb14be4b0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads, this can be reused instead of creating
+* a new thread. Also avoids complicated thread destroy functionality
+* for idle threads.
+*/
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
296 294
297void __devinit smp_prepare_boot_cpu(void) 295void __devinit smp_prepare_boot_cpu(void)
@@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +315,8 @@ int generic_cpu_disable(void)
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
-	return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
 #endif
+	migrate_irqs();
 	return 0;
 }
 
@@ -362,37 +338,89 @@ void generic_mach_cpu_die(void)
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
 {
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu = cpu,
+		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid to
+	 * reschedule the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
 
-	return -ENOSYS;
+	return 0;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
@@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+#endif
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused)
 	local_irq_enable();
 
 	cpu_idle();
-	return 0;
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
@@ -660,5 +697,9 @@ void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index aa9269600ca2..375480c56eb9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -577,14 +577,21 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions.
+	 */
+	set_dec(DECREMENTER_MAX);
+
+	/* Some implementations of hotplug will get timer interrupts while
+	 * offline, just ignore these
+	 */
+	if (!cpu_online(smp_processor_id()))
+		return;
+
 	trace_timer_interrupt_entry(regs);
 
 	__get_cpu_var(irq_stat).timer_irqs++;
 
-	/* Ensure a positive value is written to the decrementer, or else
-	 * some CPUs will continuue to take decrementer exceptions */
-	set_dec(DECREMENTER_MAX);
-
 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index f0bc08f6c1f0..20468f49aec0 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,7 +33,6 @@ extern void pmac_setup_pci_dma(void);
 extern void pmac_check_ht_link(void);
 
 extern void pmac_setup_smp(void);
-extern void pmac32_cpu_die(void);
 extern void low_cpu_die(void) __attribute__((noreturn));
 
 extern int pmac_nvram_init(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index d5aceb7fb125..aa45281bd296 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
 		return PCI_PROBE_NORMAL;
 	return PCI_PROBE_DEVTREE;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* access per cpu vars from generic smp.c */
-DECLARE_PER_CPU(int, cpu_state);
-
-static void pmac64_cpu_die(void)
-{
-	/*
-	 * turn off as much as possible, we'll be
-	 * kicked out as this will only be invoked
-	 * on core99 platforms for now ...
-	 */
-
-	printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-	smp_wmb();
-
-	/*
-	 * during the path that leads here preemption is disabled,
-	 * reenable it now so that when coming up preempt count is
-	 * zero correctly
-	 */
-	preempt_enable();
-
-	/*
-	 * hard-disable interrupts for the non-NAP case, the NAP code
-	 * needs to re-enable interrupts (but soft-disables them)
-	 */
-	hard_irq_disable();
-
-	while (1) {
-		/* let's not take timer interrupts too often ... */
-		set_dec(0x7fffffff);
-
-		/* should always be true at this point */
-		if (cpu_has_feature(CPU_FTR_CAN_NAP))
-			power4_cpu_offline_powersave();
-		else {
-			HMT_low();
-			HMT_very_low();
-		}
-	}
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
 #endif /* CONFIG_PPC64 */
 
 define_machine(powermac) {
@@ -726,15 +681,4 @@ define_machine(powermac) {
 	.pcibios_after_init = pmac_pcibios_after_init,
 	.phys_mem_access_prot = pci_phys_mem_access_prot,
 #endif
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_PPC64
-	.cpu_die = pmac64_cpu_die,
-#endif
-#ifdef CONFIG_PPC32
-	.cpu_die = pmac32_cpu_die,
-#endif
-#endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
-	.cpu_die = generic_mach_cpu_die,
-#endif
 };
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index c95215f4f8b6..a830c5e80657 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -840,92 +840,149 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 
 	/* Setup openpic */
 	mpic_setup_this_cpu();
+}
 
-	if (cpu_nr == 0) {
-#ifdef CONFIG_PPC64
-		extern void g5_phy_disable_cpu1(void);
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_core99_cpu_notify(struct notifier_block *self,
+				 unsigned long action, void *hcpu)
+{
+	int rc;
 
-		/* Close i2c bus if it was used for tb sync */
+	switch(action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		/* Open i2c bus if it was used for tb sync */
 		if (pmac_tb_clock_chip_host) {
-			pmac_i2c_close(pmac_tb_clock_chip_host);
-			pmac_tb_clock_chip_host = NULL;
+			rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+			if (rc) {
+				pr_err("Failed to open i2c bus for time sync\n");
+				return notifier_from_errno(rc);
+			}
 		}
+		break;
+	case CPU_ONLINE:
+	case CPU_UP_CANCELED:
+		/* Close i2c bus if it was used for tb sync */
+		if (pmac_tb_clock_chip_host)
+			pmac_i2c_close(pmac_tb_clock_chip_host);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
 
-		/* If we didn't start the second CPU, we must take
-		 * it off the bus
-		 */
-		if (of_machine_is_compatible("MacRISC4") &&
-		    num_online_cpus() < 2)
-			g5_phy_disable_cpu1();
-#endif /* CONFIG_PPC64 */
+static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
+	.notifier_call = smp_core99_cpu_notify,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void __init smp_core99_bringup_done(void)
+{
+#ifdef CONFIG_PPC64
+	extern void g5_phy_disable_cpu1(void);
+
+	/* Close i2c bus if it was used for tb sync */
+	if (pmac_tb_clock_chip_host)
+		pmac_i2c_close(pmac_tb_clock_chip_host);
 
-		if (ppc_md.progress)
-			ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+	/* If we didn't start the second CPU, we must take
+	 * it off the bus.
+	 */
+	if (of_machine_is_compatible("MacRISC4") &&
+	    num_online_cpus() < 2) {
+		set_cpu_present(1, false);
+		g5_phy_disable_cpu1();
 	}
-}
+#endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&smp_core99_cpu_nb);
+#endif
+	if (ppc_md.progress)
+		ppc_md.progress("smp_core99_bringup_done", 0x349);
+}
 
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+#ifdef CONFIG_HOTPLUG_CPU
 
-int smp_core99_cpu_disable(void)
+static int smp_core99_cpu_disable(void)
 {
-	set_cpu_online(smp_processor_id(), false);
+	int rc = generic_cpu_disable();
+	if (rc)
+		return rc;
 
-	/* XXX reset cpu affinity here */
 	mpic_cpu_set_priority(0xf);
-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
-	mb();
-	udelay(20);
-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
 	return 0;
 }
 
-static int cpu_dead[NR_CPUS];
+#ifdef CONFIG_PPC32
 
-void pmac32_cpu_die(void)
+static void pmac_cpu_die(void)
 {
+	int cpu = smp_processor_id();
+
 	local_irq_disable();
-	cpu_dead[smp_processor_id()] = 1;
+	idle_task_exit();
+	pr_debug("CPU%d offline\n", cpu);
+	generic_set_cpu_dead(cpu);
+	smp_wmb();
 	mb();
 	low_cpu_die();
 }
 
-void smp_core99_cpu_die(unsigned int cpu)
+#else /* CONFIG_PPC32 */
+
+static void pmac_cpu_die(void)
 {
-	int timeout;
+	int cpu = smp_processor_id();
 
-	timeout = 1000;
-	while (!cpu_dead[cpu]) {
-		if (--timeout == 0) {
-			printk("CPU %u refused to die!\n", cpu);
-			break;
-		}
-		msleep(1);
+	local_irq_disable();
+	idle_task_exit();
+
+	/*
+	 * turn off as much as possible, we'll be
+	 * kicked out as this will only be invoked
+	 * on core99 platforms for now ...
+	 */
+
+	printk(KERN_INFO "CPU#%d offline\n", cpu);
+	generic_set_cpu_dead(cpu);
+	smp_wmb();
+
+	/*
+	 * Re-enable interrupts. The NAP code needs to enable them
+	 * anyways, do it now so we deal with the case where one already
+	 * happened while soft-disabled.
+	 * We shouldn't get any external interrupts, only decrementer, and the
+	 * decrementer handler is safe for use on offline CPUs
+	 */
+	local_irq_enable();
+
+	while (1) {
+		/* let's not take timer interrupts too often ... */
+		set_dec(0x7fffffff);
+
+		/* Enter NAP mode */
+		power4_idle();
 	}
-	cpu_dead[cpu] = 0;
 }
 
-#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */
+#endif /* else CONFIG_PPC32 */
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /* Core99 Macs (dual G4s and G5s) */
 struct smp_ops_t core99_smp_ops = {
 	.message_pass = smp_mpic_message_pass,
 	.probe = smp_core99_probe,
+	.bringup_done = smp_core99_bringup_done,
 	.kick_cpu = smp_core99_kick_cpu,
 	.setup_cpu = smp_core99_setup_cpu,
 	.give_timebase = smp_core99_give_timebase,
 	.take_timebase = smp_core99_take_timebase,
 #if defined(CONFIG_HOTPLUG_CPU)
-# if defined(CONFIG_PPC32)
 	.cpu_disable = smp_core99_cpu_disable,
-	.cpu_die = smp_core99_cpu_die,
-# endif
-# if defined(CONFIG_PPC64)
-	.cpu_disable = generic_cpu_disable,
 	.cpu_die = generic_cpu_die,
-	/* intentionally do *NOT* assign cpu_enable,
-	 * the generic code will use kick_cpu then! */
-# endif
 #endif
 };
 
@@ -957,5 +1014,10 @@ void __init pmac_setup_smp(void)
 		smp_ops = &psurge_smp_ops;
 	}
 #endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_HOTPLUG_CPU
+	ppc_md.cpu_die = pmac_cpu_die;
+#endif
 }
 
+
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
index 75a6f480d931..08672d9136ab 100644
--- a/arch/powerpc/platforms/pseries/offline_states.h
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu)
 #endif
 
 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
-extern int start_secondary(void);
-extern void start_secondary_resume(void);
 #endif