Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/btext.c            |   2
-rw-r--r--  arch/powerpc/kernel/crash.c            |   6
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S   |   2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S   |   2
-rw-r--r--  arch/powerpc/kernel/head_32.S          |   9
-rw-r--r--  arch/powerpc/kernel/head_40x.S         |   2
-rw-r--r--  arch/powerpc/kernel/head_44x.S         |   2
-rw-r--r--  arch/powerpc/kernel/head_64.S          |   9
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S   |   2
-rw-r--r--  arch/powerpc/kernel/idle_power4.S      |  21
-rw-r--r--  arch/powerpc/kernel/irq.c              |   3
-rw-r--r--  arch/powerpc/kernel/l2cr_6xx.S         |   2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c          |   2
-rw-r--r--  arch/powerpc/kernel/paca.c             |   2
-rw-r--r--  arch/powerpc/kernel/perf_event.c       |   2
-rw-r--r--  arch/powerpc/kernel/ppc_save_regs.S    |   2
-rw-r--r--  arch/powerpc/kernel/prom.c             |   4
-rw-r--r--  arch/powerpc/kernel/ptrace.c           |   2
-rw-r--r--  arch/powerpc/kernel/rtasd.c            |   2
-rw-r--r--  arch/powerpc/kernel/setup-common.c     |   3
-rw-r--r--  arch/powerpc/kernel/smp.c              | 153
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S        |   2
-rw-r--r--  arch/powerpc/kernel/time.c             |  15
-rw-r--r--  arch/powerpc/kernel/traps.c            |   2
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c       |   2
-rw-r--r--  arch/powerpc/kernel/vdso32/sigtramp.S  |   2
-rw-r--r--  arch/powerpc/kernel/vdso64/sigtramp.S  |   2
27 files changed, 153 insertions, 106 deletions
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 625942ae558..60b3e377b1e 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void)
 
 /* This function can be used to enable the early boot text when doing
  * OF booting or within bootx init. It must be followed by a btext_unmap()
- * call before the logical address becomes unuseable
+ * call before the logical address becomes unusable
  */
 void __init btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address)
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d569e2aff1..3d3d416339d 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#ifdef CONFIG_PPC_STD_MMU_64
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 static void crash_kexec_wait_realmode(int cpu)
 {
 	unsigned int msecs;
@@ -188,6 +188,8 @@ static void crash_kexec_wait_realmode(int cpu)
 	}
 	mb();
 }
+#else
+static inline void crash_kexec_wait_realmode(int cpu) {}
 #endif
 
 /*
@@ -344,9 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 	crash_kexec_wait_realmode(crashing_cpu);
-#endif
 
 	machine_kexec_mask_interrupts();
 
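
The crash.c hunks apply a common kernel idiom: rather than wrapping the call site in #ifdef, an empty static inline stub is supplied when the feature is configured out, so crash_kexec_wait_realmode() can be called unconditionally and the stub compiles away. A minimal sketch of the idiom, using hypothetical names rather than the patch's own:

	#ifdef CONFIG_MY_FEATURE
	void my_feature_hook(int cpu);			/* real version, defined elsewhere */
	#else
	static inline void my_feature_hook(int cpu) { }	/* expands to nothing */
	#endif

	void caller(void)
	{
		my_feature_hook(0);	/* no #ifdef needed at the call site */
	}
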
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 5c43063d250..9651acc3504 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -379,7 +379,7 @@ interrupt_end_book3e:
 	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
 	b	system_call_common
 
-/* Auxillary Processor Unavailable Interrupt */
+/* Auxiliary Processor Unavailable Interrupt */
 	START_EXCEPTION(ap_unavailable);
 	NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c532cb2c927..aeb739e1876 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -5,7 +5,7 @@
  * handling and other fixed offset specific things.
  *
  * This file is meant to be #included from head_64.S due to
- * position dependant assembly.
+ * position dependent assembly.
  *
  * Most of this originates from head_64.S and thus has the same
  * copyright history.
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f..c5c24beb838 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -890,6 +890,15 @@ __secondary_start:
 	mtspr	SPRN_SRR1,r4
 	SYNC
 	RFI
+
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r3,0
+	std	r3,0(r1)	/* Zero the stack frame pointer */
+	bl	start_secondary
+	b	.
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
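
start_secondary_resume gives a soft-offlined CPU a clean way back into the kernel: it masks the stack pointer down to its THREAD_SIZE-aligned base to recover current_thread_info(), resets r1 to the top of that stack, zeroes the frame back-chain word so stack walkers terminate, and re-enters start_secondary(). Roughly, in C terms (a conceptual sketch only; the real code must be assembly because it discards the live stack, and switch_stack_and_call() is a hypothetical helper):

	void start_secondary_resume(void)
	{
		unsigned long sp = (unsigned long)current_thread_info()
				   + THREAD_SIZE - STACK_FRAME_OVERHEAD;

		*(unsigned long *)sp = 0;		/* terminate the back-chain */
		switch_stack_and_call(sp, start_secondary);
		/* never returns */
	}
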
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9dd21a8c4d5..a91626d87fc 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -766,7 +766,7 @@ DataAccess:
  * miss get to this point to load the TLB.
  * r10 - TLB_TAG value
  * r11 - Linux PTE
- * r12, r9 - avilable to use
+ * r12, r9 - available to use
  * PID - loaded with proper value when we get here
  * Upon exit, we reload everything and RFI.
  * Actually, it will fit now, but oh well.....a common place
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index cbb3436b592..5e12b741ba5 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -178,7 +178,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23df7c8..3a319f9c9d3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -40,7 +40,7 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
 
-/* The physical memory is layed out such that the secondary processor
+/* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
  * using the layout described in exceptions-64s.S
  */
@@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start)
 	add	r13,r13,r4		/* for this processor.          */
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
 
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
+
 	/* Create a temp kernel stack for use before relocation is on. */
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3e02710d956..5ecf54cfa7d 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -326,7 +326,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709eeed..ba319547860 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	isync
 	b	1b
 
-_GLOBAL(power4_cpu_offline_powersave)
-	/* Go to NAP now */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1			/* hard-disable interrupts */
-	li	r0,1
-	li	r6,0
-	stb	r0,PACAHARDIRQEN(r13)	/* we'll hard-enable shortly */
-	stb	r6,PACASOFTIRQEN(r13)	/* soft-disable irqs */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsrd	r7
-	isync
-	blr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 63625e0650b..f621b7d2d86 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
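
migrate_irqs() is the old fixup_irqs() with the only mask callers ever passed, cpu_online_mask, folded into the body, so the hotplug path can call it with no arguments. The loop itself is untouched by this hunk; paraphrased (not the exact code), it retargets every movable interrupt whose affinity no longer intersects an online CPU:

	for_each_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc || (desc->status & IRQ_PER_CPU))
			continue;			/* per-CPU IRQs cannot move */
		cpumask_and(mask, desc->affinity, map);	/* map == cpu_online_mask */
		if (cpumask_empty(mask))		/* affinity pointed only at dead CPUs */
			cpumask_copy(mask, map);	/* fall back to all online CPUs */
		if (desc->chip && desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
	}
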
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 2a2f3c3f6d8..97ec8557f97 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	/**** Might be a good idea to set L2DO here - to prevent instructions
 	      from getting into the cache. But since we invalidate
 	      the next time we enable the cache it doesn't really matter.
-	      Don't do this unless you accomodate all processor variations.
+	      Don't do this unless you accommodate all processor variations.
 	      The bit moved on the 7450.....
 	 ****/
 
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 16468362ad5..301db65f05a 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m)
 	seq_printf(m, "system_active_processors=%d\n",
 		   ppp_data.active_system_procs);
 
-	/* pool related entries are apropriate for shared configs */
+	/* pool related entries are appropriate for shared configs */
 	if (lppaca_of(0).shared_proc) {
 		unsigned long pool_idle_time, pool_procs;
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f4adf89d761..10f0aadee95 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -203,7 +203,7 @@ void __init free_unused_pacas(void)
 {
 	int new_size;
 
-	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus());
+	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
 
 	if (new_size >= paca_size)
 		return;
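
The switch from num_possible_cpus() to nr_cpu_ids matters whenever the possible map is sparse: the paca array is indexed by CPU number, so it must be sized by the highest possible index plus one, not by the population count. With illustrative numbers (not from the patch):

	/* cpu_possible_mask = {0, 2}: two possible CPUs, highest index 2  */
	/* num_possible_cpus() == 2  ->  2 entries, but paca[2] is needed  */
	/* nr_cpu_ids         == 3  ->  entries 0..2, indexing always safe */
	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
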
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 97e0ae41494..c4063b7f49a 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -759,7 +759,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 
 	/*
 	 * If group events scheduling transaction was started,
-	 * skip the schedulability test here, it will be peformed
+	 * skip the schedulability test here, it will be performed
 	 * at commit time(->commit_txn) as a whole
 	 */
 	if (cpuhw->group_flag & PERF_EVENT_TXN)
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index e83ba3f078e..1b1787d5289 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -15,7 +15,7 @@
 
 /*
  * Grab the register values as they are now.
- * This won't do a particularily good job because we really
+ * This won't do a particularly good job because we really
  * want our caller's caller's registers, and our caller has
  * already executed its prologue.
  * ToDo: We could reach back into the caller's save area to do
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05b7139d6a2..e74fa12afc8 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -683,7 +683,7 @@ void __init early_init_devtree(void *params)
 #endif
 
 #ifdef CONFIG_PHYP_DUMP
-	/* scan tree to see if dump occured during last boot */
+	/* scan tree to see if dump occurred during last boot */
 	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
 #endif
 
@@ -739,7 +739,7 @@ void __init early_init_devtree(void *params)
 
 	DBG("Scanning CPUs ...\n");
 
-	/* Retreive CPU related informations from the flat tree
+	/* Retrieve CPU related informations from the flat tree
 	 * (altivec support, boot CPU ID, ...)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 895b082f1e4..55613e33e26 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 #ifdef CONFIG_VSX
 /*
  * Currently to set and and get all the vsx state, you need to call
- * the fp and VMX calls aswell. This only get/sets the lower 32
+ * the fp and VMX calls as well. This only get/sets the lower 32
  * 128bit VSX registers.
  */
 
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 7980ec0e1e1..67f6c3b5135 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -465,7 +465,7 @@ static void start_event_scan(void)
 	pr_debug("rtasd: will sleep for %d milliseconds\n",
 		 (30000 / rtas_event_scan_rate));
 
-	/* Retreive errors from nvram if any */
+	/* Retrieve errors from nvram if any */
 	retreive_nvram_error_log();
 
 	schedule_delayed_work_on(cpumask_first(cpu_online_mask),
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d4882a4664..21f30cb6807 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void)
 	 */
 	cpu_init_thread_core_maps(nthreads);
 
+	/* Now that possible cpus are set, set nr_cpu_ids for later use */
+	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
+
 	free_unused_pacas();
 }
 #endif /* CONFIG_SMP */
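
find_last_bit() returns the index of the highest set bit, so nr_cpu_ids becomes the highest possible CPU number plus one rather than a count of possible CPUs. For example (illustrative values only):

	/* cpu_possible_mask covers CPUs 0-3 and 6-7 (six CPUs, sparse)  */
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
	/* find_last_bit() == 7, so nr_cpu_ids == 8: indices 0..7 usable */

This is the count free_unused_pacas() above now relies on when trimming the paca array.
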
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 98136050917..cbdbb14be4b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads, this can be reused instead of creating
+* a new thread. Also avoids complicated thread destroy functionality
+* for idle threads.
+*/
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +315,8 @@ int generic_cpu_disable(void)
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
-#endif
-	return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
 #endif
+	migrate_irqs();
 	return 0;
 }
 
@@ -362,37 +338,89 @@ void generic_mach_cpu_die(void)
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
 {
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid to
+	 * reschedule the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
 
-	return -ENOSYS;
+	return 0;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
@@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+#endif
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused)
 	local_irq_enable();
 
 	cpu_idle();
-	return 0;
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
@@ -660,5 +697,9 @@ void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif
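
Taken together, the smp.c changes cache one idle task per CPU across hotplug cycles: __cpu_up() creates it on demand through create_idle() (forking from a workqueue so the idle task is not a child of whatever user process triggered the online), generic_mach_cpu_die() calls idle_task_exit() instead of tearing the thread down, and cpu_die() falls through to start_secondary_resume() so a re-awakened CPU restarts cleanly. The lifecycle across one offline/online cycle, sketched with the generic hotplug entry points (the comments paraphrase the patch; they are not code from it):

	cpu_up(n);	/* first online: create_idle() forks and caches the idle
			   task; CPU n runs start_secondary() on its stack */
	cpu_down(n);	/* generic_mach_cpu_die(): idle_task_exit(), then spin
			   until cpu_state flips back to CPU_UP_PREPARE */
	cpu_up(n);	/* create_idle() finds the cached task and only calls
			   init_idle(); the CPU re-enters start_secondary()
			   via start_secondary_resume() */
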
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index b0754e23743..ba4dee3d233 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 	/* Disable MSR:DR to make sure we don't take a TLB or
 	 * hash miss during the copy, as our hash table will
-	 * for a while be unuseable. For .text, we assume we are
+	 * for a while be unusable. For .text, we assume we are
 	 * covered by a BAT. This works only for non-G5 at this
 	 * point. G5 will need a better approach, possibly using
 	 * a small temporary hash table filled with large mappings,
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index aa9269600ca..375480c56eb 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -577,14 +577,21 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions.
+	 */
+	set_dec(DECREMENTER_MAX);
+
+	/* Some implementations of hotplug will get timer interrupts while
+	 * offline, just ignore these
+	 */
+	if (!cpu_online(smp_processor_id()))
+		return;
+
 	trace_timer_interrupt_entry(regs);
 
 	__get_cpu_var(irq_stat).timer_irqs++;
 
-	/* Ensure a positive value is written to the decrementer, or else
-	 * some CPUs will continuue to take decrementer exceptions */
-	set_dec(DECREMENTER_MAX);
-
 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
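
The reordering in timer_interrupt() is deliberate: the decrementer must be rearmed before the new early return, because an offline CPU that bailed out with the decrementer still negative would take the exception again as soon as interrupts were next enabled. In outline (illustrative, bodies elided):

	void timer_interrupt(struct pt_regs *regs)
	{
		set_dec(DECREMENTER_MAX);	/* always rearm first... */

		if (!cpu_online(smp_processor_id()))
			return;			/* ...so this early exit is safe */

		/* tracing, irq accounting and decrementer event dispatch follow */
	}
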
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bd74fac169b..5ddb801bc15 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -959,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 	 * ESR_DST (!?) or 0. In the process of chasing this with the
 	 * hardware people - not sure if it can happen on any illegal
 	 * instruction or only on FP instructions, whether there is a
-	 * pattern to occurences etc. -dgibson 31/Mar/2003 */
+	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
 	switch (do_mathemu(regs)) {
 	case 0:
 		emulate_single_step(regs);
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index b4b167b3364..baa33a7517b 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -1,5 +1,5 @@
 /*
- * udbg for NS16550 compatable serial ports
+ * udbg for NS16550 compatible serial ports
  *
  * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
  *
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index 68d49dd71dc..cf0c9c9c24f 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -19,7 +19,7 @@
 
 /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction. Since we don't have a call here, we artifically
+   call instruction. Since we don't have a call here, we artificially
    extend the range covered by the unwind info by adding a nop before
    the real start. */
 	nop
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 59eb59bb408..45ea281e9a2 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -20,7 +20,7 @@
 
 /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction. Since we don't have a call here, we artifically
+   call instruction. Since we don't have a call here, we artificially
    extend the range covered by the unwind info by padding before the
    real start. */
 	nop