author	Jiri Kosina <jkosina@suse.cz>	2011-04-26 04:22:15 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2011-04-26 04:22:59 -0400
commit	07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree	0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /arch/powerpc/kernel
parent	9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent	cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/btext.c	2
-rw-r--r--	arch/powerpc/kernel/cpu_setup_fsl_booke.S	2
-rw-r--r--	arch/powerpc/kernel/cputable.c	2
-rw-r--r--	arch/powerpc/kernel/crash.c	10
-rw-r--r--	arch/powerpc/kernel/crash_dump.c	17
-rw-r--r--	arch/powerpc/kernel/dma.c	18
-rw-r--r--	arch/powerpc/kernel/exceptions-64e.S	2
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	30
-rw-r--r--	arch/powerpc/kernel/head_32.S	9
-rw-r--r--	arch/powerpc/kernel/head_40x.S	2
-rw-r--r--	arch/powerpc/kernel/head_44x.S	2
-rw-r--r--	arch/powerpc/kernel/head_64.S	9
-rw-r--r--	arch/powerpc/kernel/head_fsl_booke.S	2
-rw-r--r--	arch/powerpc/kernel/ibmebus.c	6
-rw-r--r--	arch/powerpc/kernel/idle_power4.S	21
-rw-r--r--	arch/powerpc/kernel/irq.c	84
-rw-r--r--	arch/powerpc/kernel/l2cr_6xx.S	2
-rw-r--r--	arch/powerpc/kernel/legacy_serial.c	8
-rw-r--r--	arch/powerpc/kernel/lparcfg.c	2
-rw-r--r--	arch/powerpc/kernel/machine_kexec.c	6
-rw-r--r--	arch/powerpc/kernel/paca.c	2
-rw-r--r--	arch/powerpc/kernel/pci-common.c	2
-rw-r--r--	arch/powerpc/kernel/pci_dn.c	7
-rw-r--r--	arch/powerpc/kernel/perf_event.c	39
-rw-r--r--	arch/powerpc/kernel/ppc_save_regs.S	2
-rw-r--r--	arch/powerpc/kernel/process.c	4
-rw-r--r--	arch/powerpc/kernel/prom.c	4
-rw-r--r--	arch/powerpc/kernel/ptrace.c	17
-rw-r--r--	arch/powerpc/kernel/rtasd.c	2
-rw-r--r--	arch/powerpc/kernel/setup-common.c	3
-rw-r--r--	arch/powerpc/kernel/smp.c	153
-rw-r--r--	arch/powerpc/kernel/swsusp_32.S	2
-rw-r--r--	arch/powerpc/kernel/time.c	20
-rw-r--r--	arch/powerpc/kernel/traps.c	2
-rw-r--r--	arch/powerpc/kernel/udbg_16550.c	2
-rw-r--r--	arch/powerpc/kernel/vdso.c	6
-rw-r--r--	arch/powerpc/kernel/vdso32/sigtramp.S	2
-rw-r--r--	arch/powerpc/kernel/vdso64/sigtramp.S	2
38 files changed, 269 insertions, 238 deletions
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 625942ae5585..60b3e377b1e4 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void)
 
 /* This function can be used to enable the early boot text when doing
  * OF booting or within bootx init. It must be followed by a btext_unmap()
- * call before the logical address becomes unuseable
+ * call before the logical address becomes unusable
  */
 void __init btext_setup_display(int width, int height, int depth, int pitch,
 		unsigned long address)
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 5c518ad3445c..913611105c1f 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -64,7 +64,7 @@ _GLOBAL(__setup_cpu_e500v2)
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
 	bl	__setup_e500_ivors
-#ifdef CONFIG_RAPIDIO
+#ifdef CONFIG_FSL_RIO
 	/* Ensure that RFXE is set */
 	mfspr	r3,SPRN_HID1
 	oris	r3,r3,HID1_RFXE@h
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c9b68d07ac4f..b9602ee06deb 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pvr_mask = 0xffff0000,
 		.pvr_value = 0x80240000,
 		.cpu_name = "e5500",
-		.cpu_features = CPU_FTRS_E500MC,
+		.cpu_features = CPU_FTRS_E5500,
 		.cpu_user_features = COMMON_USER_BOOKE,
 		.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
 			MMU_FTR_USE_TLBILX,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d569e2aff18..5b5e1f002a8e 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -188,7 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
 	}
 	mb();
 }
-#endif
+#endif	/* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -233,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
 	crash_ipi_callback(regs);
 }
 
-#else
+#else	/* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
@@ -253,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
 	cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif	/* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
@@ -344,9 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 	crash_kexec_wait_realmode(crashing_cpu);
-#endif
 
 	machine_kexec_mask_interrupts();
 
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 0a2af50243cb..424afb6b8fba 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -28,9 +28,6 @@
 #define DBG(fmt...)
 #endif
 
-/* Stores the physical address of elf header of crash image. */
-unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-
 #ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
@@ -72,20 +69,6 @@ void __init setup_kdump_trampoline(void)
 }
 #endif /* CONFIG_RELOCATABLE */
 
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
-static int __init parse_elfcorehdr(char *p)
-{
-	if (p)
-		elfcorehdr_addr = memparse(p, &p);
-
-	return 1;
-}
-__setup("elfcorehdr=", parse_elfcorehdr);
-
 static int __init parse_savemaxmem(char *p)
 {
 	if (p)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index cf02cad62d9a..d238c082c3c5 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -179,3 +179,21 @@ static int __init dma_init(void)
 	return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 5c43063d2506..9651acc3504a 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -379,7 +379,7 @@ interrupt_end_book3e:
 	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
 	b	system_call_common
 
-/* Auxillary Processor Unavailable Interrupt */
+/* Auxiliary Processor Unavailable Interrupt */
 	START_EXCEPTION(ap_unavailable);
 	NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8a817995b4cd..aeb739e18769 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -5,7 +5,7 @@
  * handling and other fixed offset specific things.
  *
  * This file is meant to be #included from head_64.S due to
- * position dependant assembly.
+ * position dependent assembly.
  *
  * Most of this originates from head_64.S and thus has the same
  * copyright history.
@@ -977,20 +977,6 @@ _GLOBAL(do_stab_bolted)
 	rfid
 	b	.	/* prevent speculative execution */
 
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on).  The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-	. = STAB0_OFFSET	/* 0x6000 */
-	.globl initial_stab
-initial_stab:
-	.space	4096
-
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Data area reserved for FWNMI option.
@@ -1027,3 +1013,17 @@ xLparMap:
 #ifdef CONFIG_PPC_PSERIES
 	. = 0x8000
 #endif /* CONFIG_PPC_PSERIES */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on).  The address is given to the hv
+ * as a page number (see xLparMap above), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+	. = STAB0_OFFSET	/* 0x8000 */
+	.globl initial_stab
+initial_stab:
+	.space	4096
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f4..c5c24beb8387 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -890,6 +890,15 @@ __secondary_start:
 	mtspr	SPRN_SRR1,r4
 	SYNC
 	RFI
+
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r3,0
+	std	r3,0(r1)	/* Zero the stack frame pointer	*/
+	bl	start_secondary
+	b	.
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9dd21a8c4d52..a91626d87fc9 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -766,7 +766,7 @@ DataAccess:
  * miss get to this point to load the TLB.
  *	r10 - TLB_TAG value
  *	r11 - Linux PTE
- *	r12, r9 - avilable to use
+ *	r12, r9 - available to use
  *	PID - loaded with proper value when we get here
  *	Upon exit, we reload everything and RFI.
  * Actually, it will fit now, but oh well.....a common place
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index cbb3436b592d..5e12b741ba5f 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -178,7 +178,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23df7c85..3a319f9c9d3e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -40,7 +40,7 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
 
-/* The physical memory is layed out such that the secondary processor
+/* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
  * using the layout described in exceptions-64s.S
  */
@@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start)
 	add	r13,r13,r4	/* for this processor. */
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
 
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
+
 	/* Create a temp kernel stack for use before relocation is on. */
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3e02710d9562..5ecf54cfa7d4 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -326,7 +326,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index c00d4ca1ee15..28581f1ad2c0 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int ibmebus_bus_pm_freeze(struct device *dev)
 {
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define ibmebus_bus_pm_freeze	NULL
 #define ibmebus_bus_pm_thaw	NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 #define ibmebus_bus_pm_poweroff_noirq	NULL
 #define ibmebus_bus_pm_restore_noirq	NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
 	.prepare = ibmebus_bus_pm_prepare,
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709eeedc..ba3195478600 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	isync
 	b	1b
 
-_GLOBAL(power4_cpu_offline_powersave)
-	/* Go to NAP now */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1			/* hard-disable interrupts */
-	li	r0,1
-	li	r6,0
-	stb	r0,PACAHARDIRQEN(r13)	/* we'll hard-enable shortly */
-	stb	r6,PACASOFTIRQEN(r13)	/* soft-disable irqs */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsrd	r7
-	isync
-	blr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0a5570338b96..f621b7d2d869 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -195,7 +195,7 @@ notrace void arch_local_irq_restore(unsigned long en)
 EXPORT_SYMBOL(arch_local_irq_restore);
 #endif /* CONFIG_PPC64 */
 
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	int j;
 
@@ -231,65 +231,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	return 0;
 }
 
-int show_interrupts(struct seq_file *p, void *v)
-{
-	unsigned long flags, any_count = 0;
-	int i = *(loff_t *) v, j, prec;
-	struct irqaction *action;
-	struct irq_desc *desc;
-	struct irq_chip *chip;
-
-	if (i > nr_irqs)
-		return 0;
-
-	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
-		j *= 10;
-
-	if (i == nr_irqs)
-		return show_other_interrupts(p, prec);
-
-	/* print header */
-	if (i == 0) {
-		seq_printf(p, "%*s", prec + 8, "");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d", j);
-		seq_putc(p, '\n');
-	}
-
-	desc = irq_to_desc(i);
-	if (!desc)
-		return 0;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	for_each_online_cpu(j)
-		any_count |= kstat_irqs_cpu(i, j);
-	action = desc->action;
-	if (!action && !any_count)
-		goto out;
-
-	seq_printf(p, "%*d: ", prec, i);
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-
-	chip = get_irq_desc_chip(desc);
-	if (chip)
-		seq_printf(p, " %-16s", chip->name);
-	else
-		seq_printf(p, " %-16s", "None");
-	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
-
-	if (action) {
-		seq_printf(p, " %s", action->name);
-		while ((action = action->next) != NULL)
-			seq_printf(p, ", %s", action->name);
-	}
-
-	seq_putc(p, '\n');
-out:
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
-}
-
 /*
  * /proc/stat helpers
  */
@@ -305,34 +246,37 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
 	for_each_irq(irq) {
+		struct irq_data *data;
 		struct irq_chip *chip;
 
 		desc = irq_to_desc(irq);
 		if (!desc)
 			continue;
 
-		if (desc->status & IRQ_PER_CPU)
+		data = irq_desc_get_irq_data(desc);
+		if (irqd_is_per_cpu(data))
 			continue;
 
-		chip = get_irq_desc_chip(desc);
+		chip = irq_data_get_irq_chip(data);
 
-		cpumask_and(mask, desc->irq_data.affinity, map);
+		cpumask_and(mask, data->affinity, map);
 		if (cpumask_any(mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			cpumask_copy(mask, map);
 		}
 		if (chip->irq_set_affinity)
-			chip->irq_set_affinity(&desc->irq_data, mask, true);
+			chip->irq_set_affinity(data, mask, true);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
@@ -618,7 +562,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	smp_wmb();
 
 	/* Clear norequest flags */
-	irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
+	irq_clear_status_flags(i, IRQ_NOREQUEST);
 
 	/* Legacy flags are left to default at this point,
 	 * one can then use irq_create_mapping() to
@@ -827,8 +771,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 
 	/* Set type if specified and different than the current one */
 	if (type != IRQ_TYPE_NONE &&
-	    type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
-		set_irq_type(virq, type);
+	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+		irq_set_irq_type(virq, type);
 	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
@@ -851,7 +795,7 @@ void irq_dispose_mapping(unsigned int virq)
 		return;
 
 	/* remove chip and handler */
-	set_irq_chip_and_handler(virq, NULL, NULL);
+	irq_set_chip_and_handler(virq, NULL, NULL);
 
 	/* Make sure it's completed */
 	synchronize_irq(virq);
@@ -1156,7 +1100,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
 		seq_printf(m, "%5d  ", i);
 		seq_printf(m, "0x%05lx  ", virq_to_hw(i));
 
-		chip = get_irq_desc_chip(desc);
+		chip = irq_desc_get_chip(desc);
 		if (chip && chip->name)
 			p = chip->name;
 		else
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 2a2f3c3f6d80..97ec8557f974 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	/**** Might be a good idea to set L2DO here - to prevent instructions
 	   from getting into the cache.  But since we invalidate
 	   the next time we enable the cache it doesn't really matter.
-	   Don't do this unless you accomodate all processor variations.
+	   Don't do this unless you accommodate all processor variations.
 	   The bit moved on the 7450.....
 	 ****/
 
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc0..2b97b80d6d7d 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
 		if (!parent)
 			continue;
 		if (of_match_node(legacy_serial_parents, parent) != NULL) {
-			index = add_legacy_soc_port(np, np);
-			if (index >= 0 && np == stdout)
-				legacy_serial_console = index;
+			if (of_device_is_available(np)) {
+				index = add_legacy_soc_port(np, np);
+				if (index >= 0 && np == stdout)
+					legacy_serial_console = index;
+			}
 		}
 		of_node_put(parent);
 	}
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 16468362ad57..301db65f05a1 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m)
 	seq_printf(m, "system_active_processors=%d\n",
 		   ppp_data.active_system_procs);
 
-	/* pool related entries are apropriate for shared configs */
+	/* pool related entries are appropriate for shared configs */
 	if (lppaca_of(0).shared_proc) {
 		unsigned long pool_idle_time, pool_procs;
 
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index bd1e1ff17b2d..7ee50f0547cb 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -31,17 +31,17 @@ void machine_kexec_mask_interrupts(void) {
 		if (!desc)
 			continue;
 
-		chip = get_irq_desc_chip(desc);
+		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
 
-		if (chip->irq_eoi && desc->status & IRQ_INPROGRESS)
+		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
 			chip->irq_eoi(&desc->irq_data);
 
 		if (chip->irq_mask)
 			chip->irq_mask(&desc->irq_data);
 
-		if (chip->irq_disable && !(desc->status & IRQ_DISABLED))
+		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
 			chip->irq_disable(&desc->irq_data);
 	}
 }
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f4adf89d7614..10f0aadee95b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -203,7 +203,7 @@ void __init free_unused_pacas(void)
 {
 	int new_size;
 
-	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus());
+	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
 
 	if (new_size >= paca_size)
 		return;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 3cd85faa8ac6..893af2a9cd03 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -261,7 +261,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 
 		virq = irq_create_mapping(NULL, line);
 		if (virq != NO_IRQ)
-			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
+			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
 		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
 			 oirq.size, oirq.specifier[0], oirq.specifier[1],
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 29852688ceaa..d225d99fe39d 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -176,11 +176,14 @@ static void *is_devfn_node(struct device_node *dn, void *data)
  */
 struct device_node *fetch_dev_dn(struct pci_dev *dev)
 {
-	struct device_node *orig_dn = dev->dev.of_node;
+	struct pci_controller *phb = dev->sysdata;
 	struct device_node *dn;
 	unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
 
-	dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval);
+	if (WARN_ON(!phb))
+		return NULL;
+
+	dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval);
 	if (dn)
 		dev->dev.of_node = dn;
 	return dn;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 97e0ae414940..822f63008ae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 	return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+	u64 delta = (val - prev) & 0xfffffffful;
+
+	/*
+	 * POWER7 can roll back counter values, if the new value is smaller
+	 * than the previous value it will cause the delta and the counter to
+	 * have bogus values unless we rolled a counter over.  If a counter is
+	 * rolled back, it will be smaller, but within 256, which is the maximum
+	 * number of events to rollback at once.  If we detect a rollback
+	 * return 0.  This can lead to a small lack of precision in the
+	 * counters.
+	 */
+	if (prev > val && (prev - val) < 256)
+		delta = 0;
+
+	return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
 		prev = local64_read(&event->hw.prev_count);
 		barrier();
 		val = read_pmc(event->hw.idx);
+		delta = check_and_compute_delta(prev, val);
+		if (!delta)
+			return;
 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-	/* The counters are only 32 bits wide */
-	delta = (val - prev) & 0xfffffffful;
 	local64_add(delta, &event->count);
 	local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
 		prev = local64_read(&event->hw.prev_count);
 		event->hw.idx = 0;
-		delta = (val - prev) & 0xfffffffful;
-		local64_add(delta, &event->count);
+		delta = check_and_compute_delta(prev, val);
+		if (delta)
+			local64_add(delta, &event->count);
 	}
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 			       unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
-	u64 val;
+	u64 val, prev;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
 		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
-		local64_set(&event->hw.prev_count, val);
+		prev = local64_read(&event->hw.prev_count);
+		if (check_and_compute_delta(prev, val))
+			local64_set(&event->hw.prev_count, val);
 		perf_event_update_userpage(event);
 	}
 }
@@ -759,7 +782,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 
 	/*
 	 * If group events scheduling transaction was started,
-	 * skip the schedulability test here, it will be peformed
+	 * skip the schedulability test here, it will be performed
 	 * at commit time(->commit_txn) as a whole
 	 */
 	if (cpuhw->group_flag & PERF_EVENT_TXN)
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
 	/* we don't have to worry about interrupts here */
 	prev = local64_read(&event->hw.prev_count);
-	delta = (val - prev) & 0xfffffffful;
+	delta = check_and_compute_delta(prev, val);
 	local64_add(delta, &event->count);
 
 	/*
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index e83ba3f078e4..1b1787d52896 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -15,7 +15,7 @@
 
 /*
  * Grab the register values as they are now.
- * This won't do a particularily good job because we really
+ * This won't do a particularly good job because we really
  * want our caller's caller's registers, and our caller has
  * already executed its prologue.
  * ToDo: We could reach back into the caller's save area to do
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8303a6c65ef7..f74f355a9617 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1218,11 +1218,11 @@ void __ppc64_runlatch_off(void)
 
 static struct kmem_cache *thread_info_cache;
 
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
 {
 	struct thread_info *ti;
 
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
 	if (unlikely(ti == NULL))
 		return NULL;
 #ifdef CONFIG_DEBUG_STACK_USAGE
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05b7139d6a27..e74fa12afc82 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -683,7 +683,7 @@ void __init early_init_devtree(void *params)
 #endif
 
 #ifdef CONFIG_PHYP_DUMP
-	/* scan tree to see if dump occured during last boot */
+	/* scan tree to see if dump occurred during last boot */
 	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
 #endif
 
@@ -739,7 +739,7 @@ void __init early_init_devtree(void *params)
 
 	DBG("Scanning CPUs ...\n");
 
-	/* Retreive CPU related informations from the flat tree
+	/* Retrieve CPU related informations from the flat tree
 	 * (altivec support, boot CPU ID, ...)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 906536998291..55613e33e263 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -229,12 +229,16 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   void *kbuf, void __user *ubuf)
 {
-	int ret;
+	int i, ret;
 
 	if (target->thread.regs == NULL)
 		return -EIO;
 
-	CHECK_FULL_REGS(target->thread.regs);
+	if (!FULL_REGS(target->thread.regs)) {
+		/* We have a partial register set.  Fill 14-31 with bogus values */
+		for (i = 14; i < 32; i++)
+			target->thread.regs->gpr[i] = NV_REG_POISON;
+	}
 
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  target->thread.regs,
@@ -459,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 #ifdef CONFIG_VSX
 /*
  * Currently to set and and get all the vsx state, you need to call
- * the fp and VMX calls aswell.  This only get/sets the lower 32
+ * the fp and VMX calls as well.  This only get/sets the lower 32
  * 128bit VSX registers.
  */
 
@@ -641,11 +645,16 @@ static int gpr32_get(struct task_struct *target,
 	compat_ulong_t *k = kbuf;
 	compat_ulong_t __user *u = ubuf;
 	compat_ulong_t reg;
+	int i;
 
 	if (target->thread.regs == NULL)
 		return -EIO;
 
-	CHECK_FULL_REGS(target->thread.regs);
+	if (!FULL_REGS(target->thread.regs)) {
+		/* We have a partial register set.  Fill 14-31 with bogus values */
+		for (i = 14; i < 32; i++)
+			target->thread.regs->gpr[i] = NV_REG_POISON;
+	}
 
 	pos /= sizeof(reg);
 	count /= sizeof(reg);
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 7980ec0e1e1a..67f6c3b51357 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -465,7 +465,7 @@ static void start_event_scan(void)
 	pr_debug("rtasd: will sleep for %d milliseconds\n",
 		 (30000 / rtas_event_scan_rate));
 
-	/* Retreive errors from nvram if any */
+	/* Retrieve errors from nvram if any */
 	retreive_nvram_error_log();
 
 	schedule_delayed_work_on(cpumask_first(cpu_online_mask),
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d4882a46647..21f30cb68077 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void)
 	 */
 	cpu_init_thread_core_maps(nthreads);
 
+	/* Now that possible cpus are set, set nr_cpu_ids for later use */
+	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
+
 	free_unused_pacas();
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 981360509172..cbdbb14be4b0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads, this can be reused instead of creating
+* a new thread. Also avoids complicated thread destroy functionality
+* for idle threads.
+*/
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +315,8 @@ int generic_cpu_disable(void)
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
-#endif
-	return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
 #endif
+	migrate_irqs();
 	return 0;
 }
 
@@ -362,37 +338,89 @@ void generic_mach_cpu_die(void)
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
 {
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid to
+	 * reschedule the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
 
-	return -ENOSYS;
+	return 0;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
@@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+#endif
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused)
 	local_irq_enable();
 
 	cpu_idle();
-	return 0;
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
@@ -660,5 +697,9 @@ void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index b0754e237438..ba4dee3d233f 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 	/* Disable MSR:DR to make sure we don't take a TLB or
 	 * hash miss during the copy, as our hash table will
-	 * for a while be unuseable. For .text, we assume we are
+	 * for a while be unusable. For .text, we assume we are
 	 * covered by a BAT. This works only for non-G5 at this
 	 * point. G5 will need a better approach, possibly using
 	 * a small temporary hash table filled with large mappings,
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09d31dbf43f9..f33acfd872ad 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
 	u64 stolen = 0;
 	u64 dtb;
 
+	if (!dtl)
+		return 0;
+
 	if (i == vpa->dtl_idx)
 		return 0;
 	while (i < vpa->dtl_idx) {
@@ -356,7 +359,7 @@ void account_system_vtime(struct task_struct *tsk)
 	}
 	get_paca()->user_time_scaled += user_scaled;
 
-	if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+	if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
 		account_system_time(tsk, 0, delta, sys_scaled);
 		if (stolen)
 			account_steal_time(stolen);
@@ -577,14 +580,21 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions.
+	 */
+	set_dec(DECREMENTER_MAX);
+
+	/* Some implementations of hotplug will get timer interrupts while
+	 * offline, just ignore these
+	 */
+	if (!cpu_online(smp_processor_id()))
+		return;
+
 	trace_timer_interrupt_entry(regs);
 
 	__get_cpu_var(irq_stat).timer_irqs++;
 
-	/* Ensure a positive value is written to the decrementer, or else
-	 * some CPUs will continuue to take decrementer exceptions */
-	set_dec(DECREMENTER_MAX);
-
 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bd74fac169be..5ddb801bc154 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -959,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		 * ESR_DST (!?) or 0.  In the process of chasing this with the
 		 * hardware people - not sure if it can happen on any illegal
 		 * instruction or only on FP instructions, whether there is a
-		 * pattern to occurences etc. -dgibson 31/Mar/2003 */
+		 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
 		switch (do_mathemu(regs)) {
 		case 0:
 			emulate_single_step(regs);
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index b4b167b33643..baa33a7517bc 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -1,5 +1,5 @@
 /*
- * udbg for NS16550 compatable serial ports
+ * udbg for NS16550 compatible serial ports
  *
  * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
  *
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index fd8728729abc..142ab1008c3b 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -820,17 +820,17 @@ static int __init vdso_init(void)
 }
 arch_initcall(vdso_init);
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index 68d49dd71dcc..cf0c9c9c24f9 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -19,7 +19,7 @@
 
 /* The nop here is a hack.  The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction.  Since we don't have a call here, we artifically
+   call instruction.  Since we don't have a call here, we artificially
    extend the range covered by the unwind info by adding a nop before
    the real start.  */
 	nop
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 59eb59bb4082..45ea281e9a21 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -20,7 +20,7 @@
 
 /* The nop here is a hack.  The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction.  Since we don't have a call here, we artifically
+   call instruction.  Since we don't have a call here, we artificially
    extend the range covered by the unwind info by padding before the
    real start.  */
 	nop