path: root/arch/powerpc
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-14 10:43:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-14 10:43:11 -0400
commit     a2c7a54fcc36dc3d0d461e883d9535f8f52f5a3f (patch)
tree       3bc7e727e6ebe67d7099b01a1b3cc159840912ca /arch/powerpc
parent     674825d05001e218afe5a04438683e1e597e14fb (diff)
parent     e34166ad63eac4d0fa98b4c4ed7a98202a18faef (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Benjamin Herrenschmidt:
 "This is mostly bug fixes (some of them regressions, some of them I
  deemed worth merging now) along with some patches from Li Zhong
  hooking up the new context tracking stuff (for the new full NO_HZ)"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (25 commits)
  powerpc: Set show_unhandled_signals to 1 by default
  powerpc/perf: Fix setting of "to" addresses for BHRB
  powerpc/pmu: Fix order of interpreting BHRB target entries
  powerpc/perf: Move BHRB code into CONFIG_PPC64 region
  powerpc: select HAVE_CONTEXT_TRACKING for pSeries
  powerpc: Use the new schedule_user API on userspace preemption
  powerpc: Exit user context on notify resume
  powerpc: Exception hooks for context tracking subsystem
  powerpc: Syscall hooks for context tracking subsystem
  powerpc/booke64: Fix kernel hangs at kernel_dbg_exc
  powerpc: Fix irq_set_affinity() return values
  powerpc: Provide __bswapdi2
  powerpc/powernv: Fix starting of secondary CPUs on OPALv2 and v3
  powerpc/powernv: Detect OPAL v3 API version
  powerpc: Fix MAX_STACK_TRACE_ENTRIES too low warning again
  powerpc: Make CONFIG_RTAS_PROC depend on CONFIG_PROC_FS
  powerpc: Bring all threads online prior to migration/hibernation
  powerpc/rtas_flash: Fix validate_flash buffer overflow issue
  powerpc/kexec: Fix kexec when using VMX optimised memcpy
  powerpc: Fix build errors STRICT_MM_TYPECHECKS
  ...
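Several of the commits above wire powerpc into the new context-tracking subsystem used by full NO_HZ. The pattern they add is simply to notify context tracking whenever a CPU crosses the user/kernel boundary; a minimal sketch of that pattern is shown below (hypothetical function names, illustrative only; the concrete hooks are the user_exit()/user_enter() and exception_enter()/exception_exit() calls added to ptrace.c, signal.c, traps.c and the MM fault paths in the diff that follows).

	/*
	 * Minimal sketch of the context-tracking hook pattern wired up by
	 * this series. Illustrative only; not the actual entry-path code.
	 */
	#include <linux/context_tracking.h>

	void example_syscall_entry_hook(void)
	{
		user_exit();	/* CPU is leaving user mode: tell RCU/NO_HZ */
		/* ... syscall entry work (tracing, seccomp, audit) ... */
	}

	void example_return_to_user_hook(void)
	{
		/* ... pending work on the way back out (signals, notifiers) ... */
		user_enter();	/* CPU is about to resume user mode */
	}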
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig.debug | 23
-rw-r--r--  arch/powerpc/include/asm/context_tracking.h | 10
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 4
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 5
-rw-r--r--  arch/powerpc/include/asm/opal.h | 5
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 2
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 2
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 7
-rw-r--r--  arch/powerpc/include/asm/udbg.h | 1
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 5
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 8
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 4
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 11
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 11
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 5
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 3
-rw-r--r--  arch/powerpc/kernel/process.c | 8
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 5
-rw-r--r--  arch/powerpc/kernel/rtas.c | 113
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 10
-rw-r--r--  arch/powerpc/kernel/signal.c | 7
-rw-r--r--  arch/powerpc/kernel/traps.c | 80
-rw-r--r--  arch/powerpc/kernel/udbg.c | 3
-rw-r--r--  arch/powerpc/mm/fault.c | 41
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 36
-rw-r--r--  arch/powerpc/mm/init_64.c | 3
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 280
-rw-r--r--  arch/powerpc/platforms/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 30
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 9
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 12
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 16
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 62
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/suspend.c | 22
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 2
-rw-r--r--  arch/powerpc/sysdev/Makefile | 2
-rw-r--r--  arch/powerpc/sysdev/ehv_pic.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 2
-rw-r--r--  arch/powerpc/sysdev/udbg_memcons.c | 105
-rw-r--r--  arch/powerpc/sysdev/xics/ics-opal.c | 2
45 files changed, 763 insertions, 206 deletions
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5416e28a7538..863d877e0b5f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
262 Select this to enable early debugging for the PowerNV platform 262 Select this to enable early debugging for the PowerNV platform
263 using an "hvsi" console 263 using an "hvsi" console
264 264
265config PPC_EARLY_DEBUG_MEMCONS
266 bool "In memory console"
267 help
268 Select this to enable early debugging using an in memory console.
269 This console provides input and output buffers stored within the
270 kernel BSS and should be safe to select on any system. A debugger
271 can then be used to read kernel output or send input to the console.
265endchoice 272endchoice
266 273
274config PPC_MEMCONS_OUTPUT_SIZE
275 int "In memory console output buffer size"
276 depends on PPC_EARLY_DEBUG_MEMCONS
277 default 4096
278 help
279 Selects the size of the output buffer (in bytes) of the in memory
280 console.
281
282config PPC_MEMCONS_INPUT_SIZE
283 int "In memory console input buffer size"
284 depends on PPC_EARLY_DEBUG_MEMCONS
285 default 128
286 help
287 Selects the size of the input buffer (in bytes) of the in memory
288 console.
289
267config PPC_EARLY_DEBUG_OPAL 290config PPC_EARLY_DEBUG_OPAL
268 def_bool y 291 def_bool y
269 depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI 292 depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644
index 000000000000..b6f5a33b8ee2
--- /dev/null
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -0,0 +1,10 @@
1#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
2#define _ASM_POWERPC_CONTEXT_TRACKING_H
3
4#ifdef CONFIG_CONTEXT_TRACKING
5#define SCHEDULE_USER bl .schedule_user
6#else
7#define SCHEDULE_USER bl .schedule
8#endif
9
10#endif
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 0df54646f968..681bc0314b6b 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -52,6 +52,7 @@
52#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) 52#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
53#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) 53#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
54#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) 54#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
55#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
55 56
56#ifndef __ASSEMBLY__ 57#ifndef __ASSEMBLY__
57 58
@@ -69,7 +70,8 @@ enum {
69 FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | 70 FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
70 FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, 71 FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
71 FW_FEATURE_PSERIES_ALWAYS = 0, 72 FW_FEATURE_PSERIES_ALWAYS = 0,
72 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, 73 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
74 FW_FEATURE_OPALv3,
73 FW_FEATURE_POWERNV_ALWAYS = 0, 75 FW_FEATURE_POWERNV_ALWAYS = 0,
74 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 76 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
75 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 77 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index d615b28dda82..ba713f166fa5 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void)
96#endif 96#endif
97 97
98#define hard_irq_disable() do { \ 98#define hard_irq_disable() do { \
99 u8 _was_enabled = get_paca()->soft_enabled; \
99 __hard_irq_disable(); \ 100 __hard_irq_disable(); \
100 if (local_paca->soft_enabled) \
101 trace_hardirqs_off(); \
102 get_paca()->soft_enabled = 0; \ 101 get_paca()->soft_enabled = 0; \
103 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ 102 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \
103 if (_was_enabled) \
104 trace_hardirqs_off(); \
104} while(0) 105} while(0)
105 106
106static inline bool lazy_irq_pending(void) 107static inline bool lazy_irq_pending(void)
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index b6c8b58b1d76..cbb9305ab15a 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType {
243 243
244enum OpalThreadStatus { 244enum OpalThreadStatus {
245 OPAL_THREAD_INACTIVE = 0x0, 245 OPAL_THREAD_INACTIVE = 0x0,
246 OPAL_THREAD_STARTED = 0x1 246 OPAL_THREAD_STARTED = 0x1,
247 OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
247}; 248};
248 249
249enum OpalPciBusCompare { 250enum OpalPciBusCompare {
@@ -563,6 +564,8 @@ extern void opal_nvram_init(void);
563 564
564extern int opal_machine_check(struct pt_regs *regs); 565extern int opal_machine_check(struct pt_regs *regs);
565 566
567extern void opal_shutdown(void);
568
566#endif /* __ASSEMBLY__ */ 569#endif /* __ASSEMBLY__ */
567 570
568#endif /* __OPAL_H */ 571#endif /* __OPAL_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 91acb12bac92..b66ae722a8e9 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
186 186
187static inline pgtable_t pmd_pgtable(pmd_t pmd) 187static inline pgtable_t pmd_pgtable(pmd_t pmd)
188{ 188{
189 return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); 189 return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
190} 190}
191 191
192static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 192static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 3e13e23e4fdf..d836d945068d 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,7 @@
47 * generic accessors and iterators here 47 * generic accessors and iterators here
48 */ 48 */
49#define __real_pte(e,p) ((real_pte_t) { \ 49#define __real_pte(e,p) ((real_pte_t) { \
50 (e), ((e) & _PAGE_COMBO) ? \ 50 (e), (pte_val(e) & _PAGE_COMBO) ? \
51 (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) 51 (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
52#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ 52#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
53 (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) 53 (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index a8bc2bb4adc9..34fd70488d83 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex);
264extern void rtas_initialize(void); 264extern void rtas_initialize(void);
265extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); 265extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
266extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); 266extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
267extern int rtas_online_cpus_mask(cpumask_var_t cpus);
268extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
267extern int rtas_ibm_suspend_me(struct rtas_args *); 269extern int rtas_ibm_suspend_me(struct rtas_args *);
268 270
269struct rtc_time; 271struct rtc_time;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8ceea14d6fe4..ba7b1973866e 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
97#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ 97#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
98#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 98#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
99#define TIF_SINGLESTEP 8 /* singlestepping active */ 99#define TIF_SINGLESTEP 8 /* singlestepping active */
100#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ 100#define TIF_NOHZ 9 /* in adaptive nohz mode */
101#define TIF_SECCOMP 10 /* secure computing */ 101#define TIF_SECCOMP 10 /* secure computing */
102#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ 102#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
103#define TIF_NOERROR 12 /* Force successful syscall return */ 103#define TIF_NOERROR 12 /* Force successful syscall return */
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
106#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ 106#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
107#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation 107#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
108 for stack store? */ 108 for stack store? */
109#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
109 110
110/* as above, but as bit values */ 111/* as above, but as bit values */
111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 112#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void)
124#define _TIF_UPROBE (1<<TIF_UPROBE) 125#define _TIF_UPROBE (1<<TIF_UPROBE)
125#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 126#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
126#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) 127#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
128#define _TIF_NOHZ (1<<TIF_NOHZ)
127#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ 129#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
128 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) 130 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
131 _TIF_NOHZ)
129 132
130#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 133#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
131 _TIF_NOTIFY_RESUME | _TIF_UPROBE) 134 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 5a7510e9d09d..dc590919f8eb 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void);
52extern void __init udbg_init_cpm(void); 52extern void __init udbg_init_cpm(void);
53extern void __init udbg_init_usbgecko(void); 53extern void __init udbg_init_usbgecko(void);
54extern void __init udbg_init_wsp(void); 54extern void __init udbg_init_wsp(void);
55extern void __init udbg_init_memcons(void);
55extern void __init udbg_init_ehv_bc(void); 56extern void __init udbg_init_ehv_bc(void);
56extern void __init udbg_init_ps3gelic(void); 57extern void __init udbg_init_ps3gelic(void);
57extern void __init udbg_init_debug_opal_raw(void); 58extern void __init udbg_init_debug_opal_raw(void);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index e514de57a125..d22e73e4618b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -439,8 +439,6 @@ ret_from_fork:
439ret_from_kernel_thread: 439ret_from_kernel_thread:
440 REST_NVGPRS(r1) 440 REST_NVGPRS(r1)
441 bl schedule_tail 441 bl schedule_tail
442 li r3,0
443 stw r3,0(r1)
444 mtlr r14 442 mtlr r14
445 mr r3,r15 443 mr r3,r15
446 PPC440EP_ERR42 444 PPC440EP_ERR42
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 915fbb4fc2fe..51cfb8fc301f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -33,6 +33,7 @@
33#include <asm/irqflags.h> 33#include <asm/irqflags.h>
34#include <asm/ftrace.h> 34#include <asm/ftrace.h>
35#include <asm/hw_irq.h> 35#include <asm/hw_irq.h>
36#include <asm/context_tracking.h>
36 37
37/* 38/*
38 * System calls. 39 * System calls.
@@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork)
376_GLOBAL(ret_from_kernel_thread) 377_GLOBAL(ret_from_kernel_thread)
377 bl .schedule_tail 378 bl .schedule_tail
378 REST_NVGPRS(r1) 379 REST_NVGPRS(r1)
379 li r3,0
380 std r3,0(r1)
381 ld r14, 0(r14) 380 ld r14, 0(r14)
382 mtlr r14 381 mtlr r14
383 mr r3,r15 382 mr r3,r15
@@ -634,7 +633,7 @@ _GLOBAL(ret_from_except_lite)
634 andi. r0,r4,_TIF_NEED_RESCHED 633 andi. r0,r4,_TIF_NEED_RESCHED
635 beq 1f 634 beq 1f
636 bl .restore_interrupts 635 bl .restore_interrupts
637 bl .schedule 636 SCHEDULE_USER
638 b .ret_from_except_lite 637 b .ret_from_except_lite
639 638
6401: bl .save_nvgprs 6391: bl .save_nvgprs
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 42a756eec9ff..645170a07ada 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
489 */ 489 */
490 490
491 mfspr r14,SPRN_DBSR /* check single-step/branch taken */ 491 mfspr r14,SPRN_DBSR /* check single-step/branch taken */
492 andis. r15,r14,DBSR_IC@h 492 andis. r15,r14,(DBSR_IC|DBSR_BT)@h
493 beq+ 1f 493 beq+ 1f
494 494
495 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) 495 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
500 bge+ cr1,1f 500 bge+ cr1,1f
501 501
502 /* here it looks like we got an inappropriate debug exception. */ 502 /* here it looks like we got an inappropriate debug exception. */
503 lis r14,DBSR_IC@h /* clear the IC event */ 503 lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
504 rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ 504 rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */
505 mtspr SPRN_DBSR,r14 505 mtspr SPRN_DBSR,r14
506 mtspr SPRN_CSRR1,r11 506 mtspr SPRN_CSRR1,r11
@@ -555,7 +555,7 @@ kernel_dbg_exc:
555 */ 555 */
556 556
557 mfspr r14,SPRN_DBSR /* check single-step/branch taken */ 557 mfspr r14,SPRN_DBSR /* check single-step/branch taken */
558 andis. r15,r14,DBSR_IC@h 558 andis. r15,r14,(DBSR_IC|DBSR_BT)@h
559 beq+ 1f 559 beq+ 1f
560 560
561 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) 561 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -566,7 +566,7 @@ kernel_dbg_exc:
566 bge+ cr1,1f 566 bge+ cr1,1f
567 567
568 /* here it looks like we got an inappropriate debug exception. */ 568 /* here it looks like we got an inappropriate debug exception. */
569 lis r14,DBSR_IC@h /* clear the IC event */ 569 lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
570 rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ 570 rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
571 mtspr SPRN_DBSR,r14 571 mtspr SPRN_DBSR,r14
572 mtspr SPRN_DSRR1,r11 572 mtspr SPRN_DSRR1,r11
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 466a2908bb63..611acdf30096 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/hardirq.h>
20 21
21#include <asm/page.h> 22#include <asm/page.h>
22#include <asm/current.h> 23#include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
335 pr_debug("kexec: Starting switchover sequence.\n"); 336 pr_debug("kexec: Starting switchover sequence.\n");
336 337
337 /* switch to a staticly allocated stack. Based on irq stack code. 338 /* switch to a staticly allocated stack. Based on irq stack code.
339 * We setup preempt_count to avoid using VMX in memcpy.
338 * XXX: the task struct will likely be invalid once we do the copy! 340 * XXX: the task struct will likely be invalid once we do the copy!
339 */ 341 */
340 kexec_stack.thread_info.task = current_thread_info()->task; 342 kexec_stack.thread_info.task = current_thread_info()->task;
341 kexec_stack.thread_info.flags = 0; 343 kexec_stack.thread_info.flags = 0;
344 kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
345 kexec_stack.thread_info.cpu = current_thread_info()->cpu;
342 346
343 /* We need a static PACA, too; copy this CPU's PACA over and switch to 347 /* We need a static PACA, too; copy this CPU's PACA over and switch to
344 * it. Also poison per_cpu_offset to catch anyone using non-static 348 * it. Also poison per_cpu_offset to catch anyone using non-static
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 19e096bd0e73..e469f30e6eeb 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2)
657 li r3,2 657 li r3,2
658 blr 658 blr
659 659
660_GLOBAL(__bswapdi2)
661 rotlwi r9,r4,8
662 rotlwi r10,r3,8
663 rlwimi r9,r4,24,0,7
664 rlwimi r10,r3,24,0,7
665 rlwimi r9,r4,24,16,23
666 rlwimi r10,r3,24,16,23
667 mr r3,r9
668 mr r4,r10
669 blr
670
660_GLOBAL(abs) 671_GLOBAL(abs)
661 srawi r4,r3,31 672 srawi r4,r3,31
662 xor r3,r3,r4 673 xor r3,r3,r4
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 5cfa8008693b..6820e45f557b 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache)
234 isync 234 isync
235 blr 235 blr
236 236
237_GLOBAL(__bswapdi2)
238 srdi r8,r3,32
239 rlwinm r7,r3,8,0xffffffff
240 rlwimi r7,r3,24,0,7
241 rlwinm r9,r8,8,0xffffffff
242 rlwimi r7,r3,24,16,23
243 rlwimi r9,r8,24,0,7
244 rlwimi r9,r8,24,16,23
245 sldi r7,r7,32
246 or r3,r7,r9
247 blr
237 248
238#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) 249#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
239/* 250/*
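For reference, the __bswapdi2 helpers added in the two hunks above implement a full 64-bit byte swap (a routine GCC may emit calls to when it cannot inline a 64-bit byte swap, hence the "powerpc: Provide __bswapdi2" commit). A plain C sketch of the same operation, for illustration only and not part of the patch, is:

	/* Illustrative C equivalent of the 64-bit byte swap implemented by
	 * the new __bswapdi2 assembly routines above (sketch only). */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t bswap64(uint64_t x)
	{
		x = (x >> 32) | (x << 32);                   /* swap 32-bit halves   */
		x = ((x & 0x0000ffff0000ffffULL) << 16) |
		    ((x & 0xffff0000ffff0000ULL) >> 16);     /* swap 16-bit quarters */
		x = ((x & 0x00ff00ff00ff00ffULL) << 8) |
		    ((x & 0xff00ff00ff00ff00ULL) >> 8);      /* swap adjacent bytes  */
		return x;
	}

	int main(void)
	{
		/* prints 0807060504030201 */
		printf("%016llx\n", (unsigned long long)bswap64(0x0102030405060708ULL));
		return 0;
	}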
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f5c5c90799a7..6053f037ef0a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
359 enum pci_mmap_state mmap_state, 359 enum pci_mmap_state mmap_state,
360 int write_combine) 360 int write_combine)
361{ 361{
362 unsigned long prot = pgprot_val(protection);
363 362
364 /* Write combine is always 0 on non-memory space mappings. On 363 /* Write combine is always 0 on non-memory space mappings. On
365 * memory space, if the user didn't pass 1, we check for a 364 * memory space, if the user didn't pass 1, we check for a
@@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
376 375
377 /* XXX would be nice to have a way to ask for write-through */ 376 /* XXX would be nice to have a way to ask for write-through */
378 if (write_combine) 377 if (write_combine)
379 return pgprot_noncached_wc(prot); 378 return pgprot_noncached_wc(protection);
380 else 379 else
381 return pgprot_noncached(prot); 380 return pgprot_noncached(protection);
382} 381}
383 382
384/* 383/*
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 78b8766fd79e..c29666586998 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3);
143int __ucmpdi2(unsigned long long, unsigned long long); 143int __ucmpdi2(unsigned long long, unsigned long long);
144EXPORT_SYMBOL(__ucmpdi2); 144EXPORT_SYMBOL(__ucmpdi2);
145#endif 145#endif
146 146long long __bswapdi2(long long);
147EXPORT_SYMBOL(__bswapdi2);
147EXPORT_SYMBOL(memcpy); 148EXPORT_SYMBOL(memcpy);
148EXPORT_SYMBOL(memset); 149EXPORT_SYMBOL(memset);
149EXPORT_SYMBOL(memmove); 150EXPORT_SYMBOL(memmove);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ceb4e7b62cf4..a902723fdc69 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
339 339
340static void prime_debug_regs(struct thread_struct *thread) 340static void prime_debug_regs(struct thread_struct *thread)
341{ 341{
342 /*
343 * We could have inherited MSR_DE from userspace, since
344 * it doesn't get cleared on exception entry. Make sure
345 * MSR_DE is clear before we enable any debug events.
346 */
347 mtmsr(mfmsr() & ~MSR_DE);
348
342 mtspr(SPRN_IAC1, thread->iac1); 349 mtspr(SPRN_IAC1, thread->iac1);
343 mtspr(SPRN_IAC2, thread->iac2); 350 mtspr(SPRN_IAC2, thread->iac2);
344#if CONFIG_PPC_ADV_DEBUG_IACS > 2 351#if CONFIG_PPC_ADV_DEBUG_IACS > 2
@@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
971 * do some house keeping and then return from the fork or clone 978 * do some house keeping and then return from the fork or clone
972 * system call, using the stack frame created above. 979 * system call, using the stack frame created above.
973 */ 980 */
981 ((unsigned long *)sp)[0] = 0;
974 sp -= sizeof(struct pt_regs); 982 sp -= sizeof(struct pt_regs);
975 kregs = (struct pt_regs *) sp; 983 kregs = (struct pt_regs *) sp;
976 sp -= STACK_FRAME_OVERHEAD; 984 sp -= STACK_FRAME_OVERHEAD;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3b14d320e69f..98c2fc198712 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -32,6 +32,7 @@
32#include <trace/syscall.h> 32#include <trace/syscall.h>
33#include <linux/hw_breakpoint.h> 33#include <linux/hw_breakpoint.h>
34#include <linux/perf_event.h> 34#include <linux/perf_event.h>
35#include <linux/context_tracking.h>
35 36
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/page.h> 38#include <asm/page.h>
@@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
1788{ 1789{
1789 long ret = 0; 1790 long ret = 0;
1790 1791
1792 user_exit();
1793
1791 secure_computing_strict(regs->gpr[0]); 1794 secure_computing_strict(regs->gpr[0]);
1792 1795
1793 if (test_thread_flag(TIF_SYSCALL_TRACE) && 1796 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
@@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
1832 step = test_thread_flag(TIF_SINGLESTEP); 1835 step = test_thread_flag(TIF_SINGLESTEP);
1833 if (step || test_thread_flag(TIF_SYSCALL_TRACE)) 1836 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1834 tracehook_report_syscall_exit(regs, step); 1837 tracehook_report_syscall_exit(regs, step);
1838
1839 user_enter();
1835} 1840}
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1fd6e7b2f390..52add6f3e201 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/capability.h> 20#include <linux/capability.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/cpu.h>
22#include <linux/smp.h> 23#include <linux/smp.h>
23#include <linux/completion.h> 24#include <linux/completion.h>
24#include <linux/cpumask.h> 25#include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
807 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); 808 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
808} 809}
809 810
811enum rtas_cpu_state {
812 DOWN,
813 UP,
814};
815
816#ifndef CONFIG_SMP
817static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
818 cpumask_var_t cpus)
819{
820 if (!cpumask_empty(cpus)) {
821 cpumask_clear(cpus);
822 return -EINVAL;
823 } else
824 return 0;
825}
826#else
827/* On return cpumask will be altered to indicate CPUs changed.
828 * CPUs with states changed will be set in the mask,
829 * CPUs with status unchanged will be unset in the mask. */
830static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
831 cpumask_var_t cpus)
832{
833 int cpu;
834 int cpuret = 0;
835 int ret = 0;
836
837 if (cpumask_empty(cpus))
838 return 0;
839
840 for_each_cpu(cpu, cpus) {
841 switch (state) {
842 case DOWN:
843 cpuret = cpu_down(cpu);
844 break;
845 case UP:
846 cpuret = cpu_up(cpu);
847 break;
848 }
849 if (cpuret) {
850 pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
851 __func__,
852 ((state == UP) ? "up" : "down"),
853 cpu, cpuret);
854 if (!ret)
855 ret = cpuret;
856 if (state == UP) {
857 /* clear bits for unchanged cpus, return */
858 cpumask_shift_right(cpus, cpus, cpu);
859 cpumask_shift_left(cpus, cpus, cpu);
860 break;
861 } else {
862 /* clear bit for unchanged cpu, continue */
863 cpumask_clear_cpu(cpu, cpus);
864 }
865 }
866 }
867
868 return ret;
869}
870#endif
871
872int rtas_online_cpus_mask(cpumask_var_t cpus)
873{
874 int ret;
875
876 ret = rtas_cpu_state_change_mask(UP, cpus);
877
878 if (ret) {
879 cpumask_var_t tmp_mask;
880
881 if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
882 return ret;
883
884 /* Use tmp_mask to preserve cpus mask from first failure */
885 cpumask_copy(tmp_mask, cpus);
886 rtas_offline_cpus_mask(tmp_mask);
887 free_cpumask_var(tmp_mask);
888 }
889
890 return ret;
891}
892EXPORT_SYMBOL(rtas_online_cpus_mask);
893
894int rtas_offline_cpus_mask(cpumask_var_t cpus)
895{
896 return rtas_cpu_state_change_mask(DOWN, cpus);
897}
898EXPORT_SYMBOL(rtas_offline_cpus_mask);
899
810int rtas_ibm_suspend_me(struct rtas_args *args) 900int rtas_ibm_suspend_me(struct rtas_args *args)
811{ 901{
812 long state; 902 long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
814 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 904 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
815 struct rtas_suspend_me_data data; 905 struct rtas_suspend_me_data data;
816 DECLARE_COMPLETION_ONSTACK(done); 906 DECLARE_COMPLETION_ONSTACK(done);
907 cpumask_var_t offline_mask;
908 int cpuret;
817 909
818 if (!rtas_service_present("ibm,suspend-me")) 910 if (!rtas_service_present("ibm,suspend-me"))
819 return -ENOSYS; 911 return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
837 return 0; 929 return 0;
838 } 930 }
839 931
932 if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
933 return -ENOMEM;
934
840 atomic_set(&data.working, 0); 935 atomic_set(&data.working, 0);
841 atomic_set(&data.done, 0); 936 atomic_set(&data.done, 0);
842 atomic_set(&data.error, 0); 937 atomic_set(&data.error, 0);
843 data.token = rtas_token("ibm,suspend-me"); 938 data.token = rtas_token("ibm,suspend-me");
844 data.complete = &done; 939 data.complete = &done;
940
941 /* All present CPUs must be online */
942 cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
943 cpuret = rtas_online_cpus_mask(offline_mask);
944 if (cpuret) {
945 pr_err("%s: Could not bring present CPUs online.\n", __func__);
946 atomic_set(&data.error, cpuret);
947 goto out;
948 }
949
845 stop_topology_update(); 950 stop_topology_update();
846 951
847 /* Call function on all CPUs. One of us will make the 952 /* Call function on all CPUs. One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
857 962
858 start_topology_update(); 963 start_topology_update();
859 964
965 /* Take down CPUs not online prior to suspend */
966 cpuret = rtas_offline_cpus_mask(offline_mask);
967 if (cpuret)
968 pr_warn("%s: Could not restore CPUs to offline state.\n",
969 __func__);
970
971out:
972 free_cpumask_var(offline_mask);
860 return atomic_read(&data.error); 973 return atomic_read(&data.error);
861} 974}
862#else /* CONFIG_PPC_PSERIES */ 975#else /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 5b3022470126..2f3cdb01506d 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -89,6 +89,7 @@
89 89
90/* Array sizes */ 90/* Array sizes */
91#define VALIDATE_BUF_SIZE 4096 91#define VALIDATE_BUF_SIZE 4096
92#define VALIDATE_MSG_LEN 256
92#define RTAS_MSG_MAXLEN 64 93#define RTAS_MSG_MAXLEN 64
93 94
94/* Quirk - RTAS requires 4k list length and block size */ 95/* Quirk - RTAS requires 4k list length and block size */
@@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf)
466} 467}
467 468
468static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, 469static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
469 char *msg) 470 char *msg, int msglen)
470{ 471{
471 int n; 472 int n;
472 473
@@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
474 n = sprintf(msg, "%d\n", args_buf->update_results); 475 n = sprintf(msg, "%d\n", args_buf->update_results);
475 if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || 476 if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
476 (args_buf->update_results == VALIDATE_TMP_UPDATE)) 477 (args_buf->update_results == VALIDATE_TMP_UPDATE))
477 n += sprintf(msg + n, "%s\n", args_buf->buf); 478 n += snprintf(msg + n, msglen - n, "%s\n",
479 args_buf->buf);
478 } else { 480 } else {
479 n = sprintf(msg, "%d\n", args_buf->status); 481 n = sprintf(msg, "%d\n", args_buf->status);
480 } 482 }
@@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
486{ 488{
487 struct rtas_validate_flash_t *const args_buf = 489 struct rtas_validate_flash_t *const args_buf =
488 &rtas_validate_flash_data; 490 &rtas_validate_flash_data;
489 char msg[RTAS_MSG_MAXLEN]; 491 char msg[VALIDATE_MSG_LEN];
490 int msglen; 492 int msglen;
491 493
492 mutex_lock(&rtas_validate_flash_mutex); 494 mutex_lock(&rtas_validate_flash_mutex);
493 msglen = get_validate_flash_msg(args_buf, msg); 495 msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
494 mutex_unlock(&rtas_validate_flash_mutex); 496 mutex_unlock(&rtas_validate_flash_mutex);
495 497
496 return simple_read_from_buffer(buf, count, ppos, msg, msglen); 498 return simple_read_from_buffer(buf, count, ppos, msg, msglen);
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index cf12eae02de5..577a8aa69c6e 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -13,6 +13,7 @@
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <linux/uprobes.h> 14#include <linux/uprobes.h>
15#include <linux/key.h> 15#include <linux/key.h>
16#include <linux/context_tracking.h>
16#include <asm/hw_breakpoint.h> 17#include <asm/hw_breakpoint.h>
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18#include <asm/unistd.h> 19#include <asm/unistd.h>
@@ -24,7 +25,7 @@
24 * through debug.exception-trace sysctl. 25 * through debug.exception-trace sysctl.
25 */ 26 */
26 27
27int show_unhandled_signals = 0; 28int show_unhandled_signals = 1;
28 29
29/* 30/*
30 * Allocate space for the signal frame 31 * Allocate space for the signal frame
@@ -159,6 +160,8 @@ static int do_signal(struct pt_regs *regs)
159 160
160void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) 161void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
161{ 162{
163 user_exit();
164
162 if (thread_info_flags & _TIF_UPROBE) 165 if (thread_info_flags & _TIF_UPROBE)
163 uprobe_notify_resume(regs); 166 uprobe_notify_resume(regs);
164 167
@@ -169,4 +172,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
169 clear_thread_flag(TIF_NOTIFY_RESUME); 172 clear_thread_flag(TIF_NOTIFY_RESUME);
170 tracehook_notify_resume(regs); 173 tracehook_notify_resume(regs);
171 } 174 }
175
176 user_enter();
172} 177}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 83efa2f7d926..a7a648f6b750 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -35,6 +35,7 @@
35#include <linux/kdebug.h> 35#include <linux/kdebug.h>
36#include <linux/debugfs.h> 36#include <linux/debugfs.h>
37#include <linux/ratelimit.h> 37#include <linux/ratelimit.h>
38#include <linux/context_tracking.h>
38 39
39#include <asm/emulated_ops.h> 40#include <asm/emulated_ops.h>
40#include <asm/pgtable.h> 41#include <asm/pgtable.h>
@@ -667,6 +668,7 @@ int machine_check_generic(struct pt_regs *regs)
667 668
668void machine_check_exception(struct pt_regs *regs) 669void machine_check_exception(struct pt_regs *regs)
669{ 670{
671 enum ctx_state prev_state = exception_enter();
670 int recover = 0; 672 int recover = 0;
671 673
672 __get_cpu_var(irq_stat).mce_exceptions++; 674 __get_cpu_var(irq_stat).mce_exceptions++;
@@ -683,7 +685,7 @@ void machine_check_exception(struct pt_regs *regs)
683 recover = cur_cpu_spec->machine_check(regs); 685 recover = cur_cpu_spec->machine_check(regs);
684 686
685 if (recover > 0) 687 if (recover > 0)
686 return; 688 goto bail;
687 689
688#if defined(CONFIG_8xx) && defined(CONFIG_PCI) 690#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
689 /* the qspan pci read routines can cause machine checks -- Cort 691 /* the qspan pci read routines can cause machine checks -- Cort
@@ -693,20 +695,23 @@ void machine_check_exception(struct pt_regs *regs)
693 * -- BenH 695 * -- BenH
694 */ 696 */
695 bad_page_fault(regs, regs->dar, SIGBUS); 697 bad_page_fault(regs, regs->dar, SIGBUS);
696 return; 698 goto bail;
697#endif 699#endif
698 700
699 if (debugger_fault_handler(regs)) 701 if (debugger_fault_handler(regs))
700 return; 702 goto bail;
701 703
702 if (check_io_access(regs)) 704 if (check_io_access(regs))
703 return; 705 goto bail;
704 706
705 die("Machine check", regs, SIGBUS); 707 die("Machine check", regs, SIGBUS);
706 708
707 /* Must die if the interrupt is not recoverable */ 709 /* Must die if the interrupt is not recoverable */
708 if (!(regs->msr & MSR_RI)) 710 if (!(regs->msr & MSR_RI))
709 panic("Unrecoverable Machine check"); 711 panic("Unrecoverable Machine check");
712
713bail:
714 exception_exit(prev_state);
710} 715}
711 716
712void SMIException(struct pt_regs *regs) 717void SMIException(struct pt_regs *regs)
@@ -716,20 +721,29 @@ void SMIException(struct pt_regs *regs)
716 721
717void unknown_exception(struct pt_regs *regs) 722void unknown_exception(struct pt_regs *regs)
718{ 723{
724 enum ctx_state prev_state = exception_enter();
725
719 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 726 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
720 regs->nip, regs->msr, regs->trap); 727 regs->nip, regs->msr, regs->trap);
721 728
722 _exception(SIGTRAP, regs, 0, 0); 729 _exception(SIGTRAP, regs, 0, 0);
730
731 exception_exit(prev_state);
723} 732}
724 733
725void instruction_breakpoint_exception(struct pt_regs *regs) 734void instruction_breakpoint_exception(struct pt_regs *regs)
726{ 735{
736 enum ctx_state prev_state = exception_enter();
737
727 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 738 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
728 5, SIGTRAP) == NOTIFY_STOP) 739 5, SIGTRAP) == NOTIFY_STOP)
729 return; 740 goto bail;
730 if (debugger_iabr_match(regs)) 741 if (debugger_iabr_match(regs))
731 return; 742 goto bail;
732 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 743 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
744
745bail:
746 exception_exit(prev_state);
733} 747}
734 748
735void RunModeException(struct pt_regs *regs) 749void RunModeException(struct pt_regs *regs)
@@ -739,15 +753,20 @@ void RunModeException(struct pt_regs *regs)
739 753
740void __kprobes single_step_exception(struct pt_regs *regs) 754void __kprobes single_step_exception(struct pt_regs *regs)
741{ 755{
756 enum ctx_state prev_state = exception_enter();
757
742 clear_single_step(regs); 758 clear_single_step(regs);
743 759
744 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 760 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
745 5, SIGTRAP) == NOTIFY_STOP) 761 5, SIGTRAP) == NOTIFY_STOP)
746 return; 762 goto bail;
747 if (debugger_sstep(regs)) 763 if (debugger_sstep(regs))
748 return; 764 goto bail;
749 765
750 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 766 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
767
768bail:
769 exception_exit(prev_state);
751} 770}
752 771
753/* 772/*
@@ -1005,6 +1024,7 @@ int is_valid_bugaddr(unsigned long addr)
1005 1024
1006void __kprobes program_check_exception(struct pt_regs *regs) 1025void __kprobes program_check_exception(struct pt_regs *regs)
1007{ 1026{
1027 enum ctx_state prev_state = exception_enter();
1008 unsigned int reason = get_reason(regs); 1028 unsigned int reason = get_reason(regs);
1009 extern int do_mathemu(struct pt_regs *regs); 1029 extern int do_mathemu(struct pt_regs *regs);
1010 1030
@@ -1014,26 +1034,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1014 if (reason & REASON_FP) { 1034 if (reason & REASON_FP) {
1015 /* IEEE FP exception */ 1035 /* IEEE FP exception */
1016 parse_fpe(regs); 1036 parse_fpe(regs);
1017 return; 1037 goto bail;
1018 } 1038 }
1019 if (reason & REASON_TRAP) { 1039 if (reason & REASON_TRAP) {
1020 /* Debugger is first in line to stop recursive faults in 1040 /* Debugger is first in line to stop recursive faults in
1021 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1041 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1022 if (debugger_bpt(regs)) 1042 if (debugger_bpt(regs))
1023 return; 1043 goto bail;
1024 1044
1025 /* trap exception */ 1045 /* trap exception */
1026 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1046 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1027 == NOTIFY_STOP) 1047 == NOTIFY_STOP)
1028 return; 1048 goto bail;
1029 1049
1030 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1050 if (!(regs->msr & MSR_PR) && /* not user-mode */
1031 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1051 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1032 regs->nip += 4; 1052 regs->nip += 4;
1033 return; 1053 goto bail;
1034 } 1054 }
1035 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1055 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1036 return; 1056 goto bail;
1037 } 1057 }
1038#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1058#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1039 if (reason & REASON_TM) { 1059 if (reason & REASON_TM) {
@@ -1049,7 +1069,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1049 if (!user_mode(regs) && 1069 if (!user_mode(regs) &&
1050 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1070 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1051 regs->nip += 4; 1071 regs->nip += 4;
1052 return; 1072 goto bail;
1053 } 1073 }
1054 /* If usermode caused this, it's done something illegal and 1074 /* If usermode caused this, it's done something illegal and
1055 * gets a SIGILL slap on the wrist. We call it an illegal 1075 * gets a SIGILL slap on the wrist. We call it an illegal
@@ -1059,7 +1079,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1059 */ 1079 */
1060 if (user_mode(regs)) { 1080 if (user_mode(regs)) {
1061 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1081 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1062 return; 1082 goto bail;
1063 } else { 1083 } else {
1064 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1084 printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1065 "at %lx (msr 0x%x)\n", regs->nip, reason); 1085 "at %lx (msr 0x%x)\n", regs->nip, reason);
@@ -1083,16 +1103,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1083 switch (do_mathemu(regs)) { 1103 switch (do_mathemu(regs)) {
1084 case 0: 1104 case 0:
1085 emulate_single_step(regs); 1105 emulate_single_step(regs);
1086 return; 1106 goto bail;
1087 case 1: { 1107 case 1: {
1088 int code = 0; 1108 int code = 0;
1089 code = __parse_fpscr(current->thread.fpscr.val); 1109 code = __parse_fpscr(current->thread.fpscr.val);
1090 _exception(SIGFPE, regs, code, regs->nip); 1110 _exception(SIGFPE, regs, code, regs->nip);
1091 return; 1111 goto bail;
1092 } 1112 }
1093 case -EFAULT: 1113 case -EFAULT:
1094 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1114 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1095 return; 1115 goto bail;
1096 } 1116 }
1097 /* fall through on any other errors */ 1117 /* fall through on any other errors */
1098#endif /* CONFIG_MATH_EMULATION */ 1118#endif /* CONFIG_MATH_EMULATION */
@@ -1103,10 +1123,10 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1103 case 0: 1123 case 0:
1104 regs->nip += 4; 1124 regs->nip += 4;
1105 emulate_single_step(regs); 1125 emulate_single_step(regs);
1106 return; 1126 goto bail;
1107 case -EFAULT: 1127 case -EFAULT:
1108 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1128 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1109 return; 1129 goto bail;
1110 } 1130 }
1111 } 1131 }
1112 1132
@@ -1114,10 +1134,14 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1114 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1134 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1115 else 1135 else
1116 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1136 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1137
1138bail:
1139 exception_exit(prev_state);
1117} 1140}
1118 1141
1119void alignment_exception(struct pt_regs *regs) 1142void alignment_exception(struct pt_regs *regs)
1120{ 1143{
1144 enum ctx_state prev_state = exception_enter();
1121 int sig, code, fixed = 0; 1145 int sig, code, fixed = 0;
1122 1146
1123 /* We restore the interrupt state now */ 1147 /* We restore the interrupt state now */
@@ -1131,7 +1155,7 @@ void alignment_exception(struct pt_regs *regs)
1131 if (fixed == 1) { 1155 if (fixed == 1) {
1132 regs->nip += 4; /* skip over emulated instruction */ 1156 regs->nip += 4; /* skip over emulated instruction */
1133 emulate_single_step(regs); 1157 emulate_single_step(regs);
1134 return; 1158 goto bail;
1135 } 1159 }
1136 1160
1137 /* Operand address was bad */ 1161 /* Operand address was bad */
@@ -1146,6 +1170,9 @@ void alignment_exception(struct pt_regs *regs)
1146 _exception(sig, regs, code, regs->dar); 1170 _exception(sig, regs, code, regs->dar);
1147 else 1171 else
1148 bad_page_fault(regs, regs->dar, sig); 1172 bad_page_fault(regs, regs->dar, sig);
1173
1174bail:
1175 exception_exit(prev_state);
1149} 1176}
1150 1177
1151void StackOverflow(struct pt_regs *regs) 1178void StackOverflow(struct pt_regs *regs)
@@ -1174,23 +1201,32 @@ void trace_syscall(struct pt_regs *regs)
1174 1201
1175void kernel_fp_unavailable_exception(struct pt_regs *regs) 1202void kernel_fp_unavailable_exception(struct pt_regs *regs)
1176{ 1203{
1204 enum ctx_state prev_state = exception_enter();
1205
1177 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1206 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1178 "%lx at %lx\n", regs->trap, regs->nip); 1207 "%lx at %lx\n", regs->trap, regs->nip);
1179 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1208 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1209
1210 exception_exit(prev_state);
1180} 1211}
1181 1212
1182void altivec_unavailable_exception(struct pt_regs *regs) 1213void altivec_unavailable_exception(struct pt_regs *regs)
1183{ 1214{
1215 enum ctx_state prev_state = exception_enter();
1216
1184 if (user_mode(regs)) { 1217 if (user_mode(regs)) {
1185 /* A user program has executed an altivec instruction, 1218 /* A user program has executed an altivec instruction,
1186 but this kernel doesn't support altivec. */ 1219 but this kernel doesn't support altivec. */
1187 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1220 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1188 return; 1221 goto bail;
1189 } 1222 }
1190 1223
1191 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1224 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1192 "%lx at %lx\n", regs->trap, regs->nip); 1225 "%lx at %lx\n", regs->trap, regs->nip);
1193 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1226 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1227
1228bail:
1229 exception_exit(prev_state);
1194} 1230}
1195 1231
1196void vsx_unavailable_exception(struct pt_regs *regs) 1232void vsx_unavailable_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 13b867093499..9d3fdcd66290 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -64,6 +64,9 @@ void __init udbg_early_init(void)
64 udbg_init_usbgecko(); 64 udbg_init_usbgecko();
65#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) 65#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
66 udbg_init_wsp(); 66 udbg_init_wsp();
67#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
68 /* In memory console */
69 udbg_init_memcons();
67#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) 70#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
68 udbg_init_ehv_bc(); 71 udbg_init_ehv_bc();
69#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) 72#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 229951ffc351..8726779e1409 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,6 +32,7 @@
32#include <linux/perf_event.h> 32#include <linux/perf_event.h>
33#include <linux/magic.h> 33#include <linux/magic.h>
34#include <linux/ratelimit.h> 34#include <linux/ratelimit.h>
35#include <linux/context_tracking.h>
35 36
36#include <asm/firmware.h> 37#include <asm/firmware.h>
37#include <asm/page.h> 38#include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
196int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, 197int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
197 unsigned long error_code) 198 unsigned long error_code)
198{ 199{
200 enum ctx_state prev_state = exception_enter();
199 struct vm_area_struct * vma; 201 struct vm_area_struct * vma;
200 struct mm_struct *mm = current->mm; 202 struct mm_struct *mm = current->mm;
201 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 203 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
204 int trap = TRAP(regs); 206 int trap = TRAP(regs);
205 int is_exec = trap == 0x400; 207 int is_exec = trap == 0x400;
206 int fault; 208 int fault;
209 int rc = 0;
207 210
208#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) 211#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
209 /* 212 /*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
230 * look at it 233 * look at it
231 */ 234 */
232 if (error_code & ICSWX_DSI_UCT) { 235 if (error_code & ICSWX_DSI_UCT) {
233 int rc = acop_handle_fault(regs, address, error_code); 236 rc = acop_handle_fault(regs, address, error_code);
234 if (rc) 237 if (rc)
235 return rc; 238 goto bail;
236 } 239 }
237#endif /* CONFIG_PPC_ICSWX */ 240#endif /* CONFIG_PPC_ICSWX */
238 241
239 if (notify_page_fault(regs)) 242 if (notify_page_fault(regs))
240 return 0; 243 goto bail;
241 244
242 if (unlikely(debugger_fault_handler(regs))) 245 if (unlikely(debugger_fault_handler(regs)))
243 return 0; 246 goto bail;
244 247
245 /* On a kernel SLB miss we can only check for a valid exception entry */ 248 /* On a kernel SLB miss we can only check for a valid exception entry */
246 if (!user_mode(regs) && (address >= TASK_SIZE)) 249 if (!user_mode(regs) && (address >= TASK_SIZE)) {
247 return SIGSEGV; 250 rc = SIGSEGV;
251 goto bail;
252 }
248 253
249#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ 254#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
250 defined(CONFIG_PPC_BOOK3S_64)) 255 defined(CONFIG_PPC_BOOK3S_64))
251 if (error_code & DSISR_DABRMATCH) { 256 if (error_code & DSISR_DABRMATCH) {
252 /* breakpoint match */ 257 /* breakpoint match */
253 do_break(regs, address, error_code); 258 do_break(regs, address, error_code);
254 return 0; 259 goto bail;
255 } 260 }
256#endif 261#endif
257 262
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
260 local_irq_enable(); 265 local_irq_enable();
261 266
262 if (in_atomic() || mm == NULL) { 267 if (in_atomic() || mm == NULL) {
263 if (!user_mode(regs)) 268 if (!user_mode(regs)) {
264 return SIGSEGV; 269 rc = SIGSEGV;
270 goto bail;
271 }
265 /* in_atomic() in user mode is really bad, 272 /* in_atomic() in user mode is really bad,
266 as is current->mm == NULL. */ 273 as is current->mm == NULL. */
267 printk(KERN_EMERG "Page fault in user mode with " 274 printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ good_area:
417 */ 424 */
418 fault = handle_mm_fault(mm, vma, address, flags); 425 fault = handle_mm_fault(mm, vma, address, flags);
419 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { 426 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
420 int rc = mm_fault_error(regs, address, fault); 427 rc = mm_fault_error(regs, address, fault);
421 if (rc >= MM_FAULT_RETURN) 428 if (rc >= MM_FAULT_RETURN)
422 return rc; 429 goto bail;
430 else
431 rc = 0;
423 } 432 }
424 433
425 /* 434 /*
@@ -454,7 +463,7 @@ good_area:
454 } 463 }
455 464
456 up_read(&mm->mmap_sem); 465 up_read(&mm->mmap_sem);
457 return 0; 466 goto bail;
458 467
459bad_area: 468bad_area:
460 up_read(&mm->mmap_sem); 469 up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ bad_area_nosemaphore:
463 /* User mode accesses cause a SIGSEGV */ 472 /* User mode accesses cause a SIGSEGV */
464 if (user_mode(regs)) { 473 if (user_mode(regs)) {
465 _exception(SIGSEGV, regs, code, address); 474 _exception(SIGSEGV, regs, code, address);
466 return 0; 475 goto bail;
467 } 476 }
468 477
469 if (is_exec && (error_code & DSISR_PROTFAULT)) 478 if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ bad_area_nosemaphore:
471 " page (%lx) - exploit attempt? (uid: %d)\n", 480 " page (%lx) - exploit attempt? (uid: %d)\n",
472 address, from_kuid(&init_user_ns, current_uid())); 481 address, from_kuid(&init_user_ns, current_uid()));
473 482
474 return SIGSEGV; 483 rc = SIGSEGV;
484
485bail:
486 exception_exit(prev_state);
487 return rc;
475 488
476} 489}
477 490
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 88ac0eeaadde..e303a6d74e3a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -33,6 +33,7 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/signal.h> 34#include <linux/signal.h>
35#include <linux/memblock.h> 35#include <linux/memblock.h>
36#include <linux/context_tracking.h>
36 37
37#include <asm/processor.h> 38#include <asm/processor.h>
38#include <asm/pgtable.h> 39#include <asm/pgtable.h>
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
954 */ 955 */
955int hash_page(unsigned long ea, unsigned long access, unsigned long trap) 956int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
956{ 957{
958 enum ctx_state prev_state = exception_enter();
957 pgd_t *pgdir; 959 pgd_t *pgdir;
958 unsigned long vsid; 960 unsigned long vsid;
959 struct mm_struct *mm; 961 struct mm_struct *mm;
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
973 mm = current->mm; 975 mm = current->mm;
974 if (! mm) { 976 if (! mm) {
975 DBG_LOW(" user region with no mm !\n"); 977 DBG_LOW(" user region with no mm !\n");
976 return 1; 978 rc = 1;
979 goto bail;
977 } 980 }
978 psize = get_slice_psize(mm, ea); 981 psize = get_slice_psize(mm, ea);
979 ssize = user_segment_size(ea); 982 ssize = user_segment_size(ea);
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
992 /* Not a valid range 995 /* Not a valid range
993 * Send the problem up to do_page_fault 996 * Send the problem up to do_page_fault
994 */ 997 */
995 return 1; 998 rc = 1;
999 goto bail;
996 } 1000 }
997 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); 1001 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
998 1002
999 /* Bad address. */ 1003 /* Bad address. */
1000 if (!vsid) { 1004 if (!vsid) {
1001 DBG_LOW("Bad address!\n"); 1005 DBG_LOW("Bad address!\n");
1002 return 1; 1006 rc = 1;
1007 goto bail;
1003 } 1008 }
1004 /* Get pgdir */ 1009 /* Get pgdir */
1005 pgdir = mm->pgd; 1010 pgdir = mm->pgd;
1006 if (pgdir == NULL) 1011 if (pgdir == NULL) {
1007 return 1; 1012 rc = 1;
1013 goto bail;
1014 }
1008 1015
1009 /* Check CPU locality */ 1016 /* Check CPU locality */
1010 tmp = cpumask_of(smp_processor_id()); 1017 tmp = cpumask_of(smp_processor_id());
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
1027 ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); 1034 ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
1028 if (ptep == NULL || !pte_present(*ptep)) { 1035 if (ptep == NULL || !pte_present(*ptep)) {
1029 DBG_LOW(" no PTE !\n"); 1036 DBG_LOW(" no PTE !\n");
1030 return 1; 1037 rc = 1;
1038 goto bail;
1031 } 1039 }
1032 1040
1033 /* Add _PAGE_PRESENT to the required access perm */ 1041 /* Add _PAGE_PRESENT to the required access perm */
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
1038 */ 1046 */
1039 if (access & ~pte_val(*ptep)) { 1047 if (access & ~pte_val(*ptep)) {
1040 DBG_LOW(" no access !\n"); 1048 DBG_LOW(" no access !\n");
1041 return 1; 1049 rc = 1;
1050 goto bail;
1042 } 1051 }
1043 1052
1044#ifdef CONFIG_HUGETLB_PAGE 1053#ifdef CONFIG_HUGETLB_PAGE
1045 if (hugeshift) 1054 if (hugeshift) {
1046 return __hash_page_huge(ea, access, vsid, ptep, trap, local, 1055 rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
1047 ssize, hugeshift, psize); 1056 ssize, hugeshift, psize);
1057 goto bail;
1058 }
1048#endif /* CONFIG_HUGETLB_PAGE */ 1059#endif /* CONFIG_HUGETLB_PAGE */
1049 1060
1050#ifndef CONFIG_PPC_64K_PAGES 1061#ifndef CONFIG_PPC_64K_PAGES
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
1124 pte_val(*(ptep + PTRS_PER_PTE))); 1135 pte_val(*(ptep + PTRS_PER_PTE)));
1125#endif 1136#endif
1126 DBG_LOW(" -> rc=%d\n", rc); 1137 DBG_LOW(" -> rc=%d\n", rc);
1138
1139bail:
1140 exception_exit(prev_state);
1127 return rc; 1141 return rc;
1128} 1142}
1129EXPORT_SYMBOL_GPL(hash_page); 1143EXPORT_SYMBOL_GPL(hash_page);
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
1259 */ 1273 */
1260void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) 1274void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
1261{ 1275{
1276 enum ctx_state prev_state = exception_enter();
1277
1262 if (user_mode(regs)) { 1278 if (user_mode(regs)) {
1263#ifdef CONFIG_PPC_SUBPAGE_PROT 1279#ifdef CONFIG_PPC_SUBPAGE_PROT
1264 if (rc == -2) 1280 if (rc == -2)
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
1268 _exception(SIGBUS, regs, BUS_ADRERR, address); 1284 _exception(SIGBUS, regs, BUS_ADRERR, address);
1269 } else 1285 } else
1270 bad_page_fault(regs, address, SIGBUS); 1286 bad_page_fault(regs, address, SIGBUS);
1287
1288 exception_exit(prev_state);
1271} 1289}
1272 1290
1273long hpte_insert_repeating(unsigned long hash, unsigned long vpn, 1291long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c2787bf779ca..a90b9c458990 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
215 unsigned long phys) 215 unsigned long phys)
216{ 216{
217 int mapped = htab_bolt_mapping(start, start + page_size, phys, 217 int mapped = htab_bolt_mapping(start, start + page_size, phys,
218 PAGE_KERNEL, mmu_vmemmap_psize, 218 pgprot_val(PAGE_KERNEL),
219 mmu_vmemmap_psize,
219 mmu_kernel_ssize); 220 mmu_kernel_ssize);
220 BUG_ON(mapped < 0); 221 BUG_ON(mapped < 0);
221} 222}
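Note: the init_64.c change passes pgprot_val(PAGE_KERNEL) because htab_bolt_mapping() takes a plain unsigned long, and with STRICT_MM_TYPECHECKS a pgprot_t is a one-member struct rather than a bare integer. A small stand-alone sketch of that wrapper idea follows; the types and flag value are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

typedef struct { unsigned long pgprot; } pgprot_t;

#define __pgprot(x)	((pgprot_t) { (x) })
#define pgprot_val(x)	((x).pgprot)

#define PAGE_KERNEL	__pgprot(0x1234UL)	/* invented flag bits */

static int htab_bolt_mapping(unsigned long prot)
{
	printf("bolting with prot=%#lx\n", prot);
	return 0;
}

int main(void)
{
	/* Passing PAGE_KERNEL directly would not compile: wrong type. */
	return htab_bolt_mapping(pgprot_val(PAGE_KERNEL));
}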
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index c627843c5b2e..426180b84978 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -13,11 +13,13 @@
13#include <linux/perf_event.h> 13#include <linux/perf_event.h>
14#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <linux/hardirq.h> 15#include <linux/hardirq.h>
16#include <linux/uaccess.h>
16#include <asm/reg.h> 17#include <asm/reg.h>
17#include <asm/pmc.h> 18#include <asm/pmc.h>
18#include <asm/machdep.h> 19#include <asm/machdep.h>
19#include <asm/firmware.h> 20#include <asm/firmware.h>
20#include <asm/ptrace.h> 21#include <asm/ptrace.h>
22#include <asm/code-patching.h>
21 23
22#define BHRB_MAX_ENTRIES 32 24#define BHRB_MAX_ENTRIES 32
23#define BHRB_TARGET 0x0000000000000002 25#define BHRB_TARGET 0x0000000000000002
@@ -100,6 +102,10 @@ static inline int siar_valid(struct pt_regs *regs)
100 return 1; 102 return 1;
101} 103}
102 104
105static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
106static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
107void power_pmu_flush_branch_stack(void) {}
108static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
103#endif /* CONFIG_PPC32 */ 109#endif /* CONFIG_PPC32 */
104 110
105static bool regs_use_siar(struct pt_regs *regs) 111static bool regs_use_siar(struct pt_regs *regs)
@@ -308,6 +314,159 @@ static inline int siar_valid(struct pt_regs *regs)
308 return 1; 314 return 1;
309} 315}
310 316
317
318/* Reset all possible BHRB entries */
319static void power_pmu_bhrb_reset(void)
320{
321 asm volatile(PPC_CLRBHRB);
322}
323
324static void power_pmu_bhrb_enable(struct perf_event *event)
325{
326 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
327
328 if (!ppmu->bhrb_nr)
329 return;
330
331 /* Clear BHRB if we changed task context to avoid data leaks */
332 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
333 power_pmu_bhrb_reset();
334 cpuhw->bhrb_context = event->ctx;
335 }
336 cpuhw->bhrb_users++;
337}
338
339static void power_pmu_bhrb_disable(struct perf_event *event)
340{
341 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
342
343 if (!ppmu->bhrb_nr)
344 return;
345
346 cpuhw->bhrb_users--;
347 WARN_ON_ONCE(cpuhw->bhrb_users < 0);
348
349 if (!cpuhw->disabled && !cpuhw->bhrb_users) {
350 /* BHRB cannot be turned off when other
351 * events are active on the PMU.
352 */
353
354 /* avoid stale pointer */
355 cpuhw->bhrb_context = NULL;
356 }
357}
358
359/* Called from ctxsw to prevent one process's branch entries from
360 * mingling with another process's entries during a context switch.
361 */
362void power_pmu_flush_branch_stack(void)
363{
364 if (ppmu->bhrb_nr)
365 power_pmu_bhrb_reset();
366}
367/* Calculate the to address for a branch */
368static __u64 power_pmu_bhrb_to(u64 addr)
369{
370 unsigned int instr;
371 int ret;
372 __u64 target;
373
374 if (is_kernel_addr(addr))
375 return branch_target((unsigned int *)addr);
376
377 /* Userspace: need to copy the instruction, then translate it */
378 pagefault_disable();
379 ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
380 if (ret) {
381 pagefault_enable();
382 return 0;
383 }
384 pagefault_enable();
385
386 target = branch_target(&instr);
387 if ((!target) || (instr & BRANCH_ABSOLUTE))
388 return target;
389
390 /* Translate relative branch target from kernel to user address */
391 return target - (unsigned long)&instr + addr;
392}
393
394/* Processing BHRB entries */
395void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
396{
397 u64 val;
398 u64 addr;
399 int r_index, u_index, pred;
400
401 r_index = 0;
402 u_index = 0;
403 while (r_index < ppmu->bhrb_nr) {
404 /* Assembly read function */
405 val = read_bhrb(r_index++);
406 if (!val)
407 /* Terminal marker: End of valid BHRB entries */
408 break;
409 else {
410 addr = val & BHRB_EA;
411 pred = val & BHRB_PREDICTION;
412
413 if (!addr)
414 /* invalid entry */
415 continue;
416
417 /* Branches are read most recent first (ie. mfbhrb 0 is
418 * the most recent branch).
419 * There are two types of valid entries:
420 * 1) a target entry which is the to address of a
421 * computed goto like a blr,bctr,btar. The next
422 * entry read from the bhrb will be the branch
423 * corresponding to this target (ie. the actual
424 * blr/bctr/btar instruction).
425 * 2) a from address which is an actual branch. If a
426 * target entry precedes this, then this is the
427 * matching branch for that target. If this is not
428 * following a target entry, then this is a branch
429 * where the target is given as an immediate field
430 * in the instruction (ie. an i or b form branch).
431 * In this case we need to read the instruction from
432 * memory to determine the target/to address.
433 */
434
435 if (val & BHRB_TARGET) {
436 /* Target branches use two entries
437 * (ie. computed gotos/XL form)
438 */
439 cpuhw->bhrb_entries[u_index].to = addr;
440 cpuhw->bhrb_entries[u_index].mispred = pred;
441 cpuhw->bhrb_entries[u_index].predicted = ~pred;
442
443 /* Get from address in next entry */
444 val = read_bhrb(r_index++);
445 addr = val & BHRB_EA;
446 if (val & BHRB_TARGET) {
447 /* Shouldn't have two targets in a
448 * row. Reset the index and try again. */
449 r_index--;
450 addr = 0;
451 }
452 cpuhw->bhrb_entries[u_index].from = addr;
453 } else {
454 /* Branches to an immediate field
455 * (i.e. I or B form) */
456 cpuhw->bhrb_entries[u_index].from = addr;
457 cpuhw->bhrb_entries[u_index].to =
458 power_pmu_bhrb_to(addr);
459 cpuhw->bhrb_entries[u_index].mispred = pred;
460 cpuhw->bhrb_entries[u_index].predicted = ~pred;
461 }
462 u_index++;
463
464 }
465 }
466 cpuhw->bhrb_stack.nr = u_index;
467 return;
468}
469
311#endif /* CONFIG_PPC64 */ 470#endif /* CONFIG_PPC64 */
312 471
313static void perf_event_interrupt(struct pt_regs *regs); 472static void perf_event_interrupt(struct pt_regs *regs);
@@ -904,47 +1063,6 @@ static int collect_events(struct perf_event *group, int max_count,
904 return n; 1063 return n;
905} 1064}
906 1065
907/* Reset all possible BHRB entries */
908static void power_pmu_bhrb_reset(void)
909{
910 asm volatile(PPC_CLRBHRB);
911}
912
913void power_pmu_bhrb_enable(struct perf_event *event)
914{
915 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
916
917 if (!ppmu->bhrb_nr)
918 return;
919
920 /* Clear BHRB if we changed task context to avoid data leaks */
921 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
922 power_pmu_bhrb_reset();
923 cpuhw->bhrb_context = event->ctx;
924 }
925 cpuhw->bhrb_users++;
926}
927
928void power_pmu_bhrb_disable(struct perf_event *event)
929{
930 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
931
932 if (!ppmu->bhrb_nr)
933 return;
934
935 cpuhw->bhrb_users--;
936 WARN_ON_ONCE(cpuhw->bhrb_users < 0);
937
938 if (!cpuhw->disabled && !cpuhw->bhrb_users) {
939 /* BHRB cannot be turned off when other
940 * events are active on the PMU.
941 */
942
943 /* avoid stale pointer */
944 cpuhw->bhrb_context = NULL;
945 }
946}
947
948/* 1066/*
949 * Add a event to the PMU. 1067 * Add a event to the PMU.
950 * If all events are not already frozen, then we disable and 1068 * If all events are not already frozen, then we disable and
@@ -1180,15 +1298,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
1180 return 0; 1298 return 0;
1181} 1299}
1182 1300
1183/* Called from ctxsw to prevent one process's branch entries to
1184 * mingle with the other process's entries during context switch.
1185 */
1186void power_pmu_flush_branch_stack(void)
1187{
1188 if (ppmu->bhrb_nr)
1189 power_pmu_bhrb_reset();
1190}
1191
1192/* 1301/*
1193 * Return 1 if we might be able to put event on a limited PMC, 1302 * Return 1 if we might be able to put event on a limited PMC,
1194 * or 0 if not. 1303 * or 0 if not.
@@ -1458,77 +1567,6 @@ struct pmu power_pmu = {
1458 .flush_branch_stack = power_pmu_flush_branch_stack, 1567 .flush_branch_stack = power_pmu_flush_branch_stack,
1459}; 1568};
1460 1569
1461/* Processing BHRB entries */
1462void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
1463{
1464 u64 val;
1465 u64 addr;
1466 int r_index, u_index, target, pred;
1467
1468 r_index = 0;
1469 u_index = 0;
1470 while (r_index < ppmu->bhrb_nr) {
1471 /* Assembly read function */
1472 val = read_bhrb(r_index);
1473
1474 /* Terminal marker: End of valid BHRB entries */
1475 if (val == 0) {
1476 break;
1477 } else {
1478 /* BHRB field break up */
1479 addr = val & BHRB_EA;
1480 pred = val & BHRB_PREDICTION;
1481 target = val & BHRB_TARGET;
1482
1483 /* Probable Missed entry: Not applicable for POWER8 */
1484 if ((addr == 0) && (target == 0) && (pred == 1)) {
1485 r_index++;
1486 continue;
1487 }
1488
1489 /* Real Missed entry: Power8 based missed entry */
1490 if ((addr == 0) && (target == 1) && (pred == 1)) {
1491 r_index++;
1492 continue;
1493 }
1494
1495 /* Reserved condition: Not a valid entry */
1496 if ((addr == 0) && (target == 1) && (pred == 0)) {
1497 r_index++;
1498 continue;
1499 }
1500
1501 /* Is a target address */
1502 if (val & BHRB_TARGET) {
1503 /* First address cannot be a target address */
1504 if (r_index == 0) {
1505 r_index++;
1506 continue;
1507 }
1508
1509 /* Update target address for the previous entry */
1510 cpuhw->bhrb_entries[u_index - 1].to = addr;
1511 cpuhw->bhrb_entries[u_index - 1].mispred = pred;
1512 cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
1513
1514 /* Dont increment u_index */
1515 r_index++;
1516 } else {
1517 /* Update address, flags for current entry */
1518 cpuhw->bhrb_entries[u_index].from = addr;
1519 cpuhw->bhrb_entries[u_index].mispred = pred;
1520 cpuhw->bhrb_entries[u_index].predicted = ~pred;
1521
1522 /* Successfully popullated one entry */
1523 u_index++;
1524 r_index++;
1525 }
1526 }
1527 }
1528 cpuhw->bhrb_stack.nr = u_index;
1529 return;
1530}
1531
1532/* 1570/*
1533 * A counter has overflowed; update its count and record 1571 * A counter has overflowed; update its count and record
1534 * things if requested. Note that interrupts are hard-disabled 1572 * things if requested. Note that interrupts are hard-disabled
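Note: the rewritten power_pmu_bhrb_read() above walks the buffer most-recent-first; an entry with BHRB_TARGET set carries the "to" address of a computed branch and is paired with the "from" address in the next entry, while a plain entry is a branch whose target must be recovered from the instruction's immediate field. A stand-alone sketch of that decode loop, run against a fabricated buffer: the constants mirror core-book3s.c, the data and read_bhrb() stub are invented.

#include <stdio.h>
#include <stdint.h>

#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002ULL
#define BHRB_PREDICTION		0x0000000000000001ULL
#define BHRB_EA			0xFFFFFFFFFFFFFFFCULL

struct branch_entry { uint64_t from, to; int mispred; };

/* Fabricated BHRB contents: a target entry, its branch, then the terminator. */
static const uint64_t fake_bhrb[BHRB_MAX_ENTRIES] = {
	0x2000 | BHRB_TARGET,		/* "to" address of a blr/bctr-style branch */
	0x1000 | BHRB_PREDICTION,	/* the branch itself, flagged mispredicted */
	0,
};

static uint64_t read_bhrb(int idx)	/* stands in for the mfbhrb asm helper */
{
	return fake_bhrb[idx];
}

int main(void)
{
	struct branch_entry ent[BHRB_MAX_ENTRIES] = { { 0 } };
	int r_index = 0, u_index = 0;

	while (r_index < BHRB_MAX_ENTRIES) {
		uint64_t val = read_bhrb(r_index++);
		uint64_t addr = val & BHRB_EA;
		int pred = !!(val & BHRB_PREDICTION);

		if (!val)
			break;			/* terminal marker */
		if (!addr)
			continue;		/* invalid entry */

		if (val & BHRB_TARGET) {
			/* Two-entry form: this is the "to", the next is the "from" */
			ent[u_index].to = addr;
			ent[u_index].mispred = pred;
			val = read_bhrb(r_index++);
			ent[u_index].from = val & BHRB_EA;
		} else {
			/* Immediate-field branch: the real code decodes the target
			 * from the instruction via power_pmu_bhrb_to() */
			ent[u_index].from = addr;
			ent[u_index].to = 0;
			ent[u_index].mispred = pred;
		}
		u_index++;
	}
	printf("decoded %d entries, from=%#llx to=%#llx\n", u_index,
	       (unsigned long long)ent[0].from, (unsigned long long)ent[0].to);
	return 0;
}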
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index a881232a3cce..b62aab3e22ec 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON
128 128
129config RTAS_PROC 129config RTAS_PROC
130 bool "Proc interface to RTAS" 130 bool "Proc interface to RTAS"
131 depends on PPC_RTAS 131 depends on PPC_RTAS && PROC_FS
132 default y 132 default y
133 133
134config RTAS_FLASH 134config RTAS_FLASH
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index ade4463226c6..628c564ceadb 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -15,6 +15,7 @@
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/slab.h>
18#include <asm/opal.h> 19#include <asm/opal.h>
19#include <asm/firmware.h> 20#include <asm/firmware.h>
20 21
@@ -28,6 +29,8 @@ struct opal {
28static struct device_node *opal_node; 29static struct device_node *opal_node;
29static DEFINE_SPINLOCK(opal_write_lock); 30static DEFINE_SPINLOCK(opal_write_lock);
30extern u64 opal_mc_secondary_handler[]; 31extern u64 opal_mc_secondary_handler[];
32static unsigned int *opal_irqs;
33static unsigned int opal_irq_count;
31 34
32int __init early_init_dt_scan_opal(unsigned long node, 35int __init early_init_dt_scan_opal(unsigned long node,
33 const char *uname, int depth, void *data) 36 const char *uname, int depth, void *data)
@@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
53 opal.entry, entryp, entrysz); 56 opal.entry, entryp, entrysz);
54 57
55 powerpc_firmware_features |= FW_FEATURE_OPAL; 58 powerpc_firmware_features |= FW_FEATURE_OPAL;
56 if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { 59 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
60 powerpc_firmware_features |= FW_FEATURE_OPALv2;
61 powerpc_firmware_features |= FW_FEATURE_OPALv3;
62 printk("OPAL V3 detected !\n");
63 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
57 powerpc_firmware_features |= FW_FEATURE_OPALv2; 64 powerpc_firmware_features |= FW_FEATURE_OPALv2;
58 printk("OPAL V2 detected !\n"); 65 printk("OPAL V2 detected !\n");
59 } else { 66 } else {
@@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
144 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { 151 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
145 len = total_len; 152 len = total_len;
146 rc = opal_console_write(vtermno, &len, data); 153 rc = opal_console_write(vtermno, &len, data);
154
155 /* Closed or other error drop */
156 if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
157 rc != OPAL_BUSY_EVENT) {
158 written = total_len;
159 break;
160 }
147 if (rc == OPAL_SUCCESS) { 161 if (rc == OPAL_SUCCESS) {
148 total_len -= len; 162 total_len -= len;
149 data += len; 163 data += len;
@@ -316,6 +330,8 @@ static int __init opal_init(void)
316 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); 330 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
317 pr_debug("opal: Found %d interrupts reserved for OPAL\n", 331 pr_debug("opal: Found %d interrupts reserved for OPAL\n",
318 irqs ? (irqlen / 4) : 0); 332 irqs ? (irqlen / 4) : 0);
333 opal_irq_count = irqlen / 4;
334 opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
319 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { 335 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
320 unsigned int hwirq = be32_to_cpup(irqs); 336 unsigned int hwirq = be32_to_cpup(irqs);
321 unsigned int irq = irq_create_mapping(NULL, hwirq); 337 unsigned int irq = irq_create_mapping(NULL, hwirq);
@@ -327,7 +343,19 @@ static int __init opal_init(void)
327 if (rc) 343 if (rc)
328 pr_warning("opal: Error %d requesting irq %d" 344 pr_warning("opal: Error %d requesting irq %d"
329 " (0x%x)\n", rc, irq, hwirq); 345 " (0x%x)\n", rc, irq, hwirq);
346 opal_irqs[i] = irq;
330 } 347 }
331 return 0; 348 return 0;
332} 349}
333subsys_initcall(opal_init); 350subsys_initcall(opal_init);
351
352void opal_shutdown(void)
353{
354 unsigned int i;
355
356 for (i = 0; i < opal_irq_count; i++) {
357 if (opal_irqs[i])
358 free_irq(opal_irqs[i], 0);
359 opal_irqs[i] = 0;
360 }
361}
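Note: opal_init() above now remembers the Linux IRQ number for every OPAL interrupt it requests, so the new opal_shutdown() can release them all before a kexec. A tiny stand-alone model of that record-then-free pattern; calloc/printf stand in for kzalloc/request_irq/free_irq and the numbers are invented.

#include <stdio.h>
#include <stdlib.h>

static unsigned int *opal_irqs;
static unsigned int opal_irq_count;

static void opal_request_irqs(const unsigned int *hwirqs, unsigned int count)
{
	unsigned int i;

	opal_irq_count = count;
	opal_irqs = calloc(count, sizeof(*opal_irqs));
	for (i = 0; i < count; i++) {
		/* in the kernel: irq_create_mapping() + request_irq() */
		opal_irqs[i] = hwirqs[i] + 16;	/* pretend virq mapping */
		printf("requested irq %u (hw %u)\n", opal_irqs[i], hwirqs[i]);
	}
}

static void opal_shutdown(void)
{
	unsigned int i;

	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			printf("freeing irq %u\n", opal_irqs[i]);	/* free_irq() */
		opal_irqs[i] = 0;
	}
	free(opal_irqs);
}

int main(void)
{
	unsigned int hw[] = { 3, 4, 5 };

	opal_request_irqs(hw, 3);
	opal_shutdown();
	return 0;
}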
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1da578b7c1bf..3937aaae5bc4 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1048,6 +1048,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
1048 return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; 1048 return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
1049} 1049}
1050 1050
1051static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
1052{
1053 opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
1054 OPAL_ASSERT_RESET);
1055}
1056
1051void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) 1057void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
1052{ 1058{
1053 struct pci_controller *hose; 1059 struct pci_controller *hose;
@@ -1178,6 +1184,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
1178 /* Setup TCEs */ 1184 /* Setup TCEs */
1179 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; 1185 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
1180 1186
1187 /* Setup shutdown function for kexec */
1188 phb->shutdown = pnv_pci_ioda_shutdown;
1189
1181 /* Setup MSI support */ 1190 /* Setup MSI support */
1182 pnv_pci_init_ioda_msis(phb); 1191 pnv_pci_init_ioda_msis(phb);
1183 1192
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 55dfca844ddf..163bd7422f1c 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -450,6 +450,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
450 pnv_pci_dma_fallback_setup(hose, pdev); 450 pnv_pci_dma_fallback_setup(hose, pdev);
451} 451}
452 452
453void pnv_pci_shutdown(void)
454{
455 struct pci_controller *hose;
456
457 list_for_each_entry(hose, &hose_list, list_node) {
458 struct pnv_phb *phb = hose->private_data;
459
460 if (phb && phb->shutdown)
461 phb->shutdown(phb);
462 }
463}
464
453/* Fixup wrong class code in p7ioc and p8 root complex */ 465/* Fixup wrong class code in p7ioc and p8 root complex */
454static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) 466static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
455{ 467{
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 48dc4bb856a1..25d76c4df50b 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -86,6 +86,7 @@ struct pnv_phb {
86 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); 86 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
87 void (*fixup_phb)(struct pci_controller *hose); 87 void (*fixup_phb)(struct pci_controller *hose);
88 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); 88 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
89 void (*shutdown)(struct pnv_phb *phb);
89 90
90 union { 91 union {
91 struct { 92 struct {
@@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np);
158extern void pnv_pci_init_ioda2_phb(struct device_node *np); 159extern void pnv_pci_init_ioda2_phb(struct device_node *np);
159extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, 160extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
160 u64 *startp, u64 *endp); 161 u64 *startp, u64 *endp);
162
161#endif /* __POWERNV_PCI_H */ 163#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 8a9df7f9667e..a1c6f83fc391 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { }
9 9
10#ifdef CONFIG_PCI 10#ifdef CONFIG_PCI
11extern void pnv_pci_init(void); 11extern void pnv_pci_init(void);
12extern void pnv_pci_shutdown(void);
12#else 13#else
13static inline void pnv_pci_init(void) { } 14static inline void pnv_pci_init(void) { }
15static inline void pnv_pci_shutdown(void) { }
14#endif 16#endif
15 17
16#endif /* _POWERNV_H */ 18#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index db1ad1c8f68f..d4459bfc92f7 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m)
78 if (root) 78 if (root)
79 model = of_get_property(root, "model", NULL); 79 model = of_get_property(root, "model", NULL);
80 seq_printf(m, "machine\t\t: PowerNV %s\n", model); 80 seq_printf(m, "machine\t\t: PowerNV %s\n", model);
81 if (firmware_has_feature(FW_FEATURE_OPALv2)) 81 if (firmware_has_feature(FW_FEATURE_OPALv3))
82 seq_printf(m, "firmware\t: OPAL v3\n");
83 else if (firmware_has_feature(FW_FEATURE_OPALv2))
82 seq_printf(m, "firmware\t: OPAL v2\n"); 84 seq_printf(m, "firmware\t: OPAL v2\n");
83 else if (firmware_has_feature(FW_FEATURE_OPAL)) 85 else if (firmware_has_feature(FW_FEATURE_OPAL))
84 seq_printf(m, "firmware\t: OPAL v1\n"); 86 seq_printf(m, "firmware\t: OPAL v1\n");
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex)
126{ 128{
127} 129}
128 130
131static void pnv_shutdown(void)
132{
133 /* Let the PCI code clear up IODA tables */
134 pnv_pci_shutdown();
135
136 /* And unregister all OPAL interrupts so they don't fire
137 * up while we kexec
138 */
139 opal_shutdown();
140}
141
129#ifdef CONFIG_KEXEC 142#ifdef CONFIG_KEXEC
130static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) 143static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
131{ 144{
@@ -187,6 +200,7 @@ define_machine(powernv) {
187 .init_IRQ = pnv_init_IRQ, 200 .init_IRQ = pnv_init_IRQ,
188 .show_cpuinfo = pnv_show_cpuinfo, 201 .show_cpuinfo = pnv_show_cpuinfo,
189 .progress = pnv_progress, 202 .progress = pnv_progress,
203 .machine_shutdown = pnv_shutdown,
190 .power_save = power7_idle, 204 .power_save = power7_idle,
191 .calibrate_decr = generic_calibrate_decr, 205 .calibrate_decr = generic_calibrate_decr,
192#ifdef CONFIG_KEXEC 206#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 6a3ecca5b725..88c9459c3e07 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr)
71 71
72 BUG_ON(nr < 0 || nr >= NR_CPUS); 72 BUG_ON(nr < 0 || nr >= NR_CPUS);
73 73
74 /* On OPAL v2 the CPU are still spinning inside OPAL itself, 74 /*
75 * get them back now 75 * If the CPU has already started, or OPALv2 is not supported,
76 * we just kick the CPU via the PACA
76 */ 77 */
77 if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { 78 if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
78 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); 79 goto kick;
79 rc = opal_start_cpu(pcpu, start_here); 80
81 /*
82 * At this point, the CPU can either be spinning on the way in
83 * from kexec or be inside OPAL waiting to be started for the
84 * first time. OPAL v3 allows us to query OPAL to know if it
85 * has the CPUs, so we do that
86 */
87 if (firmware_has_feature(FW_FEATURE_OPALv3)) {
88 uint8_t status;
89
90 rc = opal_query_cpu_status(pcpu, &status);
80 if (rc != OPAL_SUCCESS) { 91 if (rc != OPAL_SUCCESS) {
81 pr_warn("OPAL Error %ld starting CPU %d\n", 92 pr_warn("OPAL Error %ld querying CPU %d state\n",
82 rc, nr); 93 rc, nr);
83 return -ENODEV; 94 return -ENODEV;
84 } 95 }
96
97 /*
98 * Already started, just kick it, probably coming from
99 * kexec and spinning
100 */
101 if (status == OPAL_THREAD_STARTED)
102 goto kick;
103
104 /*
105 * Available/inactive, let's kick it
106 */
107 if (status == OPAL_THREAD_INACTIVE) {
108 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
109 nr, pcpu);
110 rc = opal_start_cpu(pcpu, start_here);
111 if (rc != OPAL_SUCCESS) {
112 pr_warn("OPAL Error %ld starting CPU %d\n",
113 rc, nr);
114 return -ENODEV;
115 }
116 } else {
117 /*
118 * An unavailable CPU (or any other unknown
119 * status) shouldn't be started. It also
120 * shouldn't be in the possible map, but
121 * currently that can happen.
122 */
123 pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
124 " (status %d)...\n", nr, pcpu, status);
125 return -ENODEV;
126 }
127 } else {
128 /*
129 * On OPAL v2, we just kick it and hope for the best,
130 * we must not test the error from opal_start_cpu() or
131 * we would fail to get CPUs from kexec.
132 */
133 opal_start_cpu(pcpu, start_here);
85 } 134 }
135 kick:
86 return smp_generic_kick_cpu(nr); 136 return smp_generic_kick_cpu(nr);
87} 137}
88 138
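Note: with OPAL v3 the kick path above can ask firmware for the thread's state; threads already started (for example still spinning after a kexec) are only kicked through the PACA, inactive threads are started via opal_start_cpu(), and anything else is refused. A stand-alone sketch of that decision flow follows, with stubbed OPAL calls and simulated status values.

#include <stdio.h>

enum { OPAL_SUCCESS = 0 };
enum { OPAL_THREAD_INACTIVE, OPAL_THREAD_STARTED, OPAL_THREAD_UNAVAILABLE };

static int opal_query_cpu_status(int pcpu, int *status)
{
	*status = (pcpu == 2) ? OPAL_THREAD_STARTED : OPAL_THREAD_INACTIVE;
	return OPAL_SUCCESS;
}

static int opal_start_cpu(int pcpu)
{
	printf("starting CPU %d via OPAL\n", pcpu);
	return OPAL_SUCCESS;
}

/* Returns 0 if the CPU may be kicked through the PACA, negative on error. */
static int kick_cpu(int pcpu, int have_opalv3)
{
	int status;

	if (!have_opalv3) {
		/* OPAL v2: start unconditionally, ignore errors (kexec case) */
		opal_start_cpu(pcpu);
		return 0;
	}
	if (opal_query_cpu_status(pcpu, &status) != OPAL_SUCCESS)
		return -1;
	if (status == OPAL_THREAD_STARTED)
		return 0;			/* already spinning, just kick */
	if (status == OPAL_THREAD_INACTIVE)
		return opal_start_cpu(pcpu) == OPAL_SUCCESS ? 0 : -1;
	return -1;				/* unavailable: don't start */
}

int main(void)
{
	int rc1 = kick_cpu(1, 1);	/* inactive thread: started via OPAL */
	int rc2 = kick_cpu(2, 1);	/* already started: just kicked */

	printf("cpu1 -> %d, cpu2 -> %d\n", rc1, rc2);
	return 0;
}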
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 9a0941bc4d31..023b288f895b 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -18,6 +18,7 @@ config PPC_PSERIES
18 select PPC_PCI_CHOICE if EXPERT 18 select PPC_PCI_CHOICE if EXPERT
19 select ZLIB_DEFLATE 19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING
21 default y 22 default y
22 23
23config PPC_SPLPAR 24config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 47226e04126d..5f997e79d570 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -16,6 +16,7 @@
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 17 */
18 18
19#include <linux/cpu.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21#include <linux/stat.h> 22#include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
126 struct device_attribute *attr, 127 struct device_attribute *attr,
127 const char *buf, size_t count) 128 const char *buf, size_t count)
128{ 129{
130 cpumask_var_t offline_mask;
129 int rc; 131 int rc;
130 132
131 if (!capable(CAP_SYS_ADMIN)) 133 if (!capable(CAP_SYS_ADMIN))
132 return -EPERM; 134 return -EPERM;
133 135
136 if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
137 return -ENOMEM;
138
134 stream_id = simple_strtoul(buf, NULL, 16); 139 stream_id = simple_strtoul(buf, NULL, 16);
135 140
136 do { 141 do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
140 } while (rc == -EAGAIN); 145 } while (rc == -EAGAIN);
141 146
142 if (!rc) { 147 if (!rc) {
148 /* All present CPUs must be online */
149 cpumask_andnot(offline_mask, cpu_present_mask,
150 cpu_online_mask);
151 rc = rtas_online_cpus_mask(offline_mask);
152 if (rc) {
153 pr_err("%s: Could not bring present CPUs online.\n",
154 __func__);
155 goto out;
156 }
157
143 stop_topology_update(); 158 stop_topology_update();
144 rc = pm_suspend(PM_SUSPEND_MEM); 159 rc = pm_suspend(PM_SUSPEND_MEM);
145 start_topology_update(); 160 start_topology_update();
161
162 /* Take down CPUs not online prior to suspend */
163 if (!rtas_offline_cpus_mask(offline_mask))
164 pr_warn("%s: Could not restore CPUs to offline "
165 "state.\n", __func__);
146 } 166 }
147 167
148 stream_id = 0; 168 stream_id = 0;
149 169
150 if (!rc) 170 if (!rc)
151 rc = count; 171 rc = count;
172out:
173 free_cpumask_var(offline_mask);
152 return rc; 174 return rc;
153} 175}
154 176
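Note: store_hibernate() above records which present CPUs were offline, onlines them all before the suspend (every present thread must be online for migration/hibernation), and offlines the same set again afterwards. A stand-alone sketch of that bookkeeping using a plain bitmask in place of cpumask_var_t; the CPU numbers are invented.

#include <stdio.h>

int main(void)
{
	unsigned long present = 0x0f;	/* CPUs 0-3 present */
	unsigned long online  = 0x05;	/* CPUs 0 and 2 online */
	unsigned long offline_mask;

	/* cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask) */
	offline_mask = present & ~online;
	printf("bring online before suspend: %#lx\n", offline_mask);

	online |= offline_mask;		/* rtas_online_cpus_mask() */
	/* ... pm_suspend(PM_SUSPEND_MEM) ... */
	online &= ~offline_mask;	/* rtas_offline_cpus_mask() */

	printf("online set restored to: %#lx\n", online);
	return 0;
}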
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 97fe82ee8633..2d3b1dd9571d 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); 361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
362 wsp_ics_set_xive(ics, hw_irq, xive); 362 wsp_ics_set_xive(ics, hw_irq, xive);
363 363
364 return 0; 364 return IRQ_SET_MASK_OK;
365} 365}
366 366
367static struct irq_chip wsp_irq_chip = { 367static struct irq_chip wsp_irq_chip = {
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index b0a518e97599..99464a7bdb3b 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -64,6 +64,8 @@ endif
64 64
65obj-$(CONFIG_PPC_SCOM) += scom.o 65obj-$(CONFIG_PPC_SCOM) += scom.o
66 66
67obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
68
67subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror 69subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
68 70
69obj-$(CONFIG_PPC_XICS) += xics/ 71obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 6e0e1005227f..9cd0e60716fe 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
81 ev_int_set_config(src, config, prio, cpuid); 81 ev_int_set_config(src, config, prio, cpuid);
82 spin_unlock_irqrestore(&ehv_pic_lock, flags); 82 spin_unlock_irqrestore(&ehv_pic_lock, flags);
83 83
84 return 0; 84 return IRQ_SET_MASK_OK;
85} 85}
86 86
87static unsigned int ehv_pic_type_to_vecpri(unsigned int type) 87static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index ee21b5e71aec..0a13ecb270c7 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
836 mpic_physmask(mask)); 836 mpic_physmask(mask));
837 } 837 }
838 838
839 return 0; 839 return IRQ_SET_MASK_OK;
840} 840}
841 841
842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) 842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
new file mode 100644
index 000000000000..ce5a7b489e4b
--- /dev/null
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -0,0 +1,105 @@
1/*
2 * A udbg backend which logs messages and reads input from in-memory
3 * buffers.
4 *
5 * The console output can be read from memcons_output which is a
6 * circular buffer whose next write position is stored in memcons.output_pos.
7 *
8 * Input may be passed by writing into the memcons_input buffer when it is
9 * empty. The input buffer is empty when both input_pos == input_start and
10 * *input_start == '\0'.
11 *
12 * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
13 * Copyright (C) 2013 Alistair Popple, IBM Corp
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <asm/barrier.h>
24#include <asm/page.h>
25#include <asm/processor.h>
26#include <asm/udbg.h>
27
28struct memcons {
29 char *output_start;
30 char *output_pos;
31 char *output_end;
32 char *input_start;
33 char *input_pos;
34 char *input_end;
35};
36
37static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
38static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
39
40struct memcons memcons = {
41 .output_start = memcons_output,
42 .output_pos = memcons_output,
43 .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
44 .input_start = memcons_input,
45 .input_pos = memcons_input,
46 .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
47};
48
49void memcons_putc(char c)
50{
51 char *new_output_pos;
52
53 *memcons.output_pos = c;
54 wmb();
55 new_output_pos = memcons.output_pos + 1;
56 if (new_output_pos >= memcons.output_end)
57 new_output_pos = memcons.output_start;
58
59 memcons.output_pos = new_output_pos;
60}
61
62int memcons_getc_poll(void)
63{
64 char c;
65 char *new_input_pos;
66
67 if (*memcons.input_pos) {
68 c = *memcons.input_pos;
69
70 new_input_pos = memcons.input_pos + 1;
71 if (new_input_pos >= memcons.input_end)
72 new_input_pos = memcons.input_start;
73 else if (*new_input_pos == '\0')
74 new_input_pos = memcons.input_start;
75
76 *memcons.input_pos = '\0';
77 wmb();
78 memcons.input_pos = new_input_pos;
79 return c;
80 }
81
82 return -1;
83}
84
85int memcons_getc(void)
86{
87 int c;
88
89 while (1) {
90 c = memcons_getc_poll();
91 if (c == -1)
92 cpu_relax();
93 else
94 break;
95 }
96
97 return c;
98}
99
100void udbg_init_memcons(void)
101{
102 udbg_putc = memcons_putc;
103 udbg_getc = memcons_getc;
104 udbg_getc_poll = memcons_getc_poll;
105}
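Note: the header comment of udbg_memcons.c describes the input handshake: a writer may fill memcons_input only while the buffer is empty (input_pos == input_start and *input_start == '\0'), and the reader consumes characters by zeroing them as memcons_getc_poll() does. A stand-alone harness exercising that protocol with a scaled-down buffer; sizes and the test string are invented.

#include <stdio.h>
#include <string.h>

#define IN_SIZE 16

static char input[IN_SIZE];
static char *input_pos = input;

static int input_empty(void)
{
	return input_pos == input && input[0] == '\0';
}

static int getc_poll(void)
{
	char c = *input_pos;

	if (!c)
		return -1;
	*input_pos = '\0';			/* consume the byte */
	input_pos++;
	if (input_pos >= input + IN_SIZE || *input_pos == '\0')
		input_pos = input;		/* wrap / reset when drained */
	return c;
}

int main(void)
{
	int c;

	if (input_empty())
		memcpy(input, "ok\n", 3);	/* writer side */
	while ((c = getc_poll()) != -1)
		putchar(c);
	printf("empty again: %d\n", input_empty());
	return 0;
}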
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index f7e8609df0d5..39d72212655e 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
148 __func__, d->irq, hw_irq, server, rc); 148 __func__, d->irq, hw_irq, server, rc);
149 return -1; 149 return -1;
150 } 150 }
151 return 0; 151 return IRQ_SET_MASK_OK;
152} 152}
153 153
154static struct irq_chip ics_opal_irq_chip = { 154static struct irq_chip ics_opal_irq_chip = {
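Note: the last three hunks switch irq_chip set_affinity callbacks from a bare 0 to IRQ_SET_MASK_OK, the genirq value meaning "mask applied, please copy it into irq_data" (IRQ_SET_MASK_OK_NOCOPY means the driver already did the copy). Below is a simplified stand-in for how a genirq-style core interprets that return value; it is not the kernel's irq_do_set_affinity() itself.

#include <stdio.h>

enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY = 1 };

struct irq_data { unsigned long affinity; };

static int chip_set_affinity(struct irq_data *d, unsigned long mask)
{
	/* program the interrupt controller here */
	return IRQ_SET_MASK_OK;
}

static int irq_do_set_affinity(struct irq_data *d, unsigned long mask)
{
	int ret = chip_set_affinity(d, mask);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		d->affinity = mask;	/* core keeps the bookkeeping */
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		return 0;
	default:
		return ret;		/* negative errno from the chip */
	}
}

int main(void)
{
	struct irq_data d = { 0 };

	printf("rc=%d affinity=%#lx\n", irq_do_set_affinity(&d, 0x3), d.affinity);
	return 0;
}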