Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/cputable.h           | 17
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h      |  2
-rw-r--r--  arch/powerpc/include/asm/hvcall.h             |  1
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h            | 16
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h            | 11
-rw-r--r--  arch/powerpc/include/asm/processor.h          | 13
-rw-r--r--  arch/powerpc/include/asm/reg.h                | 11
-rw-r--r--  arch/powerpc/include/asm/signal.h             |  3
-rw-r--r--  arch/powerpc/include/asm/tm.h                 |  2
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild          |  1
-rw-r--r--  arch/powerpc/include/uapi/asm/tm.h            | 18
-rw-r--r--  arch/powerpc/kernel/cputable.c                |  6
-rw-r--r--  arch/powerpc/kernel/entry_32.S                |  2
-rw-r--r--  arch/powerpc/kernel/entry_64.S                | 35
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S          | 92
-rw-r--r--  arch/powerpc/kernel/irq.c                     |  2
-rw-r--r--  arch/powerpc/kernel/pci-common.c              | 18
-rw-r--r--  arch/powerpc/kernel/process.c                 |  7
-rw-r--r--  arch/powerpc/kernel/signal.c                  | 40
-rw-r--r--  arch/powerpc/kernel/signal.h                  |  2
-rw-r--r--  arch/powerpc/kernel/signal_32.c               | 10
-rw-r--r--  arch/powerpc/kernel/signal_64.c               | 23
-rw-r--r--  arch/powerpc/kernel/traps.c                   | 39
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c                    |  5
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                  |  2
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c             |  2
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c                | 29
-rw-r--r--  arch/powerpc/kvm/booke.c                      | 18
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c                   |  5
-rw-r--r--  arch/powerpc/kvm/e500mc.c                     |  2
-rw-r--r--  arch/powerpc/lib/copypage_power7.S            | 19
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S            | 12
-rw-r--r--  arch/powerpc/mm/hash_native_64.c              | 30
-rw-r--r--  arch/powerpc/perf/core-book3s.c               | 69
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig        |  2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c  | 12
-rw-r--r--  arch/powerpc/sysdev/mpic.c                    |  4
37 files changed, 337 insertions(+), 245 deletions(-)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 26807e5aff51..6f3887d884d2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_CFAR			LONG_ASM_CONST(0x0100000000000000)
 #define	CPU_FTR_HAS_PPR			LONG_ASM_CONST(0x0200000000000000)
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
 	    CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
-	    CPU_FTR_HVMODE)
+	    CPU_FTR_HVMODE | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+	    CPU_FTR_DABRX)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
-	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
+	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
-	    CPU_FTR_UNALIGNED_LD_STD)
+	    CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_PURR | CPU_FTR_REAL_LE)
+	    CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
-		     CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+		     CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+		     CPU_FTR_ICSWX | CPU_FTR_DABRX )
 
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8e5fae8beaf6..46793b58a761 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -513,7 +513,7 @@ label##_common: \
  */
 #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)		  \
 	EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
-			 FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
+			 FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
 
 /*
  * When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index cf4df8e2139a..0c7f2bfcf134 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -264,6 +264,7 @@
 #define H_GET_MPP		0x2D4
 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC
 #define H_BEST_ENERGY		0x2F4
+#define H_XIRR_X		0x2FC
 #define H_RANDOM		0x300
 #define H_COP			0x304
 #define H_GET_MPP_X		0x314
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index b9dd382cb349..851bac7afa4b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -54,8 +54,16 @@
 #define BOOKE_INTERRUPT_DEBUG 15
 
 /* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+				BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
@@ -67,10 +75,6 @@
 #define BOOKE_INTERRUPT_HV_SYSCALL 40
 #define BOOKE_INTERRUPT_HV_PRIV 41
 
-/* altivec */
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
-
 /* book3s */
 
 #define BOOK3S_INTERRUPT_SYSTEM_RESET	0x100
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index cea8496091ff..2f1b6c5f8174 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define PPC440EP_ERR42
 #endif
 
+/* The following stops all load and store data streams associated with stream
+ * ID (ie. streams created explicitly).  The embedded and server mnemonics for
+ * dcbt are different so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch)	\
+.machine push ;					\
+.machine "power4" ;				\
+	lis	scratch,0x60000000@h;		\
+	dcbt	r0,scratch,0b01010;		\
+.machine pop
+
 /*
  * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
  * keep the address intact to be compatible with code shared with
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 594db6bc093c..14a658363698 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -409,21 +409,16 @@ static inline void prefetchw(const void *x)
 #endif
 
 #ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-	unsigned long sp;
-
 	if (is_32)
-		sp = regs->gpr[1] & 0x0ffffffffUL;
-	else
-		sp = regs->gpr[1];
-
+		return sp & 0x0ffffffffUL;
 	return sp;
 }
 #else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-	return regs->gpr[1];
+	return sp;
 }
 #endif
 
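The get_clean_sp() change above is the plumbing for the TM signal-stack fix: callers now hand in a raw stack-pointer value rather than a pt_regs, so the signal code can substitute the checkpointed SP. A minimal sketch of the resulting call chain, using names from the signal.c and signal_32.c hunks later in this patch (illustration only):

	/* Sketch: resolve the SP first (checkpointed if the task is in an
	 * active transaction), then let get_sigframe()/get_clean_sp() mask it. */
	unsigned long sp = get_tm_stackpointer(regs);	/* regs->gpr[1] or ckpt SP */
	rt_sf = get_sigframe(ka, sp, sizeof(*rt_sf), 1);	/* is_32=1 masks to 32 bits */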
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6136515c7f2..4a9e408644fe 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -111,17 +111,6 @@
 #define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
 
-/* Reason codes describing kernel causes for transaction aborts.  By
-   convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
-   the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED	0xfe
-#define TM_CAUSE_TLBI		0xfc
-#define TM_CAUSE_FAC_UNAV	0xfa
-#define TM_CAUSE_SYSCALL	0xf9 /* Persistent */
-#define TM_CAUSE_MISC		0xf6
-#define TM_CAUSE_SIGNAL		0xf4
-
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT	MSR_SF
 
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index fbe66c463891..9322c28aebd2 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -3,5 +3,8 @@
 
 #define __ARCH_HAS_SA_RESTORER
 #include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
 
 #endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 4b4449abf3f8..9dfbc34bdbf5 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -5,6 +5,8 @@
  * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
  */
 
+#include <uapi/asm/tm.h>
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f7bca6370745..5182c8622b54 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -40,6 +40,7 @@ header-y += statfs.h
 header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
+header-y += tm.h
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644
index 000000000000..85059a00f560
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts.  By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent.  PAPR saves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT	0x01
+#define TM_CAUSE_RESCHED	0xde
+#define TM_CAUSE_TLBI		0xdc
+#define TM_CAUSE_FAC_UNAV	0xda
+#define TM_CAUSE_SYSCALL	0xd8  /* future use */
+#define TM_CAUSE_MISC		0xd6  /* future use */
+#define TM_CAUSE_SIGNAL		0xd4
+#define TM_CAUSE_ALIGNMENT	0xd2
+#define TM_CAUSE_EMULATE	0xd0
+
+#endif
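These replacement codes sit below the 0xe0-0xff range that PAPR reserves for the hypervisor, and bit 0 doubles as the persistence flag copied into TEXASR[56]. A one-line sketch of how a persistent cause is composed, matching the traps.c hunks later in this patch (illustration only):

	/* Sketch: 0xd2 | 0x01 == 0xd3; bit 0 marks the abort persistent. */
	unsigned char cause = TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT;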
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c60bbec25c1f..2a45d0f04385 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -452,7 +452,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.mmu_features		= MMU_FTRS_POWER8,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_type		= PPC_OPROFILE_INVALID,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_name		= "POWER7+ (raw)",
 		.cpu_features		= CPU_FTRS_POWER7,
 		.cpu_user_features	= COMMON_USER_POWER7,
-		.cpu_user_features	= COMMON_USER2_POWER7,
+		.cpu_user_features2	= COMMON_USER2_POWER7,
 		.mmu_features		= MMU_FTRS_POWER7,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -507,7 +507,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power8",
-		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_type		= PPC_OPROFILE_INVALID,
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
 		.platform		= "power8",
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d22e73e4618b..22b45a4955cd 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -849,7 +849,7 @@ resume_kernel:
 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
 	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r8,TI_FLAGS(r9)
-	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
+	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 	beq+	1f
 
 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0e9095e47b5b..8741c854e03d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -465,20 +465,6 @@ BEGIN_FTR_SECTION
 	std	r0, THREAD_EBBHR(r3)
 	mfspr	r0, SPRN_EBBRR
 	std	r0, THREAD_EBBRR(r3)
-
-	/* PMU registers made user read/(write) by EBB */
-	mfspr	r0, SPRN_SIAR
-	std	r0, THREAD_SIAR(r3)
-	mfspr	r0, SPRN_SDAR
-	std	r0, THREAD_SDAR(r3)
-	mfspr	r0, SPRN_SIER
-	std	r0, THREAD_SIER(r3)
-	mfspr	r0, SPRN_MMCR0
-	std	r0, THREAD_MMCR0(r3)
-	mfspr	r0, SPRN_MMCR2
-	std	r0, THREAD_MMCR2(r3)
-	mfspr	r0, SPRN_MMCRA
-	std	r0, THREAD_MMCRA(r3)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 #endif
 
@@ -501,6 +487,13 @@ BEGIN_FTR_SECTION
 	ldarx	r6,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
 
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself
+ */
+	DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
 	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
 	std	r6,PACACURRENT(r13)	/* Set new 'current' */
 
@@ -574,20 +567,6 @@ BEGIN_FTR_SECTION
 	ld	r0, THREAD_EBBRR(r4)
 	mtspr	SPRN_EBBRR, r0
 
-	/* PMU registers made user read/(write) by EBB */
-	ld	r0, THREAD_SIAR(r4)
-	mtspr	SPRN_SIAR, r0
-	ld	r0, THREAD_SDAR(r4)
-	mtspr	SPRN_SDAR, r0
-	ld	r0, THREAD_SIER(r4)
-	mtspr	SPRN_SIER, r0
-	ld	r0, THREAD_MMCR0(r4)
-	mtspr	SPRN_MMCR0, r0
-	ld	r0, THREAD_MMCR2(r4)
-	mtspr	SPRN_MMCR2, r0
-	ld	r0, THREAD_MMCRA(r4)
-	mtspr	SPRN_MMCRA, r0
-
 	ld	r0,THREAD_TAR(r4)
 	mtspr	SPRN_TAR,r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6eba1bf61ad..40e4a17c8ba0 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
 	xori	r10,r10,(MSR_FE0|MSR_FE1)
 	mtmsrd	r10
 	sync
-	fmr	0,0
-	fmr	1,1
-	fmr	2,2
-	fmr	3,3
-	fmr	4,4
-	fmr	5,5
-	fmr	6,6
-	fmr	7,7
-	fmr	8,8
-	fmr	9,9
-	fmr	10,10
-	fmr	11,11
-	fmr	12,12
-	fmr	13,13
-	fmr	14,14
-	fmr	15,15
-	fmr	16,16
-	fmr	17,17
-	fmr	18,18
-	fmr	19,19
-	fmr	20,20
-	fmr	21,21
-	fmr	22,22
-	fmr	23,23
-	fmr	24,24
-	fmr	25,25
-	fmr	26,26
-	fmr	27,27
-	fmr	28,28
-	fmr	29,29
-	fmr	30,30
-	fmr	31,31
+
+#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n)  FMR2(n) ; FMR2(n+2)
+#define FMR8(n)  FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+	FMR32(0)
+
 FTR_SECTION_ELSE
 /*
  * To denormalise we need to move a copy of the register to itself.
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE
 	oris	r10,r10,MSR_VSX@h
 	mtmsrd	r10
 	sync
-	XVCPSGNDP(0,0,0)
-	XVCPSGNDP(1,1,1)
-	XVCPSGNDP(2,2,2)
-	XVCPSGNDP(3,3,3)
-	XVCPSGNDP(4,4,4)
-	XVCPSGNDP(5,5,5)
-	XVCPSGNDP(6,6,6)
-	XVCPSGNDP(7,7,7)
-	XVCPSGNDP(8,8,8)
-	XVCPSGNDP(9,9,9)
-	XVCPSGNDP(10,10,10)
-	XVCPSGNDP(11,11,11)
-	XVCPSGNDP(12,12,12)
-	XVCPSGNDP(13,13,13)
-	XVCPSGNDP(14,14,14)
-	XVCPSGNDP(15,15,15)
-	XVCPSGNDP(16,16,16)
-	XVCPSGNDP(17,17,17)
-	XVCPSGNDP(18,18,18)
-	XVCPSGNDP(19,19,19)
-	XVCPSGNDP(20,20,20)
-	XVCPSGNDP(21,21,21)
-	XVCPSGNDP(22,22,22)
-	XVCPSGNDP(23,23,23)
-	XVCPSGNDP(24,24,24)
-	XVCPSGNDP(25,25,25)
-	XVCPSGNDP(26,26,26)
-	XVCPSGNDP(27,27,27)
-	XVCPSGNDP(28,28,28)
-	XVCPSGNDP(29,29,29)
-	XVCPSGNDP(30,30,30)
-	XVCPSGNDP(31,31,31)
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+	XVCPSGNDP32(0)
+
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+	b	denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+	XVCPSGNDP32(32)
+denorm_done:
 	mtspr	SPRN_HSRR0,r11
 	mtcrf	0x80,r9
 	ld	r9,PACA_EXGEN+EX_R9(r13)
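The FMR32/XVCPSGNDP32 ladders above replace thirty-two hand-written instructions with a log2 tree of doubling macros. A self-contained C analogue of the same unrolling trick (illustration only, not kernel code), which prints the sequence FMR32(0) emits:

	#include <stdio.h>

	/* Each level emits its block twice, the second copy offset by the
	 * block width, so DO32(0) covers registers 0..31. */
	#define DO2(n)  printf("fmr %d,%d\n", (n), (n)); printf("fmr %d,%d\n", (n) + 1, (n) + 1);
	#define DO4(n)  DO2(n)  DO2((n) + 2)
	#define DO8(n)  DO4(n)  DO4((n) + 4)
	#define DO16(n) DO8(n)  DO8((n) + 8)
	#define DO32(n) DO16(n) DO16((n) + 16)

	int main(void)
	{
		DO32(0);	/* prints fmr 0,0 ... fmr 31,31 */
		return 0;
	}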
@@ -721,7 +683,7 @@ machine_check_common:
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
 	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
 	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
 #ifdef CONFIG_PPC_DOORBELL
 	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5cbcf4d5a808..ea185e0b3cae 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
 	 * in case we also had a rollover while hard disabled
 	 */
 	local_paca->irq_happened &= ~PACA_IRQ_DEC;
-	if (decrementer_check_overflow())
+	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
 		return 0x900;
 
 	/* Finally check if an external interrupt happened */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9acf50dd5b2..eabeec991016 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -657,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
  * ranges. However, some machines (thanks Apple !) tend to split their
  * space into lots of small contiguous ranges. So we have to coalesce.
  *
- *   - We can only cope with all memory ranges having the same offset
- *     between CPU addresses and PCI addresses. Unfortunately, some bridges
- *     are setup for a large 1:1 mapping along with a small "window" which
- *     maps PCI address 0 to some arbitrary high address of the CPU space in
- *     order to give access to the ISA memory hole.
- *     The way out of here that I've chosen for now is to always set the
- *     offset based on the first resource found, then override it if we
- *     have a different offset and the previous was set by an ISA hole.
- *
  *   - Some busses have IO space not starting at 0, which causes trouble with
  *     the way we do our IO resource renumbering. The code somewhat deals with
  *     it for 64 bits but I would expect problems on 32 bits.
@@ -680,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 	int rlen;
 	int pna = of_n_addr_cells(dev);
 	int np = pna + 5;
-	int memno = 0, isa_hole = -1;
+	int memno = 0;
 	u32 pci_space;
 	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
-	unsigned long long isa_mb = 0;
 	struct resource *res;
 
 	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -777,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 		}
 		/* Handles ISA memory hole space here */
 		if (pci_addr == 0) {
-			isa_mb = cpu_addr;
-			isa_hole = memno;
 			if (primary || isa_mem_base == 0)
 				isa_mem_base = cpu_addr;
 			hose->isa_mem_phys = cpu_addr;
@@ -839,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
 	}
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		struct resource *res = dev->resource + i;
+		struct pci_bus_region reg;
 		if (!res->flags)
 			continue;
 
@@ -847,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
 		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
 		 * since in that case, we don't want to re-assign anything
 		 */
+		pcibios_resource_to_bus(dev, &reg, res);
 		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
-		    (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 			/* Only print message if not re-assigning */
 			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
 				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a902723fdc69..076d1242507a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 {
 	mtspr(SPRN_DABR, dabr);
-	mtspr(SPRN_DABRX, dabrx);
+	if (cpu_has_feature(CPU_FTR_DABRX))
+		mtspr(SPRN_DABRX, dabrx);
 	return 0;
 }
 #else
@@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 
 #ifdef CONFIG_PPC64
 /* Called with hard IRQs off */
-void __ppc64_runlatch_on(void)
+void notrace __ppc64_runlatch_on(void)
 {
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
@@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void)
 }
 
 /* Called with hard IRQs off */
-void __ppc64_runlatch_off(void)
+void notrace __ppc64_runlatch_off(void)
 {
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 577a8aa69c6e..457e97aa2945 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -18,6 +18,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/debug.h>
+#include <asm/tm.h>
 
 #include "signal.h"
 
@@ -30,13 +31,13 @@ int show_unhandled_signals = 1;
 /*
  * Allocate space for the signal frame
  */
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
 			   size_t frame_size, int is_32)
 {
 	unsigned long oldsp, newsp;
 
 	/* Default to using normal stack */
-	oldsp = get_clean_sp(regs, is_32);
+	oldsp = get_clean_sp(sp, is_32);
 
 	/* Check for alt stack */
 	if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -175,3 +176,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 
 	user_enter();
 }
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+	/* When in an active transaction that takes a signal, we need to be
+	 * careful with the stack.  It's possible that the stack has moved back
+	 * up after the tbegin.  The obvious case here is when the tbegin is
+	 * called inside a function that returns before a tend.  In this case,
+	 * the stack is part of the checkpointed transactional memory state.
+	 * If we write over this non transactionally or in suspend, we are in
+	 * trouble because if we get a tm abort, the program counter and stack
+	 * pointer will be back at the tbegin but our in memory stack won't be
+	 * valid anymore.
+	 *
+	 * To avoid this, when taking a signal in an active transaction, we
+	 * need to use the stack pointer from the checkpointed state, rather
+	 * than the speculated state.  This ensures that the signal context
+	 * (written tm suspended) will be written below the stack required for
+	 * the rollback.  The transaction is aborted because of the treclaim,
+	 * so any memory written between the tbegin and the signal will be
+	 * rolled back anyway.
+	 *
+	 * For signals taken in non-TM or suspended mode, we use the
+	 * normal/non-checkpointed stack pointer.
+	 */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(regs->msr)) {
+		tm_enable();
+		tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+		if (MSR_TM_TRANSACTIONAL(regs->msr))
+			return current->thread.ckpt_regs.gpr[1];
+	}
+#endif
+	return regs->gpr[1];
+}
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index ec84c901ceab..c69b9aeb9f23 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
 
 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
 
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
 				  size_t frame_size, int is_32);
 
 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 95068bf569ad..201385c3a1ae 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
 {
 	unsigned long msr = regs->msr;
 
-	/* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
-	 * thread.transact_fpr[], thread.transact_vr[], etc.
-	 */
-	tm_enable();
-	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
 	/* Make sure floating point registers are stored in regs */
 	flush_fp_to_thread(current);
 
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 
 	/* Set up Signal Frame */
 	/* Put a Real Time Context onto stack */
-	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+	rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
 	addr = rt_sf;
 	if (unlikely(rt_sf == NULL))
 		goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 	unsigned long tramp;
 
 	/* Set up Signal Frame */
-	frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
 	if (unlikely(frame == NULL))
 		goto badframe;
 	sc = (struct sigcontext __user *) &frame->sctx;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c1794286098c..345947367ec0 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
  * As above, but Transactional Memory is in use, so deliver sigcontexts
  * containing checkpointed and transactional register states.
  *
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state.  If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
  */
 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 				 struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 
 	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
 
-	/* tm_reclaim rolls back all reg states, saving checkpointed (older)
-	 * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
-	 * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
-	 * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
-	 * thread.fr[]/vr[]s.  The transactional (newer) GPRs are on the
-	 * stack, in *regs.
-	 */
-	tm_enable();
-	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
 	flush_fp_to_thread(current);
 
 #ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 	unsigned long newsp = 0;
 	long err = 0;
 
-	frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
 	if (unlikely(frame == NULL))
 		goto badframe;
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a7a648f6b750..c0e5caf8ccc7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,6 +53,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #include <asm/processor.h>
+#include <asm/tm.h>
 #endif
 #include <asm/kexec.h>
 #include <asm/ppc-opcode.h>
@@ -932,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
 	return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+	/* If we're emulating a load/store in an active transaction, we cannot
+	 * emulate it as the kernel operates in transaction suspended context.
+	 * We need to abort the transaction.  This creates a persistent TM
+	 * abort so tell the user what caused it with a new code.
+	 */
+	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+		tm_enable();
+		tm_abort(cause);
+		return true;
+	}
+	return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+	return false;
+}
+#endif
+
 static int emulate_instruction(struct pt_regs *regs)
 {
 	u32 instword;
@@ -971,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs)
 
 	/* Emulate load/store string insn. */
 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+		if (tm_abort_check(regs,
+				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+			return -EINVAL;
 		PPC_WARN_EMULATED(string, regs);
 		return emulate_string_inst(regs, instword);
 	}
@@ -1139,6 +1165,16 @@ bail:
 	exception_exit(prev_state);
 }
 
+/*
+ * This occurs when running in hypervisor mode on POWER6 or later
+ * and an illegal instruction is encountered.
+ */
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+{
+	regs->msr |= REASON_ILLEGAL;
+	program_check_exception(regs);
+}
+
 void alignment_exception(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
@@ -1148,6 +1184,9 @@ void alignment_exception(struct pt_regs *regs)
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
+	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+		goto bail;
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5dd3ab469976..ed0385448148 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int gtlb_index;
+	int idx;
 
 	gtlb_index = kvmppc_get_gpr(vcpu, ra);
 	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		return EMULATE_FAIL;
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
 		gva_t eaddr;
 		gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
 			     tlbe->word2);
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9de24f8e03c7..550f5928b394 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	case H_CPPR:
 	case H_EOI:
 	case H_IPI:
+	case H_IPOLL:
+	case H_XIRR_X:
 		if (kvmppc_xics_enabled(vcpu)) {
 			ret = kvmppc_xics_hcall(vcpu, req);
 			break;
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index b24309c6c2d5..da0e0bc268bd 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -257,6 +257,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_CPPR:
 	case H_EOI:
 	case H_IPI:
+	case H_IPOLL:
+	case H_XIRR_X:
 		if (kvmppc_xics_enabled(vcpu))
 			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
 		break;
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index f7a103756618..94c1dd46b83d 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -650,6 +650,23 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	return H_SUCCESS;
 }
 
+static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+	union kvmppc_icp_state state;
+	struct kvmppc_icp *icp;
+
+	icp = vcpu->arch.icp;
+	if (icp->server_num != server) {
+		icp = kvmppc_xics_find_server(vcpu->kvm, server);
+		if (!icp)
+			return H_PARAMETER;
+	}
+	state = ACCESS_ONCE(icp->state);
+	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+	kvmppc_set_gpr(vcpu, 5, state.mfrr);
+	return H_SUCCESS;
+}
+
 static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
 	union kvmppc_icp_state old_state, new_state;
@@ -787,6 +804,18 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
 	if (!xics || !vcpu->arch.icp)
 		return H_HARDWARE;
 
+	/* These requests don't have real-mode implementations at present */
+	switch (req) {
+	case H_XIRR_X:
+		res = kvmppc_h_xirr(vcpu);
+		kvmppc_set_gpr(vcpu, 4, res);
+		kvmppc_set_gpr(vcpu, 5, get_tb());
+		return rc;
+	case H_IPOLL:
+		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+		return rc;
+	}
+
 	/* Check for real mode returning too hard */
 	if (xics->real_mode)
 		return kvmppc_xics_rm_complete(vcpu, req);
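Both new hcalls are simple queries, which is why they can be punted out of the real-mode fast path and answered here in virtual mode. For context (guest-side, not part of this patch), a pSeries guest would issue H_IPOLL through the usual hcall wrapper, with the results unpacked per kvmppc_h_ipoll() above:

	/* Sketch, assuming the standard pseries plpar_hcall() helper:
	 * retbuf[0] = (CPPR << 24) | XISR, retbuf[1] = MFRR. */
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc = plpar_hcall(H_IPOLL, retbuf, server);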
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1020119226db..5cd7ad0c1176 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -832,6 +832,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	int r = RESUME_HOST;
 	int s;
+	int idx;
+
+#ifdef CONFIG_PPC64
+	WARN_ON(local_paca->irq_happened != 0);
+#endif
+
+	/*
+	 * We enter with interrupts disabled in hardware, but
+	 * we need to call hard_irq_disable anyway to ensure that
+	 * the software state is kept in sync.
+	 */
+	hard_irq_disable();
 
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1065,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		}
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1075,6 +1089,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_account_exit(vcpu, MMIO_EXITS);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 
@@ -1098,6 +1113,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1114,6 +1131,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index c41a5a96b558..6d6f153b6c1d 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	int tlbsel, esel;
 	int recal = 0;
+	int idx;
 
 	tlbsel = get_tlb_tlbsel(vcpu);
 	esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		kvmppc_set_tlb1map_range(vcpu, gtlbe);
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
 		u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
 	return EMULATE_DONE;
 }
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 753cc99eff2b..19c8379575f7 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void)
 		r = 0;
 	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
 		r = 0;
-	else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
-		r = 0;
 	else
 		r = -ENOTSUPP;
 
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 0ef75bf0695c..395c594722a2 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7)
 	 * aligned we don't need to clear the bottom 7 bits of either
 	 * address.
 	 */
-	ori	r9,r3,1		/* stream=1 */
+	ori	r9,r3,1		/* stream=1 => to */
 
 #ifdef CONFIG_PPC_64K_PAGES
-	lis	r7,0x0E01	/* depth=7, units=512 */
+	lis	r7,0x0E01	/* depth=7
+				 * units/cachelines=512 */
 #else
 	lis	r7,0x0E00	/* depth=7 */
-	ori	r7,r7,0x1000	/* units=32 */
+	ori	r7,r7,0x1000	/* units/cachelines=32 */
 #endif
 	ori	r10,r7,1	/* stream=1 */
 
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7)
 
 .machine push
 .machine "power4"
-	dcbt	r0,r4,0b01000
-	dcbt	r0,r7,0b01010
-	dcbtst	r0,r9,0b01000
-	dcbtst	r0,r10,0b01010
+	/* setup read stream 0 */
+	dcbt	r0,r4,0b01000	/* addr from */
+	dcbt	r0,r7,0b01010	/* length and depth from */
+	/* setup write stream 1 */
+	dcbtst	r0,r9,0b01000	/* addr to */
+	dcbtst	r0,r10,0b01010	/* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* GO */
+	dcbt	r0,r8,0b01010	/* all streams GO */
 .machine pop
 
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 0d24ff15f5f6..d1f11795a7ad 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -318,12 +318,14 @@ err1; stb r0,0(r3)
 
 .machine push
 .machine "power4"
-	dcbt	r0,r6,0b01000
-	dcbt	r0,r7,0b01010
-	dcbtst	r0,r9,0b01000
-	dcbtst	r0,r10,0b01010
+	/* setup read stream 0 */
+	dcbt	r0,r6,0b01000	/* addr from */
+	dcbt	r0,r7,0b01010	/* length and depth from */
+	/* setup write stream 1 */
+	dcbtst	r0,r9,0b01000	/* addr to */
+	dcbtst	r0,r10,0b01010	/* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* GO */
+	dcbt	r0,r8,0b01010	/* all streams GO */
 .machine pop
 
 	beq	cr1,.Lunwind_stack_nonvmx_copy
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6a2aead5b0e5..4c122c3f1623 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
 	hpte_v = hptep->v;
 	actual_psize = hpte_actual_psize(hptep, psize);
+	/*
+	 * We need to invalidate the TLB always because hpte_remove doesn't do
+	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+	 * random entry from it. When we do that we don't invalidate the TLB
+	 * (hpte_remove) because we assume the old translation is still
+	 * technically "valid".
+	 */
 	if (actual_psize < 0) {
-		native_unlock_hpte(hptep);
-		return -1;
+		actual_psize = psize;
+		ret = -1;
+		goto err_out;
 	}
-	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
 		DBG_LOW(" -> miss\n");
 		ret = -1;
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
 			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
 	}
+err_out:
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	hptep = htab_address + slot;
 	actual_psize = hpte_actual_psize(hptep, psize);
 	if (actual_psize < 0)
-		return;
+		actual_psize = psize;
 
 	/* Update the HPTE */
 	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	hpte_v = hptep->v;
 
 	actual_psize = hpte_actual_psize(hptep, psize);
+	/*
+	 * We need to invalidate the TLB always because hpte_remove doesn't do
+	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+	 * random entry from it. When we do that we don't invalidate the TLB
+	 * (hpte_remove) because we assume the old translation is still
+	 * technically "valid".
+	 */
 	if (actual_psize < 0) {
+		actual_psize = psize;
 		native_unlock_hpte(hptep);
-		local_irq_restore(flags);
-		return;
+		goto err_out;
 	}
-	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v))
 		native_unlock_hpte(hptep);
 	else
 		/* Invalidate the hpte. NOTE: this also unlocks it */
 		hptep->v = 0;
 
+err_out:
 	/* Invalidate the TLB */
 	tlbie(vpn, psize, actual_psize, ssize, local);
-
 	local_irq_restore(flags);
 }
 
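Note on the hash_native_64.c hunks above: the early returns become jumps to a shared err_out label, so the tlbie() is executed even when hpte_actual_psize() cannot classify the entry (falling back to the requested page size). A minimal control-flow sketch, assuming the kernel's own hash_pte type and helpers (hpte_actual_psize, native_unlock_hpte, tlbie); the function name is hypothetical and the body is abridged, not the full update logic:

static long native_hpte_update_sketch(struct hash_pte *hptep,
				      unsigned long vpn, int psize,
				      int ssize, int local)
{
	long ret = 0;
	int actual_psize = hpte_actual_psize(hptep, psize);

	if (actual_psize < 0) {
		/* Unknown size: fall back to the requested page size
		 * instead of skipping the TLB flush entirely. */
		actual_psize = psize;
		ret = -1;
		goto err_out;
	}

	/* ... compare and update the HPTE under the lock ... */

err_out:
	native_unlock_hpte(hptep);
	/* Always flush: hpte_remove() may have evicted this slot earlier
	 * without a TLB invalidate, so the old translation can linger. */
	tlbie(vpn, psize, actual_psize, ssize, local);
	return ret;
}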
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 426180b84978..29c6482890c8 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -110,7 +110,7 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 
 static bool regs_use_siar(struct pt_regs *regs)
 {
-	return !!(regs->result & 1);
+	return !!regs->result;
 }
 
 /*
@@ -136,22 +136,30 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * If we're not doing instruction sampling, give them the SDAR
  * (sampled data address). If we are doing instruction sampling, then
  * only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
  */
 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 {
 	unsigned long mmcra = regs->dsisr;
-	unsigned long sdsync;
+	bool sdar_valid;
 
-	if (ppmu->flags & PPMU_SIAR_VALID)
-		sdsync = POWER7P_MMCRA_SDAR_VALID;
-	else if (ppmu->flags & PPMU_ALT_SIPR)
-		sdsync = POWER6_MMCRA_SDSYNC;
-	else
-		sdsync = MMCRA_SDSYNC;
+	if (ppmu->flags & PPMU_HAS_SIER)
+		sdar_valid = regs->dar & SIER_SDAR_VALID;
+	else {
+		unsigned long sdsync;
+
+		if (ppmu->flags & PPMU_SIAR_VALID)
+			sdsync = POWER7P_MMCRA_SDAR_VALID;
+		else if (ppmu->flags & PPMU_ALT_SIPR)
+			sdsync = POWER6_MMCRA_SDSYNC;
+		else
+			sdsync = MMCRA_SDSYNC;
+
+		sdar_valid = mmcra & sdsync;
+	}
 
-	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
 		*addrp = mfspr(SPRN_SDAR);
 }
 
@@ -181,11 +189,6 @@ static bool regs_sipr(struct pt_regs *regs)
 	return !!(regs->dsisr & sipr);
 }
 
-static bool regs_no_sipr(struct pt_regs *regs)
-{
-	return !!(regs->result & 2);
-}
-
 static inline u32 perf_flags_from_msr(struct pt_regs *regs)
 {
 	if (regs->msr & MSR_PR)
@@ -208,7 +211,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 		 * SIAR which should give slightly more reliable
 		 * results
 		 */
-		if (regs_no_sipr(regs)) {
+		if (ppmu->flags & PPMU_NO_SIPR) {
 			unsigned long siar = mfspr(SPRN_SIAR);
 			if (siar >= PAGE_OFFSET)
 				return PERF_RECORD_MISC_KERNEL;
@@ -239,22 +242,9 @@ static inline void perf_read_regs(struct pt_regs *regs)
 	int use_siar;
 
 	regs->dsisr = mmcra;
-	regs->result = 0;
-
-	if (ppmu->flags & PPMU_NO_SIPR)
-		regs->result |= 2;
-
-	/*
-	 * On power8 if we're in random sampling mode, the SIER is updated.
-	 * If we're in continuous sampling mode, we don't have SIPR.
-	 */
-	if (ppmu->flags & PPMU_HAS_SIER) {
-		if (marked)
-			regs->dar = mfspr(SPRN_SIER);
-		else
-			regs->result |= 2;
-	}
 
+	if (ppmu->flags & PPMU_HAS_SIER)
+		regs->dar = mfspr(SPRN_SIER);
 
 	/*
 	 * If this isn't a PMU exception (eg a software event) the SIAR is
@@ -279,12 +269,12 @@ static inline void perf_read_regs(struct pt_regs *regs)
 		use_siar = 1;
 	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
 		use_siar = 0;
-	else if (!regs_no_sipr(regs) && regs_sipr(regs))
+	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
 		use_siar = 0;
 	else
 		use_siar = 1;
 
-	regs->result |= use_siar;
+	regs->result = use_siar;
 }
 
 /*
@@ -308,8 +298,13 @@ static inline int siar_valid(struct pt_regs *regs)
 	unsigned long mmcra = regs->dsisr;
 	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 
-	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
-		return mmcra & POWER7P_MMCRA_SIAR_VALID;
+	if (marked) {
+		if (ppmu->flags & PPMU_HAS_SIER)
+			return regs->dar & SIER_SIAR_VALID;
+
+		if (ppmu->flags & PPMU_SIAR_VALID)
+			return mmcra & POWER7P_MMCRA_SIAR_VALID;
+	}
 
 	return 1;
 }
@@ -1763,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 			}
 		}
 	}
-	if ((!found) && printk_ratelimit())
+	if (!found && !nmi && printk_ratelimit())
 		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
 	/*
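Note on the core-book3s.c hunks above: the bit-packing of regs->result is dropped. regs->result now records only use_siar, SIPR availability is tested directly against ppmu->flags, and on SIER-capable PMUs the validity bits are read from the SIER image stashed in regs->dar rather than from MMCRA. A hedged restatement of the resulting SIAR check, using only names that appear in the diff (treat the exact masks as the kernel's, not defined here):

static int siar_valid_sketch(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;	/* saved MMCRA */

	if (mmcra & MMCRA_SAMPLE_ENABLE) {	/* marked-event sampling */
		if (ppmu->flags & PPMU_HAS_SIER)
			return regs->dar & SIER_SIAR_VALID;	/* saved SIER */
		if (ppmu->flags & PPMU_SIAR_VALID)
			return mmcra & POWER7P_MMCRA_SIAR_VALID;
	}

	/* Continuous sampling: SIAR is taken to be usable. */
	return 1;
}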
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 023b288f895b..4459eff7a75a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,6 +19,8 @@ config PPC_PSERIES
 	select ZLIB_DEFLATE
 	select PPC_DOORBELL
 	select HAVE_CONTEXT_TRACKING
+	select HOTPLUG if SMP
+	select HOTPLUG_CPU if SMP
 	default y
 
 config PPC_SPLPAR
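Note on the Kconfig hunk above: selecting HOTPLUG_CPU (and HOTPLUG) whenever SMP is enabled guarantees the CPU offline/online paths are compiled into pseries kernels instead of depending on the user's config. As an illustration only of what that symbol typically gates (the function name here is hypothetical):

#ifdef CONFIG_HOTPLUG_CPU
/* Built only when HOTPLUG_CPU is selected, e.g. by PPC_PSERIES + SMP. */
static int example_offline_cpu(unsigned int cpu)
{
	/* tear down per-cpu state, migrate interrupts away, etc. */
	return 0;
}
#endif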
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f935737..b456b157d33d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
 	ibm_configure_pe	= rtas_token("ibm,configure-pe");
 	ibm_configure_bridge	= rtas_token("ibm,configure-bridge");
 
-	/* necessary sanity check */
+	/*
+	 * Necessary sanity check. We needn't check "get-config-addr-info"
+	 * and its variant since the old firmware probably support address
+	 * of domain/bus/slot/function for EEH RTAS operations.
+	 */
 	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
 		pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
 			__func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
 		pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
 			__func__);
 		return -EINVAL;
-	} else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
-		ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
-		pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
-			"<ibm,get-config-addr-info> invalid\n",
-			__func__);
-		return -EINVAL;
 	} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
 		ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
 		pr_warning("%s: RTAS service <ibm,configure-pe> and "
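Note on the eeh_pseries.c hunks above: each RTAS call is probed by name via rtas_token(), and only calls with no usable fallback are treated as hard errors; the "get-config-addr-info" pair is no longer required because old firmware can address PEs by domain/bus/slot/function. A hedged sketch of the probing pattern (rtas_token() and RTAS_UNKNOWN_SERVICE are the kernel's real interfaces; the function name and abridged pairing logic are illustrative):

static int probe_eeh_tokens_sketch(void)
{
	int set_eeh    = rtas_token("ibm,set-eeh-option");
	int cfg_pe     = rtas_token("ibm,configure-pe");
	int cfg_bridge = rtas_token("ibm,configure-bridge");

	/* Without this call, EEH cannot be enabled at all. */
	if (set_eeh == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	/* Either configure call suffices; fail only when both are
	 * missing. "get-config-addr-info" is deliberately unchecked. */
	if (cfg_pe == RTAS_UNKNOWN_SERVICE &&
	    cfg_bridge == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	return 0;
}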
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0a13ecb270c7..3cc2f9159ab1 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock);
 
 #ifdef CONFIG_PPC32	/* XXX for now */
 #ifdef CONFIG_IRQ_ALL_CPUS
-#define distribute_irqs	(!(mpic->flags & MPIC_SINGLE_DEST_CPU))
+#define distribute_irqs	(1)
 #else
 #define distribute_irqs	(0)
 #endif
@@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void)
 	 * it differently, then we should make sure we also change the default
 	 * values of irq_desc[].affinity in irq.c.
 	 */
-	if (distribute_irqs) {
+	if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
 		for (i = 0; i < mpic->num_sources ; i++)
 			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
 				mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
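Note on the mpic.c hunks above: the MPIC_SINGLE_DEST_CPU test moves out of the distribute_irqs macro and into the run-time check, presumably because the macro is expanded where no "mpic" variable is in scope; distribute_irqs stays a plain compile-time constant. A minimal sketch of the resulting pattern, reusing only fields and helpers named in the diff (the function name is hypothetical and the body abridged):

#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs	(1)
#else
#define distribute_irqs	(0)
#endif

static void mpic_setup_cpu_sketch(struct mpic *mpic, u32 msk)
{
	int i;

	/* Compile-time policy AND per-controller quirk, checked where
	 * the mpic pointer is actually available. */
	if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
		for (i = 0; i < mpic->num_sources; i++)
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
				mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
	}
}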