Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/hvcall.h            1
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h          11
-rw-r--r--  arch/powerpc/include/asm/processor.h        13
-rw-r--r--  arch/powerpc/include/asm/reg.h              11
-rw-r--r--  arch/powerpc/include/asm/signal.h            3
-rw-r--r--  arch/powerpc/include/asm/tm.h                2
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild         1
-rw-r--r--  arch/powerpc/include/uapi/asm/tm.h          18
-rw-r--r--  arch/powerpc/kernel/cputable.c               6
-rw-r--r--  arch/powerpc/kernel/entry_32.S               2
-rw-r--r--  arch/powerpc/kernel/entry_64.S               7
-rw-r--r--  arch/powerpc/kernel/pci-common.c            14
-rw-r--r--  arch/powerpc/kernel/signal.c                40
-rw-r--r--  arch/powerpc/kernel/signal.h                 2
-rw-r--r--  arch/powerpc/kernel/signal_32.c             10
-rw-r--r--  arch/powerpc/kernel/signal_64.c             23
-rw-r--r--  arch/powerpc/kernel/traps.c                 29
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                 2
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c            2
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c              29
-rw-r--r--  arch/powerpc/lib/copypage_power7.S          19
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S          12
-rw-r--r--  arch/powerpc/mm/hash_native_64.c            30
-rw-r--r--  arch/powerpc/perf/core-book3s.c             67
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig       2
-rw-r--r--  arch/powerpc/sysdev/mpic.c                   4
26 files changed, 237 insertions(+), 123 deletions(-)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index cf4df8e2139a..0c7f2bfcf134 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -264,6 +264,7 @@
 #define H_GET_MPP               0x2D4
 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC
 #define H_BEST_ENERGY           0x2F4
+#define H_XIRR_X                0x2FC
 #define H_RANDOM                0x300
 #define H_COP                   0x304
 #define H_GET_MPP_X             0x314
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index cea8496091ff..2f1b6c5f8174 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define PPC440EP_ERR42
 #endif
 
+/* The following stops all load and store data streams associated with stream
+ * ID (ie. streams created explicitly). The embedded and server mnemonics for
+ * dcbt are different so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch)       \
+.machine push ;                                 \
+.machine "power4" ;                             \
+        lis     scratch,0x60000000@h;           \
+        dcbt    r0,scratch,0b01010;             \
+.machine pop
+
 /*
  * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
  * keep the address intact to be compatible with code shared with
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 594db6bc093c..14a658363698 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -409,21 +409,16 @@ static inline void prefetchw(const void *x)
 #endif
 
 #ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-	unsigned long sp;
-
 	if (is_32)
-		sp = regs->gpr[1] & 0x0ffffffffUL;
-	else
-		sp = regs->gpr[1];
-
+		return sp & 0x0ffffffffUL;
 	return sp;
 }
 #else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-	return regs->gpr[1];
+	return sp;
 }
 #endif
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6136515c7f2..4a9e408644fe 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -111,17 +111,6 @@
 #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)     (((x) & MSR_TS_MASK) == MSR_TS_S)
 
-/* Reason codes describing kernel causes for transaction aborts. By
-   convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
-   the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED	0xfe
-#define TM_CAUSE_TLBI		0xfc
-#define TM_CAUSE_FAC_UNAV	0xfa
-#define TM_CAUSE_SYSCALL	0xf9 /* Persistent */
-#define TM_CAUSE_MISC		0xf6
-#define TM_CAUSE_SIGNAL	0xf4
-
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT	MSR_SF
 
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index fbe66c463891..9322c28aebd2 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -3,5 +3,8 @@
 
 #define __ARCH_HAS_SA_RESTORER
 #include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
 
 #endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 4b4449abf3f8..9dfbc34bdbf5 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -5,6 +5,8 @@
  * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
  */
 
+#include <uapi/asm/tm.h>
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f7bca6370745..5182c8622b54 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -40,6 +40,7 @@ header-y += statfs.h
 header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
+header-y += tm.h
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644
index 000000000000..85059a00f560
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts. By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT	0x01
+#define TM_CAUSE_RESCHED	0xde
+#define TM_CAUSE_TLBI		0xdc
+#define TM_CAUSE_FAC_UNAV	0xda
+#define TM_CAUSE_SYSCALL	0xd8  /* future use */
+#define TM_CAUSE_MISC		0xd6  /* future use */
+#define TM_CAUSE_SIGNAL	0xd4
+#define TM_CAUSE_ALIGNMENT	0xd2
+#define TM_CAUSE_EMULATE	0xd0
+
+#endif
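
The reason codes above are now exported to userspace, so a TM-aware program can decode why the kernel aborted its transaction. A minimal sketch of that decoding, assuming GCC's HTM builtins from htmintrin.h; the helper names here are illustrative, not part of this patch:

    #include <htmintrin.h>  /* GCC HTM builtins (assumed available) */
    #include <asm/tm.h>     /* the TM_CAUSE_* codes exported above */

    /* The abort cause occupies the top byte of TEXASR (IBM bits 0:7);
     * __builtin_get_texasru() reads the upper 32 bits of that register. */
    static unsigned int tm_failure_cause(void)
    {
            return (__builtin_get_texasru() >> 24) & 0xff;
    }

    /* Bit0 of the cause is the persistence flag: if set, retrying the
     * transaction is pointless (cf. the header comment above). */
    static int tm_abort_is_persistent(void)
    {
            return tm_failure_cause() & TM_CAUSE_PERSISTENT;
    }

Note the old 0xf4-0xfe codes clashed with this convention (TM_CAUSE_SIGNAL, 0xf4, would decode as non-persistent); the new 0xd0-0xde block keeps bit0 free for the persistence flag and stays clear of the 0xe0-0xff range that PAPR reserves for the hypervisor.
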
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c60bbec25c1f..1f0937d7d4b5 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -453,7 +453,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
+		.oprofile_cpu_type	= 0,
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
 		.platform		= "power8",
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_name		= "POWER7+ (raw)",
 		.cpu_features		= CPU_FTRS_POWER7,
 		.cpu_user_features	= COMMON_USER_POWER7,
-		.cpu_user_features	= COMMON_USER2_POWER7,
+		.cpu_user_features2	= COMMON_USER2_POWER7,
 		.mmu_features		= MMU_FTRS_POWER7,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -506,7 +506,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_cpu_type	= 0,
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d22e73e4618b..22b45a4955cd 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -849,7 +849,7 @@ resume_kernel:
 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
 	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r8,TI_FLAGS(r9)
-	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
+	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 	beq+	1f
 
 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0e9095e47b5b..246b11c4fe7e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -501,6 +501,13 @@ BEGIN_FTR_SECTION
 	ldarx	r6,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
 
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself
+ */
+	DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
 	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
 	std	r6,PACACURRENT(r13)	/* Set new 'current' */
 
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9acf50dd5b2..7f2273cc3c7d 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -657,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
  * ranges. However, some machines (thanks Apple !) tend to split their
  * space into lots of small contiguous ranges. So we have to coalesce.
  *
- * - We can only cope with all memory ranges having the same offset
- *   between CPU addresses and PCI addresses. Unfortunately, some bridges
- *   are setup for a large 1:1 mapping along with a small "window" which
- *   maps PCI address 0 to some arbitrary high address of the CPU space in
- *   order to give access to the ISA memory hole.
- *   The way out of here that I've chosen for now is to always set the
- *   offset based on the first resource found, then override it if we
- *   have a different offset and the previous was set by an ISA hole.
- *
  * - Some busses have IO space not starting at 0, which causes trouble with
  *   the way we do our IO resource renumbering. The code somewhat deals with
  *   it for 64 bits but I would expect problems on 32 bits.
@@ -680,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 	int rlen;
 	int pna = of_n_addr_cells(dev);
 	int np = pna + 5;
-	int memno = 0, isa_hole = -1;
+	int memno = 0;
 	u32 pci_space;
 	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
-	unsigned long long isa_mb = 0;
 	struct resource *res;
 
 	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -777,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 		}
 		/* Handles ISA memory hole space here */
 		if (pci_addr == 0) {
-			isa_mb = cpu_addr;
-			isa_hole = memno;
 			if (primary || isa_mem_base == 0)
 				isa_mem_base = cpu_addr;
 			hose->isa_mem_phys = cpu_addr;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 577a8aa69c6e..457e97aa2945 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -18,6 +18,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/debug.h>
+#include <asm/tm.h>
 
 #include "signal.h"
 
@@ -30,13 +31,13 @@ int show_unhandled_signals = 1;
 /*
  * Allocate space for the signal frame
  */
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
 			   size_t frame_size, int is_32)
 {
 	unsigned long oldsp, newsp;
 
 	/* Default to using normal stack */
-	oldsp = get_clean_sp(regs, is_32);
+	oldsp = get_clean_sp(sp, is_32);
 
 	/* Check for alt stack */
 	if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -175,3 +176,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 
 	user_enter();
 }
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+	/* When in an active transaction that takes a signal, we need to be
+	 * careful with the stack. It's possible that the stack has moved back
+	 * up after the tbegin. The obvious case here is when the tbegin is
+	 * called inside a function that returns before a tend. In this case,
+	 * the stack is part of the checkpointed transactional memory state.
+	 * If we write over this non transactionally or in suspend, we are in
+	 * trouble because if we get a tm abort, the program counter and stack
+	 * pointer will be back at the tbegin but our in memory stack won't be
+	 * valid anymore.
+	 *
+	 * To avoid this, when taking a signal in an active transaction, we
+	 * need to use the stack pointer from the checkpointed state, rather
+	 * than the speculated state. This ensures that the signal context
+	 * (written tm suspended) will be written below the stack required for
+	 * the rollback. The transaction is aborted because of the treclaim,
+	 * so any memory written between the tbegin and the signal will be
+	 * rolled back anyway.
+	 *
+	 * For signals taken in non-TM or suspended mode, we use the
+	 * normal/non-checkpointed stack pointer.
+	 */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(regs->msr)) {
+		tm_enable();
+		tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+		if (MSR_TM_TRANSACTIONAL(regs->msr))
+			return current->thread.ckpt_regs.gpr[1];
+	}
+#endif
+	return regs->gpr[1];
+}
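
The failure mode that the comment in get_tm_stackpointer() describes is easiest to see from userspace. A hedged illustration, assuming GCC's HTM builtins; start_txn() and example() are hypothetical names, not kernel code:

    #include <htmintrin.h>  /* __builtin_tbegin/__builtin_tend (assumed) */

    static int start_txn(void)      /* hypothetical helper */
    {
            /* On success we return to the caller *inside* the transaction,
             * popping this frame speculatively. */
            return __builtin_tbegin(0);
    }

    static void example(void)
    {
            if (start_txn()) {
                    /* GPR1 has moved back up past start_txn()'s frame. On
                     * a TM abort the CPU rolls back to the tbegin with the
                     * old, deeper stack pointer and expects that frame to
                     * be intact. A signal frame written at the current
                     * (speculative) GPR1 in suspend would not be rolled
                     * back and would clobber it; hence the kernel builds
                     * the frame below the checkpointed GPR1 instead. */
                    __builtin_tend(0);
            }
    }
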
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index ec84c901ceab..c69b9aeb9f23 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
 
 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
 
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
 				  size_t frame_size, int is_32);
 
 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 95068bf569ad..201385c3a1ae 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
 {
 	unsigned long msr = regs->msr;
 
-	/* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
-	 * thread.transact_fpr[], thread.transact_vr[], etc.
-	 */
-	tm_enable();
-	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
 	/* Make sure floating point registers are stored in regs */
 	flush_fp_to_thread(current);
 
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 
 	/* Set up Signal Frame */
 	/* Put a Real Time Context onto stack */
-	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+	rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
 	addr = rt_sf;
 	if (unlikely(rt_sf == NULL))
 		goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 	unsigned long tramp;
 
 	/* Set up Signal Frame */
-	frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
 	if (unlikely(frame == NULL))
 		goto badframe;
 	sc = (struct sigcontext __user *) &frame->sctx;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c1794286098c..345947367ec0 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
  * As above, but Transactional Memory is in use, so deliver sigcontexts
  * containing checkpointed and transactional register states.
  *
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state. If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
  */
 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 				 struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 
 	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
 
-	/* tm_reclaim rolls back all reg states, saving checkpointed (older)
-	 * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
-	 * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
-	 * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
-	 * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the
-	 * stack, in *regs.
-	 */
-	tm_enable();
-	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
 	flush_fp_to_thread(current);
 
 #ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 	unsigned long newsp = 0;
 	long err = 0;
 
-	frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
 	if (unlikely(frame == NULL))
 		goto badframe;
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a7a648f6b750..f18c79c324ef 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,6 +53,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #include <asm/processor.h>
+#include <asm/tm.h>
 #endif
 #include <asm/kexec.h>
 #include <asm/ppc-opcode.h>
@@ -932,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
 	return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+	/* If we're emulating a load/store in an active transaction, we cannot
+	 * emulate it as the kernel operates in transaction suspended context.
+	 * We need to abort the transaction. This creates a persistent TM
+	 * abort so tell the user what caused it with a new code.
+	 */
+	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+		tm_enable();
+		tm_abort(cause);
+		return true;
+	}
+	return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+	return false;
+}
+#endif
+
 static int emulate_instruction(struct pt_regs *regs)
 {
 	u32 instword;
@@ -971,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs)
 
 	/* Emulate load/store string insn. */
 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+		if (tm_abort_check(regs,
+				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+			return -EINVAL;
 		PPC_WARN_EMULATED(string, regs);
 		return emulate_string_inst(regs, instword);
 	}
@@ -1148,6 +1174,9 @@ void alignment_exception(struct pt_regs *regs)
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
+	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+		goto bail;
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9de24f8e03c7..550f5928b394 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	case H_CPPR:
 	case H_EOI:
 	case H_IPI:
+	case H_IPOLL:
+	case H_XIRR_X:
 		if (kvmppc_xics_enabled(vcpu)) {
 			ret = kvmppc_xics_hcall(vcpu, req);
 			break;
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index b24309c6c2d5..da0e0bc268bd 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -257,6 +257,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_CPPR:
 	case H_EOI:
 	case H_IPI:
+	case H_IPOLL:
+	case H_XIRR_X:
 		if (kvmppc_xics_enabled(vcpu))
 			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
 		break;
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index f7a103756618..94c1dd46b83d 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -650,6 +650,23 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	return H_SUCCESS;
 }
 
+static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+	union kvmppc_icp_state state;
+	struct kvmppc_icp *icp;
+
+	icp = vcpu->arch.icp;
+	if (icp->server_num != server) {
+		icp = kvmppc_xics_find_server(vcpu->kvm, server);
+		if (!icp)
+			return H_PARAMETER;
+	}
+	state = ACCESS_ONCE(icp->state);
+	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+	kvmppc_set_gpr(vcpu, 5, state.mfrr);
+	return H_SUCCESS;
+}
+
 static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
 	union kvmppc_icp_state old_state, new_state;
@@ -787,6 +804,18 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
 	if (!xics || !vcpu->arch.icp)
 		return H_HARDWARE;
 
+	/* These requests don't have real-mode implementations at present */
+	switch (req) {
+	case H_XIRR_X:
+		res = kvmppc_h_xirr(vcpu);
+		kvmppc_set_gpr(vcpu, 4, res);
+		kvmppc_set_gpr(vcpu, 5, get_tb());
+		return rc;
+	case H_IPOLL:
+		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+		return rc;
+	}
+
 	/* Check for real mode returning too hard */
 	if (xics->real_mode)
 		return kvmppc_xics_rm_complete(vcpu, req);
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 0ef75bf0695c..395c594722a2 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7)
 	 * aligned we don't need to clear the bottom 7 bits of either
 	 * address.
 	 */
-	ori	r9,r3,1		/* stream=1 */
+	ori	r9,r3,1		/* stream=1 => to */
 
 #ifdef CONFIG_PPC_64K_PAGES
-	lis	r7,0x0E01	/* depth=7, units=512 */
+	lis	r7,0x0E01	/* depth=7
+				 * units/cachelines=512 */
 #else
 	lis	r7,0x0E00	/* depth=7 */
-	ori	r7,r7,0x1000	/* units=32 */
+	ori	r7,r7,0x1000	/* units/cachelines=32 */
 #endif
 	ori	r10,r7,1	/* stream=1 */
 
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7)
 
 .machine push
 .machine "power4"
-	dcbt	r0,r4,0b01000
-	dcbt	r0,r7,0b01010
-	dcbtst	r0,r9,0b01000
-	dcbtst	r0,r10,0b01010
+	/* setup read stream 0 */
+	dcbt	r0,r4,0b01000	/* addr from */
+	dcbt	r0,r7,0b01010	/* length and depth from */
+	/* setup write stream 1 */
+	dcbtst	r0,r9,0b01000	/* addr to */
+	dcbtst	r0,r10,0b01010	/* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* GO */
+	dcbt	r0,r8,0b01010	/* all streams GO */
 .machine pop
 
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 0d24ff15f5f6..d1f11795a7ad 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -318,12 +318,14 @@ err1; stb r0,0(r3)
 
 .machine push
 .machine "power4"
-	dcbt	r0,r6,0b01000
-	dcbt	r0,r7,0b01010
-	dcbtst	r0,r9,0b01000
-	dcbtst	r0,r10,0b01010
+	/* setup read stream 0 */
+	dcbt	r0,r6,0b01000	/* addr from */
+	dcbt	r0,r7,0b01010	/* length and depth from */
+	/* setup write stream 1 */
+	dcbtst	r0,r9,0b01000	/* addr to */
+	dcbtst	r0,r10,0b01010	/* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* GO */
+	dcbt	r0,r8,0b01010	/* all streams GO */
 .machine pop
 
 	beq	cr1,.Lunwind_stack_nonvmx_copy
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6a2aead5b0e5..4c122c3f1623 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
 	hpte_v = hptep->v;
 	actual_psize = hpte_actual_psize(hptep, psize);
+	/*
+	 * We need to invalidate the TLB always because hpte_remove doesn't do
+	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+	 * random entry from it. When we do that we don't invalidate the TLB
+	 * (hpte_remove) because we assume the old translation is still
+	 * technically "valid".
+	 */
 	if (actual_psize < 0) {
-		native_unlock_hpte(hptep);
-		return -1;
+		actual_psize = psize;
+		ret = -1;
+		goto err_out;
 	}
-	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
 		DBG_LOW(" -> miss\n");
 		ret = -1;
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
 			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
 	}
+err_out:
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	hptep = htab_address + slot;
 	actual_psize = hpte_actual_psize(hptep, psize);
 	if (actual_psize < 0)
-		return;
+		actual_psize = psize;
 
 	/* Update the HPTE */
 	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	hpte_v = hptep->v;
 
 	actual_psize = hpte_actual_psize(hptep, psize);
+	/*
+	 * We need to invalidate the TLB always because hpte_remove doesn't do
+	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+	 * random entry from it. When we do that we don't invalidate the TLB
+	 * (hpte_remove) because we assume the old translation is still
+	 * technically "valid".
+	 */
 	if (actual_psize < 0) {
+		actual_psize = psize;
 		native_unlock_hpte(hptep);
-		local_irq_restore(flags);
-		return;
+		goto err_out;
 	}
-	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v))
 		native_unlock_hpte(hptep);
 	else
 		/* Invalidate the hpte. NOTE: this also unlocks it */
 		hptep->v = 0;
 
+err_out:
 	/* Invalidate the TLB */
 	tlbie(vpn, psize, actual_psize, ssize, local);
-
 	local_irq_restore(flags);
 }
 
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 426180b84978..845c867444e6 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -110,7 +110,7 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 
 static bool regs_use_siar(struct pt_regs *regs)
 {
-	return !!(regs->result & 1);
+	return !!regs->result;
 }
 
 /*
@@ -136,22 +136,30 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * If we're not doing instruction sampling, give them the SDAR
  * (sampled data address). If we are doing instruction sampling, then
  * only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
  */
 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 {
 	unsigned long mmcra = regs->dsisr;
-	unsigned long sdsync;
+	bool sdar_valid;
 
-	if (ppmu->flags & PPMU_SIAR_VALID)
-		sdsync = POWER7P_MMCRA_SDAR_VALID;
-	else if (ppmu->flags & PPMU_ALT_SIPR)
-		sdsync = POWER6_MMCRA_SDSYNC;
-	else
-		sdsync = MMCRA_SDSYNC;
+	if (ppmu->flags & PPMU_HAS_SIER)
+		sdar_valid = regs->dar & SIER_SDAR_VALID;
+	else {
+		unsigned long sdsync;
+
+		if (ppmu->flags & PPMU_SIAR_VALID)
+			sdsync = POWER7P_MMCRA_SDAR_VALID;
+		else if (ppmu->flags & PPMU_ALT_SIPR)
+			sdsync = POWER6_MMCRA_SDSYNC;
+		else
+			sdsync = MMCRA_SDSYNC;
+
+		sdar_valid = mmcra & sdsync;
+	}
 
-	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
 		*addrp = mfspr(SPRN_SDAR);
 }
 
@@ -181,11 +189,6 @@ static bool regs_sipr(struct pt_regs *regs)
 	return !!(regs->dsisr & sipr);
 }
 
-static bool regs_no_sipr(struct pt_regs *regs)
-{
-	return !!(regs->result & 2);
-}
-
 static inline u32 perf_flags_from_msr(struct pt_regs *regs)
 {
 	if (regs->msr & MSR_PR)
@@ -208,7 +211,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 	 * SIAR which should give slightly more reliable
 	 * results
 	 */
-	if (regs_no_sipr(regs)) {
+	if (ppmu->flags & PPMU_NO_SIPR) {
 		unsigned long siar = mfspr(SPRN_SIAR);
 		if (siar >= PAGE_OFFSET)
 			return PERF_RECORD_MISC_KERNEL;
@@ -239,22 +242,9 @@ static inline void perf_read_regs(struct pt_regs *regs)
 	int use_siar;
 
 	regs->dsisr = mmcra;
-	regs->result = 0;
-
-	if (ppmu->flags & PPMU_NO_SIPR)
-		regs->result |= 2;
-
-	/*
-	 * On power8 if we're in random sampling mode, the SIER is updated.
-	 * If we're in continuous sampling mode, we don't have SIPR.
-	 */
-	if (ppmu->flags & PPMU_HAS_SIER) {
-		if (marked)
-			regs->dar = mfspr(SPRN_SIER);
-		else
-			regs->result |= 2;
-	}
 
+	if (ppmu->flags & PPMU_HAS_SIER)
+		regs->dar = mfspr(SPRN_SIER);
 
 	/*
 	 * If this isn't a PMU exception (eg a software event) the SIAR is
@@ -279,12 +269,12 @@ static inline void perf_read_regs(struct pt_regs *regs)
 		use_siar = 1;
 	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
 		use_siar = 0;
-	else if (!regs_no_sipr(regs) && regs_sipr(regs))
+	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
 		use_siar = 0;
 	else
 		use_siar = 1;
 
-	regs->result |= use_siar;
+	regs->result = use_siar;
 }
 
 /*
@@ -308,8 +298,13 @@ static inline int siar_valid(struct pt_regs *regs)
 	unsigned long mmcra = regs->dsisr;
 	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 
-	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
-		return mmcra & POWER7P_MMCRA_SIAR_VALID;
+	if (marked) {
+		if (ppmu->flags & PPMU_HAS_SIER)
+			return regs->dar & SIER_SIAR_VALID;
+
+		if (ppmu->flags & PPMU_SIAR_VALID)
+			return mmcra & POWER7P_MMCRA_SIAR_VALID;
+	}
 
 	return 1;
 }
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 023b288f895b..4459eff7a75a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,6 +19,8 @@ config PPC_PSERIES
 	select ZLIB_DEFLATE
 	select PPC_DOORBELL
 	select HAVE_CONTEXT_TRACKING
+	select HOTPLUG if SMP
+	select HOTPLUG_CPU if SMP
 	default y
 
 config PPC_SPLPAR
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0a13ecb270c7..3cc2f9159ab1 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock);
 
 #ifdef CONFIG_PPC32	/* XXX for now */
 #ifdef CONFIG_IRQ_ALL_CPUS
-#define distribute_irqs	(!(mpic->flags & MPIC_SINGLE_DEST_CPU))
+#define distribute_irqs	(1)
 #else
 #define distribute_irqs	(0)
 #endif
@@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void)
 	 * it differently, then we should make sure we also change the default
 	 * values of irq_desc[].affinity in irq.c.
 	 */
-	if (distribute_irqs) {
+	if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
 		for (i = 0; i < mpic->num_sources ; i++)
 			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
 				mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);