Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                        |   1
-rw-r--r--  arch/powerpc/include/asm/archrandom.h       |  18
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h       |   5
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h    |  12
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h   |   2
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h          |   2
-rw-r--r--  arch/powerpc/include/asm/reg.h              |   1
-rw-r--r--  arch/powerpc/include/asm/tm.h               |   4
-rw-r--r--  arch/powerpc/kernel/ftrace.c                |   7
-rw-r--r--  arch/powerpc/kernel/prom.c                  |   3
-rw-r--r--  arch/powerpc/kernel/sysfs.c                 |   8
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c         |   9
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c         |  28
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                | 159
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S     |  22
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c         |   6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S     | 187
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c              |   7
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c             |   7
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c      |   2
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c   |   2
-rw-r--r--  arch/powerpc/sysdev/msi_bitmap.c            |   2
22 files changed, 412 insertions(+), 82 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f3d7846bc9b2..6c03a94991ad 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@ config PPC
 	select BINFMT_ELF
 	select OF
 	select OF_EARLY_FLATTREE
+	select OF_RESERVED_MEM
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index d853d163ba47..bde531103638 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -25,8 +25,26 @@ static inline int arch_get_random_int(unsigned int *v)
 	return rc;
 }
 
+static inline int arch_has_random(void)
+{
+	return !!ppc_md.get_random_long;
+}
+
 int powernv_get_random_long(unsigned long *v);
 
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+	return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
+	return 0;
+}
+static inline int arch_has_random_seed(void)
+{
+	return 0;
+}
+
 #endif /* CONFIG_ARCH_RANDOM */
 
 #endif /* _ASM_POWERPC_ARCHRANDOM_H */
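Note: on this generation powerpc reports no dedicated hardware seed source, so all three arch_*_seed hooks are stubs that return 0. A minimal sketch of the contract a generic caller relies on; get_seed_or_fallback() and seed_from_timer() are hypothetical names for illustration, not part of this patch:

/*
 * Assumed contract: a nonzero return means *v was filled with a
 * hardware seed; 0 means the caller must fall back to other entropy.
 */
static unsigned long get_seed_or_fallback(void)
{
	unsigned long v;

	if (arch_get_random_seed_long(&v))
		return v;		/* never taken on powerpc here */
	return seed_from_timer();	/* hypothetical fallback source */
}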
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 83851aabfdc8..bb1e38a23ac7 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -304,6 +304,11 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
+static inline bool is_kvmppc_resume_guest(int r)
+{
+	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
+}
+
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3		0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index bf0fa8b0a883..51388befeddb 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -289,6 +289,18 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
+
+/*
+ * Like kvm_memslots(), but for use in real mode when we can't do
+ * any RCU stuff (since the secondary threads are offline from the
+ * kernel's point of view), and we can't print anything.
+ * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
+ */
+static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
+{
+	return rcu_dereference_raw_notrace(kvm->memslots);
+}
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
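Note: for contrast, the generic kvm_memslots() of this era looks roughly like the sketch below (reconstructed from include/linux/kvm_host.h, so treat the exact lockdep expression as an assumption). The raw variant above drops both the lockdep check and the RCU tracing, which is what makes it callable from real mode:

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}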
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index f3a91dc02c98..821725c1bf46 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -94,7 +94,7 @@ struct kvmppc_host_state {
 	unsigned long xics_phys;
 	u32 saved_xirr;
 	u64 dabr;
-	u64 host_mmcr[3];
+	u64 host_mmcr[7];	/* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
 	u32 host_pmc[8];
 	u64 host_purr;
 	u64 host_spurr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index fcd53f0d34ba..4096f16502a9 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -129,6 +129,8 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
+extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			     unsigned long ioba);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 2189f8f2ca88..e5d2e0bc7e03 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -213,6 +213,7 @@
 #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
 #define SPRN_TFIAR	0x81	/* Transaction Failure Inst Addr */
 #define SPRN_TEXASR	0x82	/* Transaction EXception & Summary */
+#define   TEXASR_FS	__MASK(63-36)	/* Transaction Failure Summary */
 #define SPRN_TEXASRU	0x83	/* ''	   ''	   ''	 Upper 32 */
 #define SPRN_TFHAR	0x80	/* Transaction Failure Handler Addr */
 #define SPRN_CTRLF	0x088
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 0c9f8b74dd97..c22d704b6d41 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -7,6 +7,8 @@
 
 #include <uapi/asm/tm.h>
 
+#ifndef __ASSEMBLY__
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
@@ -21,3 +23,5 @@ extern void tm_recheckpoint(struct thread_struct *thread,
 extern void tm_abort(uint8_t cause);
 extern void tm_save_sprs(struct thread_struct *thread);
 extern void tm_restore_sprs(struct thread_struct *thread);
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index b0ded97ee4e1..6a014c763cc7 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -532,13 +532,8 @@ void arch_ftrace_update_code(int command)
 		ftrace_disable_ftrace_graph_caller();
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* caller expects data to be zero */
-	unsigned long *p = data;
-
-	*p = 0;
-
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index d711b7eb05aa..dd72bebd708a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/memblock.h>
 #include <linux/of.h>
+#include <linux/of_fdt.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -588,6 +589,8 @@ static void __init early_reserve_mem_dt(void)
 			memblock_reserve(base, size);
 		}
 	}
+
+	early_init_fdt_scan_reserved_mem();
 }
 
 static void __init early_reserve_mem(void)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 97e1dc917683..d90d4b7810d6 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -975,7 +975,8 @@ static int __init topology_init(void)
 	int cpu;
 
 	register_nodes();
-	register_cpu_notifier(&sysfs_cpu_nb);
+
+	cpu_notifier_register_begin();
 
 	for_each_possible_cpu(cpu) {
 		struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -999,6 +1000,11 @@ static int __init topology_init(void)
 		if (cpu_online(cpu))
 			register_cpu_online(cpu);
 	}
+
+	__register_cpu_notifier(&sysfs_cpu_nb);
+
+	cpu_notifier_register_done();
+
 #ifdef CONFIG_PPC64
 	sysfs_create_dscr_default();
 #endif /* CONFIG_PPC64 */
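Note: the begin/done pair closes the race between walking the CPUs and a concurrent hotplug event. The general shape of the pattern, with setup_cpu_state() as a hypothetical stand-in for the per-CPU work topology_init() actually does:

cpu_notifier_register_begin();		/* holds off CPU hotplug */

for_each_online_cpu(cpu)
	setup_cpu_state(cpu);		/* hypothetical per-CPU setup */

/* the registration lock is already held, so use the __ variant */
__register_cpu_notifier(&sysfs_cpu_nb);

cpu_notifier_register_done();		/* hotplug may proceed again */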
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 303ece75b8e4..fb25ebc0af0c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,14 @@ int kvmppc_mmu_hv_init(void)
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
+	unsigned long msr = vcpu->arch.intr_msr;
+
+	/* If transactional, change to suspend mode on IRQ delivery */
+	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+		msr |= MSR_TS_S;
+	else
+		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
+	kvmppc_set_msr(vcpu, msr);
 }
 
 /*
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 2c25f5412bdb..89e96b3e0039 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -75,3 +75,31 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	return H_TOO_HARD;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
+
+long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+		      unsigned long ioba)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvmppc_spapr_tce_table *stt;
+
+	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+		if (stt->liobn == liobn) {
+			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
+			struct page *page;
+			u64 *tbl;
+
+			if (ioba >= stt->window_size)
+				return H_PARAMETER;
+
+			page = stt->pages[idx / TCES_PER_PAGE];
+			tbl = (u64 *)page_address(page);
+
+			vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
+			return H_SUCCESS;
+		}
+	}
+
+	/* Didn't find the liobn, punt it to userspace */
+	return H_TOO_HARD;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
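Note: a guest-side sketch of the hcall this implements, assuming the standard plpar_hcall() wrapper and the H_GET_TCE opcode from asm/hvcall.h; read_tce() itself is a hypothetical helper, not part of this patch. On success the TCE entry comes back in the first return register, matching the vcpu->arch.gpr[4] assignment above:

static long read_tce(unsigned long liobn, unsigned long ioba, u64 *tce)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
	if (rc == H_SUCCESS)
		*tce = retbuf[0];	/* TCE entry read back from the table */
	return rc;
}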
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 17fc9496b6ac..8227dba5af0f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -86,7 +86,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
 	/* CPU points to the first thread of the core */
 	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
-#ifdef CONFIG_KVM_XICS
+#ifdef CONFIG_PPC_ICP_NATIVE
 		int real_cpu = cpu + vcpu->arch.ptid;
 		if (paca[real_cpu].kvm_hstate.xics_phys)
 			xics_wake_cpu(real_cpu);
@@ -879,17 +879,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_IAMR:
 		*val = get_reg_val(id, vcpu->arch.iamr);
 		break;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	case KVM_REG_PPC_TFHAR:
-		*val = get_reg_val(id, vcpu->arch.tfhar);
-		break;
-	case KVM_REG_PPC_TFIAR:
-		*val = get_reg_val(id, vcpu->arch.tfiar);
-		break;
-	case KVM_REG_PPC_TEXASR:
-		*val = get_reg_val(id, vcpu->arch.texasr);
-		break;
-#endif
 	case KVM_REG_PPC_FSCR:
 		*val = get_reg_val(id, vcpu->arch.fscr);
 		break;
@@ -970,6 +959,69 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_PPR:
 		*val = get_reg_val(id, vcpu->arch.ppr);
 		break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case KVM_REG_PPC_TFHAR:
+		*val = get_reg_val(id, vcpu->arch.tfhar);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		*val = get_reg_val(id, vcpu->arch.tfiar);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		*val = get_reg_val(id, vcpu->arch.texasr);
+		break;
+	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
+		i = id - KVM_REG_PPC_TM_GPR0;
+		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
+		break;
+	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
+	{
+		int j;
+		i = id - KVM_REG_PPC_TM_VSR0;
+		if (i < 32)
+			for (j = 0; j < TS_FPRWIDTH; j++)
+				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
+		else {
+			if (cpu_has_feature(CPU_FTR_ALTIVEC))
+				val->vval = vcpu->arch.vr_tm.vr[i-32];
+			else
+				r = -ENXIO;
+		}
+		break;
+	}
+	case KVM_REG_PPC_TM_CR:
+		*val = get_reg_val(id, vcpu->arch.cr_tm);
+		break;
+	case KVM_REG_PPC_TM_LR:
+		*val = get_reg_val(id, vcpu->arch.lr_tm);
+		break;
+	case KVM_REG_PPC_TM_CTR:
+		*val = get_reg_val(id, vcpu->arch.ctr_tm);
+		break;
+	case KVM_REG_PPC_TM_FPSCR:
+		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
+		break;
+	case KVM_REG_PPC_TM_AMR:
+		*val = get_reg_val(id, vcpu->arch.amr_tm);
+		break;
+	case KVM_REG_PPC_TM_PPR:
+		*val = get_reg_val(id, vcpu->arch.ppr_tm);
+		break;
+	case KVM_REG_PPC_TM_VRSAVE:
+		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
+		break;
+	case KVM_REG_PPC_TM_VSCR:
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))
+			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
+		else
+			r = -ENXIO;
+		break;
+	case KVM_REG_PPC_TM_DSCR:
+		*val = get_reg_val(id, vcpu->arch.dscr_tm);
+		break;
+	case KVM_REG_PPC_TM_TAR:
+		*val = get_reg_val(id, vcpu->arch.tar_tm);
+		break;
+#endif
 	case KVM_REG_PPC_ARCH_COMPAT:
 		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
 		break;
@@ -1039,17 +1091,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_IAMR:
 		vcpu->arch.iamr = set_reg_val(id, *val);
 		break;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	case KVM_REG_PPC_TFHAR:
-		vcpu->arch.tfhar = set_reg_val(id, *val);
-		break;
-	case KVM_REG_PPC_TFIAR:
-		vcpu->arch.tfiar = set_reg_val(id, *val);
-		break;
-	case KVM_REG_PPC_TEXASR:
-		vcpu->arch.texasr = set_reg_val(id, *val);
-		break;
-#endif
 	case KVM_REG_PPC_FSCR:
 		vcpu->arch.fscr = set_reg_val(id, *val);
 		break;
@@ -1144,6 +1185,68 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_PPR:
 		vcpu->arch.ppr = set_reg_val(id, *val);
 		break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case KVM_REG_PPC_TFHAR:
+		vcpu->arch.tfhar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		vcpu->arch.tfiar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		vcpu->arch.texasr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
+		i = id - KVM_REG_PPC_TM_GPR0;
+		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
+	{
+		int j;
+		i = id - KVM_REG_PPC_TM_VSR0;
+		if (i < 32)
+			for (j = 0; j < TS_FPRWIDTH; j++)
+				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
+		else
+			if (cpu_has_feature(CPU_FTR_ALTIVEC))
+				vcpu->arch.vr_tm.vr[i-32] = val->vval;
+			else
+				r = -ENXIO;
+		break;
+	}
+	case KVM_REG_PPC_TM_CR:
+		vcpu->arch.cr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_LR:
+		vcpu->arch.lr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_CTR:
+		vcpu->arch.ctr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_FPSCR:
+		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_AMR:
+		vcpu->arch.amr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_PPR:
+		vcpu->arch.ppr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_VRSAVE:
+		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_VSCR:
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))
+			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
+		else
+			r = -ENXIO;
+		break;
+	case KVM_REG_PPC_TM_DSCR:
+		vcpu->arch.dscr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_TAR:
+		vcpu->arch.tar_tm = set_reg_val(id, *val);
+		break;
+#endif
 	case KVM_REG_PPC_ARCH_COMPAT:
 		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
 		break;
@@ -1360,9 +1463,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	smp_wmb();
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 	if (cpu != smp_processor_id()) {
-#ifdef CONFIG_KVM_XICS
 		xics_wake_cpu(cpu);
-#endif
 		if (vcpu->arch.ptid)
 			++vc->n_woken;
 	}
@@ -1530,7 +1631,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 		vcpu->arch.trap = 0;
 
 		if (vcpu->arch.ceded) {
-			if (ret != RESUME_GUEST)
+			if (!is_kvmppc_resume_guest(ret))
 				kvmppc_end_cede(vcpu);
 			else
 				kvmppc_set_timer(vcpu);
@@ -1541,7 +1642,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->vcore_state = VCORE_INACTIVE;
 	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
 				 arch.run_list) {
-		if (vcpu->arch.ret != RESUME_GUEST) {
+		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
 		}
@@ -1731,7 +1832,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 		}
-	} while (r == RESUME_GUEST);
+	} while (is_kvmppc_resume_guest(r));
 
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
@@ -2366,7 +2467,7 @@ static int kvmppc_book3s_init_hv(void)
 	 */
 	r = kvmppc_core_check_processor_compat_hv();
 	if (r < 0)
-		return r;
+		return -ENODEV;
 
 	kvm_ops_hv.owner = THIS_MODULE;
 	kvmppc_hv_ops = &kvm_ops_hv;
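Note: the new KVM_REG_PPC_TM_* IDs above are reached through the standard ONE_REG ioctls. A minimal userspace sketch, assuming an open KVM vcpu file descriptor; get_tfhar() is a hypothetical helper:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int get_tfhar(int vcpu_fd, __u64 *tfhar)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TFHAR,
		.addr = (__u64)(unsigned long)tfhar,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
}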
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index e873796b1a29..e18e3cfc32de 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -71,6 +71,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	mtmsrd	r10,1
 
 	/* Save host PMU registers */
+BEGIN_FTR_SECTION
+	/* Work around P8 PMAE bug */
+	li	r3, -1
+	clrrdi	r3, r3, 10
+	mfspr	r8, SPRN_MMCR2
+	mtspr	SPRN_MMCR2, r3		/* freeze all counters using MMCR2 */
+	isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	li	r3, 1
 	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
 	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
@@ -87,9 +95,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	cmpwi	r5, 0
 	beq	31f			/* skip if not */
 	mfspr	r5, SPRN_MMCR1
+	mfspr	r9, SPRN_SIAR
+	mfspr	r10, SPRN_SDAR
 	std	r7, HSTATE_MMCR(r13)
 	std	r5, HSTATE_MMCR + 8(r13)
 	std	r6, HSTATE_MMCR + 16(r13)
+	std	r9, HSTATE_MMCR + 24(r13)
+	std	r10, HSTATE_MMCR + 32(r13)
+BEGIN_FTR_SECTION
+	mfspr	r9, SPRN_SIER
+	std	r8, HSTATE_MMCR + 40(r13)
+	std	r9, HSTATE_MMCR + 48(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r5, SPRN_PMC2
 	mfspr	r6, SPRN_PMC3
@@ -110,6 +127,11 @@ BEGIN_FTR_SECTION
 	stw	r10, HSTATE_PMC + 24(r13)
 	stw	r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+BEGIN_FTR_SECTION
+	mfspr	r9, SPRN_SIER
+	std	r8, HSTATE_MMCR + 40(r13)
+	std	r9, HSTATE_MMCR + 48(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
 	/*
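Note: the workaround constant is built in two instructions: li r3, -1 produces all-ones, then clrrdi r3, r3, 10 clears the low 10 bits, leaving 0xFFFFFFFFFFFFFC00. The equivalent C constant is below; the exact layout of MMCR2's freeze fields is an assumption here, the stated intent being simply "set every freeze control":

unsigned long mmcr2_freeze_all = ~0UL << 10;	/* 0xFFFFFFFFFFFFFC00 */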
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 37fb3caa4c80..1d6c56ad5b60 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -111,7 +111,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
 	ptel = rev->guest_rpte |= rcbits;
 	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
-	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	if (!memslot)
 		return;
 
@@ -192,7 +192,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Find the memslot (if any) for this address */
 	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
 	gfn = gpa >> PAGE_SHIFT;
-	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	pa = 0;
 	is_io = ~0ul;
 	rmap = NULL;
@@ -670,7 +670,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	psize = hpte_page_size(v, r);
 	gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
-	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	if (memslot) {
 		hva = __gfn_to_hva_memslot(memslot, gfn);
 		pte = lookup_linux_pte_and_update(pgdir, hva,
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 53d647f8e741..ffbb871c2bd8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -28,6 +28,9 @@
 #include <asm/exception-64s.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/mmu-hash64.h>
+#include <asm/tm.h>
+
+#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 #ifdef __LITTLE_ENDIAN__
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
@@ -106,8 +109,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, HSTATE_MMCR(r13)
 	ld	r4, HSTATE_MMCR + 8(r13)
 	ld	r5, HSTATE_MMCR + 16(r13)
+	ld	r6, HSTATE_MMCR + 24(r13)
+	ld	r7, HSTATE_MMCR + 32(r13)
 	mtspr	SPRN_MMCR1, r4
 	mtspr	SPRN_MMCRA, r5
+	mtspr	SPRN_SIAR, r6
+	mtspr	SPRN_SDAR, r7
+BEGIN_FTR_SECTION
+	ld	r8, HSTATE_MMCR + 40(r13)
+	ld	r9, HSTATE_MMCR + 48(r13)
+	mtspr	SPRN_MMCR2, r8
+	mtspr	SPRN_SIER, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_MMCR0, r3
 	isync
 23:
@@ -597,6 +610,116 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	b	skip_tm
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+
+	/* Turn on TM/FP/VSX/VMX so we can restore them. */
+	mfmsr	r5
+	li	r6, MSR_TM >> 32
+	sldi	r6, r6, 32
+	or	r5, r5, r6
+	ori	r5, r5, MSR_FP
+	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
+	mtmsrd	r5
+
+	/*
+	 * The user may change these outside of a transaction, so they must
+	 * always be context switched.
+	 */
+	ld	r5, VCPU_TFHAR(r4)
+	ld	r6, VCPU_TFIAR(r4)
+	ld	r7, VCPU_TEXASR(r4)
+	mtspr	SPRN_TFHAR, r5
+	mtspr	SPRN_TFIAR, r6
+	mtspr	SPRN_TEXASR, r7
+
+	ld	r5, VCPU_MSR(r4)
+	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	beq	skip_tm	/* TM not active in guest */
+
+	/* Make sure the failure summary is set, otherwise we'll program check
+	 * when we trechkpt.  It's possible that this might have been not set
+	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+	 * host.
+	 */
+	oris	r7, r7, (TEXASR_FS)@h
+	mtspr	SPRN_TEXASR, r7
+
+	/*
+	 * We need to load up the checkpointed state for the guest.
+	 * We need to do this early as it will blow away any GPRs, VSRs and
+	 * some SPRs.
+	 */
+
+	mr	r31, r4
+	addi	r3, r31, VCPU_FPRS_TM
+	bl	.load_fp_state
+	addi	r3, r31, VCPU_VRS_TM
+	bl	.load_vr_state
+	mr	r4, r31
+	lwz	r7, VCPU_VRSAVE_TM(r4)
+	mtspr	SPRN_VRSAVE, r7
+
+	ld	r5, VCPU_LR_TM(r4)
+	lwz	r6, VCPU_CR_TM(r4)
+	ld	r7, VCPU_CTR_TM(r4)
+	ld	r8, VCPU_AMR_TM(r4)
+	ld	r9, VCPU_TAR_TM(r4)
+	mtlr	r5
+	mtcr	r6
+	mtctr	r7
+	mtspr	SPRN_AMR, r8
+	mtspr	SPRN_TAR, r9
+
+	/*
+	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+	 * till the last moment to avoid running with userspace PPR and DSCR for
+	 * too long.
+	 */
+	ld	r29, VCPU_DSCR_TM(r4)
+	ld	r30, VCPU_PPR_TM(r4)
+
+	std	r2, PACATMSCRATCH(r13) /* Save TOC */
+
+	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/* Load GPRs r0-r28 */
+	reg = 0
+	.rept	29
+	ld	reg, VCPU_GPRS_TM(reg)(r31)
+	reg = reg + 1
+	.endr
+
+	mtspr	SPRN_DSCR, r29
+	mtspr	SPRN_PPR, r30
+
+	/* Load final GPRs */
+	ld	29, VCPU_GPRS_TM(29)(r31)
+	ld	30, VCPU_GPRS_TM(30)(r31)
+	ld	31, VCPU_GPRS_TM(31)(r31)
+
+	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
+	TRECHKPT
+
+	/* Now let's get back the state we need. */
+	HMT_MEDIUM
+	GET_PACA(r13)
+	ld	r29, HSTATE_DSCR(r13)
+	mtspr	SPRN_DSCR, r29
+	ld	r4, HSTATE_KVM_VCPU(r13)
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATMSCRATCH(r13)
+
+	/* Set the MSR RI since we have our registers back. */
+	li	r5, MSR_RI
+	mtmsrd	r5, 1
+skip_tm:
+#endif
+
 	/* Load guest PMU registers */
 	/* R4 is live here (vcpu pointer) */
 	li	r3, 1
@@ -704,14 +827,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	ld	r6, VCPU_VTB(r4)
 	mtspr	SPRN_IC, r5
 	mtspr	SPRN_VTB, r6
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	ld	r5, VCPU_TFHAR(r4)
-	ld	r6, VCPU_TFIAR(r4)
-	ld	r7, VCPU_TEXASR(r4)
-	mtspr	SPRN_TFHAR, r5
-	mtspr	SPRN_TFIAR, r6
-	mtspr	SPRN_TEXASR, r7
-#endif
 	ld	r8, VCPU_EBBHR(r4)
 	mtspr	SPRN_EBBHR, r8
 	ld	r5, VCPU_EBBRR(r4)
@@ -736,6 +851,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	 * Set the decrementer to the guest decrementer.
 	 */
 	ld	r8,VCPU_DEC_EXPIRES(r4)
+	/* r8 is a host timebase value here, convert to guest TB */
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r6,VCORE_TB_OFFSET(r5)
+	add	r8,r8,r6
 	mftb	r7
 	subf	r3,r7,r8
 	mtspr	SPRN_DEC,r3
@@ -817,7 +936,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
 	mtspr	SPRN_SRR1, r11
-	ld	r11, VCPU_INTR_MSR(r4)
+	mr	r9, r4
+	bl	kvmppc_msr_interrupt
 5:
 
 /*
@@ -1098,17 +1218,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 	mftb	r6
 	extsw	r5,r5
 	add	r5,r5,r6
+	/* r5 is a guest timebase value here, convert to host TB */
+	ld	r3,HSTATE_KVM_VCORE(r13)
+	ld	r4,VCORE_TB_OFFSET(r3)
+	subf	r5,r4,r5
 	std	r5,VCPU_DEC_EXPIRES(r9)
 
 BEGIN_FTR_SECTION
 	b	8f
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
-	mfmsr	r8
-	li	r0, 1
-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
-	mtmsrd	r8
-
 	/* Save POWER8-specific registers */
 	mfspr	r5, SPRN_IAMR
 	mfspr	r6, SPRN_PSPB
@@ -1122,14 +1240,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r5, VCPU_IC(r9)
 	std	r6, VCPU_VTB(r9)
 	std	r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	mfspr	r5, SPRN_TFHAR
-	mfspr	r6, SPRN_TFIAR
-	mfspr	r7, SPRN_TEXASR
-	std	r5, VCPU_TFHAR(r9)
-	std	r6, VCPU_TFIAR(r9)
-	std	r7, VCPU_TEXASR(r9)
-#endif
 	mfspr	r8, SPRN_EBBHR
 	std	r8, VCPU_EBBHR(r9)
 	mfspr	r5, SPRN_EBBRR
@@ -1387,7 +1497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	ld	r8,VCORE_TB_OFFSET(r5)
 	cmpdi	r8,0
 	beq	17f
-	mftb	r6			/* current host timebase */
+	mftb	r6			/* current guest timebase */
 	subf	r8,r8,r6
 	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
 	mftb	r7			/* check if lower 24 bits overflowed */
@@ -1557,7 +1667,7 @@ kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1626,7 +1736,7 @@ kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1669,7 +1779,7 @@ sc_1_fast_return:
 	mtspr	SPRN_SRR0,r10
 	mtspr	SPRN_SRR1,r11
 	li	r10, BOOK3S_INTERRUPT_SYSCALL
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	mr	r4,r9
 	b	fast_guest_return
 
@@ -1691,7 +1801,7 @@ hcall_real_table:
 	.long	0		/* 0x10 - H_CLEAR_MOD */
 	.long	0		/* 0x14 - H_CLEAR_REF */
 	.long	.kvmppc_h_protect - hcall_real_table
-	.long	0		/* 0x1c - H_GET_TCE */
+	.long	.kvmppc_h_get_tce - hcall_real_table
 	.long	.kvmppc_h_put_tce - hcall_real_table
 	.long	0		/* 0x24 - H_SET_SPRG0 */
 	.long	.kvmppc_h_set_dabr - hcall_real_table
@@ -1997,7 +2107,7 @@ machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	b	fast_interrupt_c_return
 
 /*
@@ -2138,8 +2248,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mfspr	r6,SPRN_VRSAVE
 	stw	r6,VCPU_VRSAVE(r31)
 	mtlr	r30
-	mtmsrd	r5
-	isync
 	blr
 
 /*
@@ -2186,3 +2294,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  */
 kvmppc_bad_host_intr:
 	b	.
+
+/*
+ * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
+ * from VCPU_INTR_MSR and is modified based on the required TM state changes.
+ * r11 has the guest MSR value (in/out)
+ * r9 has a vcpu pointer (in)
+ * r0 is used as a scratch register
+ */
+kvmppc_msr_interrupt:
+	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
+	cmpwi	r0, 2 /* Check if we are in transactional state..  */
+	ld	r11, VCPU_INTR_MSR(r9)
+	bne	1f
+	/* ... if transactional, change to suspended */
+	li	r0, 1
+1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+	blr
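Note: a C rendering of kvmppc_msr_interrupt's TS-field handling; msr_on_interrupt() is hypothetical, for illustration only. It mirrors the C version added to kvmppc_mmu_book3s_64_hv_reset_msr() earlier in this diff: a guest caught mid-transaction takes the interrupt in suspended state, otherwise its existing TS bits are carried over.

static unsigned long msr_on_interrupt(unsigned long guest_msr,
				      unsigned long intr_msr)
{
	unsigned long msr = intr_msr & ~MSR_TS_MASK;

	if (MSR_TM_TRANSACTIONAL(guest_msr))
		msr |= MSR_TS_S;		/* transactional -> suspended */
	else
		msr |= guest_msr & MSR_TS_MASK;	/* preserve TS as-is */
	return msr;
}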
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index cf95cdef73c9..7a053157483b 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -213,8 +213,11 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	gpa_t args_phys;
 	int rc;
 
-	/* r4 contains the guest physical address of the RTAS args */
-	args_phys = kvmppc_get_gpr(vcpu, 4);
+	/*
+	 * r4 contains the guest physical address of the RTAS args
+	 * Mask off the top 4 bits since this is a guest real address
+	 */
+	args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
 
 	rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
 	if (rc)
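Note: KVM_PAM is the Book3S physical-address mask (defined as 0x0fffffffffffffffULL in asm/kvm_book3s.h), so the change simply strips the top nibble of the guest real address. An illustrative value:

unsigned long gra = 0xc000000001234560UL;	/* guest real address */
unsigned long args_phys = gra & KVM_PAM;	/* 0x0000000001234560 */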
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 555034f8505e..808ce1cae21a 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -390,9 +390,9 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 					    mark));
 		break;
 	case BPF_S_ANC_RXHASH:
-		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 		PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
-						  rxhash));
+						  hash));
 		break;
 	case BPF_S_ANC_VLAN_TAG:
 	case BPF_S_ANC_VLAN_TAG_PRESENT:
@@ -689,6 +689,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 		((u64 *)image)[0] = (u64)code_base;
 		((u64 *)image)[1] = local_paca->kernel_toc;
 		fp->bpf_func = (void *)image;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
@@ -697,7 +698,7 @@ out:
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-	if (fp->bpf_func != sk_run_filter)
+	if (fp->jited)
 		module_free(NULL, fp->bpf_func);
 	kfree(fp);
 }
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a8fe5aa3d34f..022b38e6a80b 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kref.h>
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
 #include <linux/cpu.h>
@@ -87,7 +86,6 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
 	}
 
 	of_node_set_flag(dn, OF_DYNAMIC);
-	kref_init(&dn->kref);
 
 	return dn;
 }
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index f93cdf55628c..0435bb65d0aa 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kref.h>
 #include <linux/notifier.h>
 #include <linux/proc_fs.h>
 #include <linux/slab.h>
@@ -70,7 +69,6 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
 	np->properties = proplist;
 	of_node_set_flag(np, OF_DYNAMIC);
-	kref_init(&np->kref);
 
 	np->parent = derive_parent(path);
 	if (IS_ERR(np->parent)) {
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 0968b66b4cf9..8ba60424be95 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -202,7 +202,7 @@ void __init test_of_node(void)
 
 	/* There should really be a struct device_node allocator */
 	memset(&of_node, 0, sizeof(of_node));
-	kref_init(&of_node.kref);
+	kref_init(&of_node.kobj.kref);
 	of_node.full_name = node_name;
 
 	check(0 == msi_bitmap_alloc(&bmp, size, &of_node));