diff options
author | Paolo Bonzini <pbonzini@redhat.com> | 2014-05-30 08:51:40 -0400 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-05-30 08:51:40 -0400 |
commit | 53ea2e462e1b5b898d244bb113993df709107e32 (patch) | |
tree | 6ed864c3915eb9f2595e9103926bb94c3e92e1c8 /arch | |
parent | ee1a725f449d6e631405755f16f3c60c49e8fec5 (diff) | |
parent | d8d164a9850d486cc48081c18831680254688d0f (diff) |
Merge tag 'signed-kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-next
Patch queue for ppc - 2014-05-30
In this round we have a few nice gems. PR KVM gains initial POWER8 support
as well as LE host awareness, the e500 targets can now properly run u-boot,
LE guests now work with PR KVM including KVM hypercalls and HV KVM guests
can now use huge pages.
On top of this there are some bug fixes.
Conflicts:
include/uapi/linux/kvm.h
Diffstat (limited to 'arch')
42 files changed, 1095 insertions, 415 deletions
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h index 856f8deb557a..6330a61b875a 100644 --- a/arch/powerpc/include/asm/disassemble.h +++ b/arch/powerpc/include/asm/disassemble.h | |||
@@ -81,4 +81,38 @@ static inline unsigned int get_oc(u32 inst) | |||
81 | { | 81 | { |
82 | return (inst >> 11) & 0x7fff; | 82 | return (inst >> 11) & 0x7fff; |
83 | } | 83 | } |
84 | |||
85 | #define IS_XFORM(inst) (get_op(inst) == 31) | ||
86 | #define IS_DSFORM(inst) (get_op(inst) >= 56) | ||
87 | |||
88 | /* | ||
89 | * Create a DSISR value from the instruction | ||
90 | */ | ||
91 | static inline unsigned make_dsisr(unsigned instr) | ||
92 | { | ||
93 | unsigned dsisr; | ||
94 | |||
95 | |||
96 | /* bits 6:15 --> 22:31 */ | ||
97 | dsisr = (instr & 0x03ff0000) >> 16; | ||
98 | |||
99 | if (IS_XFORM(instr)) { | ||
100 | /* bits 29:30 --> 15:16 */ | ||
101 | dsisr |= (instr & 0x00000006) << 14; | ||
102 | /* bit 25 --> 17 */ | ||
103 | dsisr |= (instr & 0x00000040) << 8; | ||
104 | /* bits 21:24 --> 18:21 */ | ||
105 | dsisr |= (instr & 0x00000780) << 3; | ||
106 | } else { | ||
107 | /* bit 5 --> 17 */ | ||
108 | dsisr |= (instr & 0x04000000) >> 12; | ||
109 | /* bits 1: 4 --> 18:21 */ | ||
110 | dsisr |= (instr & 0x78000000) >> 17; | ||
111 | /* bits 30:31 --> 12:13 */ | ||
112 | if (IS_DSFORM(instr)) | ||
113 | dsisr |= (instr & 0x00000003) << 18; | ||
114 | } | ||
115 | |||
116 | return dsisr; | ||
117 | } | ||
84 | #endif /* __ASM_PPC_DISASSEMBLE_H__ */ | 118 | #endif /* __ASM_PPC_DISASSEMBLE_H__ */ |
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 19eb74a95b59..9601741080e5 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h | |||
@@ -102,6 +102,7 @@ | |||
102 | #define BOOK3S_INTERRUPT_PERFMON 0xf00 | 102 | #define BOOK3S_INTERRUPT_PERFMON 0xf00 |
103 | #define BOOK3S_INTERRUPT_ALTIVEC 0xf20 | 103 | #define BOOK3S_INTERRUPT_ALTIVEC 0xf20 |
104 | #define BOOK3S_INTERRUPT_VSX 0xf40 | 104 | #define BOOK3S_INTERRUPT_VSX 0xf40 |
105 | #define BOOK3S_INTERRUPT_FAC_UNAVAIL 0xf60 | ||
105 | #define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80 | 106 | #define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80 |
106 | 107 | ||
107 | #define BOOK3S_IRQPRIO_SYSTEM_RESET 0 | 108 | #define BOOK3S_IRQPRIO_SYSTEM_RESET 0 |
@@ -114,14 +115,15 @@ | |||
114 | #define BOOK3S_IRQPRIO_FP_UNAVAIL 7 | 115 | #define BOOK3S_IRQPRIO_FP_UNAVAIL 7 |
115 | #define BOOK3S_IRQPRIO_ALTIVEC 8 | 116 | #define BOOK3S_IRQPRIO_ALTIVEC 8 |
116 | #define BOOK3S_IRQPRIO_VSX 9 | 117 | #define BOOK3S_IRQPRIO_VSX 9 |
117 | #define BOOK3S_IRQPRIO_SYSCALL 10 | 118 | #define BOOK3S_IRQPRIO_FAC_UNAVAIL 10 |
118 | #define BOOK3S_IRQPRIO_MACHINE_CHECK 11 | 119 | #define BOOK3S_IRQPRIO_SYSCALL 11 |
119 | #define BOOK3S_IRQPRIO_DEBUG 12 | 120 | #define BOOK3S_IRQPRIO_MACHINE_CHECK 12 |
120 | #define BOOK3S_IRQPRIO_EXTERNAL 13 | 121 | #define BOOK3S_IRQPRIO_DEBUG 13 |
121 | #define BOOK3S_IRQPRIO_DECREMENTER 14 | 122 | #define BOOK3S_IRQPRIO_EXTERNAL 14 |
122 | #define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15 | 123 | #define BOOK3S_IRQPRIO_DECREMENTER 15 |
123 | #define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 16 | 124 | #define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16 |
124 | #define BOOK3S_IRQPRIO_MAX 17 | 125 | #define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 17 |
126 | #define BOOK3S_IRQPRIO_MAX 18 | ||
125 | 127 | ||
126 | #define BOOK3S_HFLAG_DCBZ32 0x1 | 128 | #define BOOK3S_HFLAG_DCBZ32 0x1 |
127 | #define BOOK3S_HFLAG_SLB 0x2 | 129 | #define BOOK3S_HFLAG_SLB 0x2 |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index bb1e38a23ac7..f52f65694527 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -268,9 +268,10 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) | |||
268 | return vcpu->arch.pc; | 268 | return vcpu->arch.pc; |
269 | } | 269 | } |
270 | 270 | ||
271 | static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu); | ||
271 | static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) | 272 | static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) |
272 | { | 273 | { |
273 | return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); | 274 | return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); |
274 | } | 275 | } |
275 | 276 | ||
276 | static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) | 277 | static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) |
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 51388befeddb..fddb72b48ce9 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h | |||
@@ -77,34 +77,122 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits) | |||
77 | return old == 0; | 77 | return old == 0; |
78 | } | 78 | } |
79 | 79 | ||
80 | static inline int __hpte_actual_psize(unsigned int lp, int psize) | ||
81 | { | ||
82 | int i, shift; | ||
83 | unsigned int mask; | ||
84 | |||
85 | /* start from 1 ignoring MMU_PAGE_4K */ | ||
86 | for (i = 1; i < MMU_PAGE_COUNT; i++) { | ||
87 | |||
88 | /* invalid penc */ | ||
89 | if (mmu_psize_defs[psize].penc[i] == -1) | ||
90 | continue; | ||
91 | /* | ||
92 | * encoding bits per actual page size | ||
93 | * PTE LP actual page size | ||
94 | * rrrr rrrz >=8KB | ||
95 | * rrrr rrzz >=16KB | ||
96 | * rrrr rzzz >=32KB | ||
97 | * rrrr zzzz >=64KB | ||
98 | * ....... | ||
99 | */ | ||
100 | shift = mmu_psize_defs[i].shift - LP_SHIFT; | ||
101 | if (shift > LP_BITS) | ||
102 | shift = LP_BITS; | ||
103 | mask = (1 << shift) - 1; | ||
104 | if ((lp & mask) == mmu_psize_defs[psize].penc[i]) | ||
105 | return i; | ||
106 | } | ||
107 | return -1; | ||
108 | } | ||
109 | |||
80 | static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, | 110 | static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, |
81 | unsigned long pte_index) | 111 | unsigned long pte_index) |
82 | { | 112 | { |
83 | unsigned long rb, va_low; | 113 | int b_psize, a_psize; |
114 | unsigned int penc; | ||
115 | unsigned long rb = 0, va_low, sllp; | ||
116 | unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1); | ||
117 | |||
118 | if (!(v & HPTE_V_LARGE)) { | ||
119 | /* both base and actual psize is 4k */ | ||
120 | b_psize = MMU_PAGE_4K; | ||
121 | a_psize = MMU_PAGE_4K; | ||
122 | } else { | ||
123 | for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) { | ||
124 | |||
125 | /* valid entries have a shift value */ | ||
126 | if (!mmu_psize_defs[b_psize].shift) | ||
127 | continue; | ||
84 | 128 | ||
129 | a_psize = __hpte_actual_psize(lp, b_psize); | ||
130 | if (a_psize != -1) | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | /* | ||
135 | * Ignore the top 14 bits of va | ||
136 | * v have top two bits covering segment size, hence move | ||
137 | * by 16 bits, Also clear the lower HPTE_V_AVPN_SHIFT (7) bits. | ||
138 | * AVA field in v also have the lower 23 bits ignored. | ||
139 | * For base page size 4K we need 14 .. 65 bits (so need to | ||
140 | * collect extra 11 bits) | ||
141 | * For others we need 14..14+i | ||
142 | */ | ||
143 | /* This covers 14..54 bits of va*/ | ||
85 | rb = (v & ~0x7fUL) << 16; /* AVA field */ | 144 | rb = (v & ~0x7fUL) << 16; /* AVA field */ |
145 | /* | ||
146 | * AVA in v had cleared lower 23 bits. We need to derive | ||
147 | * that from pteg index | ||
148 | */ | ||
86 | va_low = pte_index >> 3; | 149 | va_low = pte_index >> 3; |
87 | if (v & HPTE_V_SECONDARY) | 150 | if (v & HPTE_V_SECONDARY) |
88 | va_low = ~va_low; | 151 | va_low = ~va_low; |
89 | /* xor vsid from AVA */ | 152 | /* |
153 | * get the vpn bits from va_low using reverse of hashing. | ||
154 | * In v we have va with 23 bits dropped and then left shifted | ||
155 | * HPTE_V_AVPN_SHIFT (7) bits. Now to find vsid we need | ||
156 | * right shift it with (SID_SHIFT - (23 - 7)) | ||
157 | */ | ||
90 | if (!(v & HPTE_V_1TB_SEG)) | 158 | if (!(v & HPTE_V_1TB_SEG)) |
91 | va_low ^= v >> 12; | 159 | va_low ^= v >> (SID_SHIFT - 16); |
92 | else | 160 | else |
93 | va_low ^= v >> 24; | 161 | va_low ^= v >> (SID_SHIFT_1T - 16); |
94 | va_low &= 0x7ff; | 162 | va_low &= 0x7ff; |
95 | if (v & HPTE_V_LARGE) { | 163 | |
96 | rb |= 1; /* L field */ | 164 | switch (b_psize) { |
97 | if (cpu_has_feature(CPU_FTR_ARCH_206) && | 165 | case MMU_PAGE_4K: |
98 | (r & 0xff000)) { | 166 | sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) | |
99 | /* non-16MB large page, must be 64k */ | 167 | ((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4); |
100 | /* (masks depend on page size) */ | 168 | rb |= sllp << 5; /* AP field */ |
101 | rb |= 0x1000; /* page encoding in LP field */ | 169 | rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */ |
102 | rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ | 170 | break; |
103 | rb |= ((va_low << 4) & 0xf0); /* AVAL field (P7 doesn't seem to care) */ | 171 | default: |
104 | } | 172 | { |
105 | } else { | 173 | int aval_shift; |
106 | /* 4kB page */ | 174 | /* |
107 | rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ | 175 | * remaining 7bits of AVA/LP fields |
176 | * Also contain the rr bits of LP | ||
177 | */ | ||
178 | rb |= (va_low & 0x7f) << 16; | ||
179 | /* | ||
180 | * Now clear not needed LP bits based on actual psize | ||
181 | */ | ||
182 | rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1); | ||
183 | /* | ||
184 | * AVAL field 58..77 - base_page_shift bits of va | ||
185 | * we have space for 58..64 bits, Missing bits should | ||
186 | * be zero filled. +1 is to take care of L bit shift | ||
187 | */ | ||
188 | aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1; | ||
189 | rb |= ((va_low << aval_shift) & 0xfe); | ||
190 | |||
191 | rb |= 1; /* L field */ | ||
192 | penc = mmu_psize_defs[b_psize].penc[a_psize]; | ||
193 | rb |= penc << 12; /* LP field */ | ||
194 | break; | ||
195 | } | ||
108 | } | 196 | } |
109 | rb |= (v >> 54) & 0x300; /* B field */ | 197 | rb |= (v >> 54) & 0x300; /* B field */ |
110 | return rb; | 198 | return rb; |
@@ -112,14 +200,26 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, | |||
112 | 200 | ||
113 | static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) | 201 | static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) |
114 | { | 202 | { |
203 | int size, a_psize; | ||
204 | /* Look at the 8 bit LP value */ | ||
205 | unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); | ||
206 | |||
115 | /* only handle 4k, 64k and 16M pages for now */ | 207 | /* only handle 4k, 64k and 16M pages for now */ |
116 | if (!(h & HPTE_V_LARGE)) | 208 | if (!(h & HPTE_V_LARGE)) |
117 | return 1ul << 12; /* 4k page */ | 209 | return 1ul << 12; |
118 | if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206)) | 210 | else { |
119 | return 1ul << 16; /* 64k page */ | 211 | for (size = 0; size < MMU_PAGE_COUNT; size++) { |
120 | if ((l & 0xff000) == 0) | 212 | /* valid entries have a shift value */ |
121 | return 1ul << 24; /* 16M page */ | 213 | if (!mmu_psize_defs[size].shift) |
122 | return 0; /* error */ | 214 | continue; |
215 | |||
216 | a_psize = __hpte_actual_psize(lp, size); | ||
217 | if (a_psize != -1) | ||
218 | return 1ul << mmu_psize_defs[a_psize].shift; | ||
219 | } | ||
220 | |||
221 | } | ||
222 | return 0; | ||
123 | } | 223 | } |
124 | 224 | ||
125 | static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) | 225 | static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) |
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 821725c1bf46..5bdfb5dd3400 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
@@ -104,6 +104,7 @@ struct kvmppc_host_state { | |||
104 | #ifdef CONFIG_PPC_BOOK3S_64 | 104 | #ifdef CONFIG_PPC_BOOK3S_64 |
105 | u64 cfar; | 105 | u64 cfar; |
106 | u64 ppr; | 106 | u64 ppr; |
107 | u64 host_fscr; | ||
107 | #endif | 108 | #endif |
108 | }; | 109 | }; |
109 | 110 | ||
@@ -133,6 +134,7 @@ struct kvmppc_book3s_shadow_vcpu { | |||
133 | u64 esid; | 134 | u64 esid; |
134 | u64 vsid; | 135 | u64 vsid; |
135 | } slb[64]; /* guest SLB */ | 136 | } slb[64]; /* guest SLB */ |
137 | u64 shadow_fscr; | ||
136 | #endif | 138 | #endif |
137 | }; | 139 | }; |
138 | 140 | ||
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index 80d46b5a7efb..c7aed6105ff9 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h | |||
@@ -108,9 +108,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | |||
108 | { | 108 | { |
109 | return vcpu->arch.fault_dear; | 109 | return vcpu->arch.fault_dear; |
110 | } | 110 | } |
111 | |||
112 | static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu) | ||
113 | { | ||
114 | return vcpu->arch.shared->msr; | ||
115 | } | ||
116 | #endif /* __ASM_KVM_BOOKE_H__ */ | 111 | #endif /* __ASM_KVM_BOOKE_H__ */ |
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1eaea2dea174..bb66d8b8efdf 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h | |||
@@ -449,7 +449,9 @@ struct kvm_vcpu_arch { | |||
449 | ulong pc; | 449 | ulong pc; |
450 | ulong ctr; | 450 | ulong ctr; |
451 | ulong lr; | 451 | ulong lr; |
452 | #ifdef CONFIG_PPC_BOOK3S | ||
452 | ulong tar; | 453 | ulong tar; |
454 | #endif | ||
453 | 455 | ||
454 | ulong xer; | 456 | ulong xer; |
455 | u32 cr; | 457 | u32 cr; |
@@ -475,6 +477,7 @@ struct kvm_vcpu_arch { | |||
475 | ulong ppr; | 477 | ulong ppr; |
476 | ulong pspb; | 478 | ulong pspb; |
477 | ulong fscr; | 479 | ulong fscr; |
480 | ulong shadow_fscr; | ||
478 | ulong ebbhr; | 481 | ulong ebbhr; |
479 | ulong ebbrr; | 482 | ulong ebbrr; |
480 | ulong bescr; | 483 | ulong bescr; |
@@ -562,6 +565,7 @@ struct kvm_vcpu_arch { | |||
562 | #ifdef CONFIG_PPC_BOOK3S | 565 | #ifdef CONFIG_PPC_BOOK3S |
563 | ulong fault_dar; | 566 | ulong fault_dar; |
564 | u32 fault_dsisr; | 567 | u32 fault_dsisr; |
568 | unsigned long intr_msr; | ||
565 | #endif | 569 | #endif |
566 | 570 | ||
567 | #ifdef CONFIG_BOOKE | 571 | #ifdef CONFIG_BOOKE |
@@ -622,8 +626,12 @@ struct kvm_vcpu_arch { | |||
622 | wait_queue_head_t cpu_run; | 626 | wait_queue_head_t cpu_run; |
623 | 627 | ||
624 | struct kvm_vcpu_arch_shared *shared; | 628 | struct kvm_vcpu_arch_shared *shared; |
629 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) | ||
630 | bool shared_big_endian; | ||
631 | #endif | ||
625 | unsigned long magic_page_pa; /* phys addr to map the magic page to */ | 632 | unsigned long magic_page_pa; /* phys addr to map the magic page to */ |
626 | unsigned long magic_page_ea; /* effect. addr to map the magic page to */ | 633 | unsigned long magic_page_ea; /* effect. addr to map the magic page to */ |
634 | bool disable_kernel_nx; | ||
627 | 635 | ||
628 | int irq_type; /* one of KVM_IRQ_* */ | 636 | int irq_type; /* one of KVM_IRQ_* */ |
629 | int irq_cpu_id; | 637 | int irq_cpu_id; |
@@ -654,7 +662,6 @@ struct kvm_vcpu_arch { | |||
654 | spinlock_t tbacct_lock; | 662 | spinlock_t tbacct_lock; |
655 | u64 busy_stolen; | 663 | u64 busy_stolen; |
656 | u64 busy_preempt; | 664 | u64 busy_preempt; |
657 | unsigned long intr_msr; | ||
658 | #endif | 665 | #endif |
659 | }; | 666 | }; |
660 | 667 | ||
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 4096f16502a9..4a7cc453be0b 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h | |||
@@ -449,6 +449,84 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn) | |||
449 | } | 449 | } |
450 | 450 | ||
451 | /* | 451 | /* |
452 | * Shared struct helpers. The shared struct can be little or big endian, | ||
453 | * depending on the guest endianness. So expose helpers to all of them. | ||
454 | */ | ||
455 | static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu) | ||
456 | { | ||
457 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) | ||
458 | /* Only Book3S_64 PR supports bi-endian for now */ | ||
459 | return vcpu->arch.shared_big_endian; | ||
460 | #elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__) | ||
461 | /* Book3s_64 HV on little endian is always little endian */ | ||
462 | return false; | ||
463 | #else | ||
464 | return true; | ||
465 | #endif | ||
466 | } | ||
467 | |||
468 | #define SHARED_WRAPPER_GET(reg, size) \ | ||
469 | static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ | ||
470 | { \ | ||
471 | if (kvmppc_shared_big_endian(vcpu)) \ | ||
472 | return be##size##_to_cpu(vcpu->arch.shared->reg); \ | ||
473 | else \ | ||
474 | return le##size##_to_cpu(vcpu->arch.shared->reg); \ | ||
475 | } \ | ||
476 | |||
477 | #define SHARED_WRAPPER_SET(reg, size) \ | ||
478 | static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ | ||
479 | { \ | ||
480 | if (kvmppc_shared_big_endian(vcpu)) \ | ||
481 | vcpu->arch.shared->reg = cpu_to_be##size(val); \ | ||
482 | else \ | ||
483 | vcpu->arch.shared->reg = cpu_to_le##size(val); \ | ||
484 | } \ | ||
485 | |||
486 | #define SHARED_WRAPPER(reg, size) \ | ||
487 | SHARED_WRAPPER_GET(reg, size) \ | ||
488 | SHARED_WRAPPER_SET(reg, size) \ | ||
489 | |||
490 | SHARED_WRAPPER(critical, 64) | ||
491 | SHARED_WRAPPER(sprg0, 64) | ||
492 | SHARED_WRAPPER(sprg1, 64) | ||
493 | SHARED_WRAPPER(sprg2, 64) | ||
494 | SHARED_WRAPPER(sprg3, 64) | ||
495 | SHARED_WRAPPER(srr0, 64) | ||
496 | SHARED_WRAPPER(srr1, 64) | ||
497 | SHARED_WRAPPER(dar, 64) | ||
498 | SHARED_WRAPPER_GET(msr, 64) | ||
499 | static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) | ||
500 | { | ||
501 | if (kvmppc_shared_big_endian(vcpu)) | ||
502 | vcpu->arch.shared->msr = cpu_to_be64(val); | ||
503 | else | ||
504 | vcpu->arch.shared->msr = cpu_to_le64(val); | ||
505 | } | ||
506 | SHARED_WRAPPER(dsisr, 32) | ||
507 | SHARED_WRAPPER(int_pending, 32) | ||
508 | SHARED_WRAPPER(sprg4, 64) | ||
509 | SHARED_WRAPPER(sprg5, 64) | ||
510 | SHARED_WRAPPER(sprg6, 64) | ||
511 | SHARED_WRAPPER(sprg7, 64) | ||
512 | |||
513 | static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr) | ||
514 | { | ||
515 | if (kvmppc_shared_big_endian(vcpu)) | ||
516 | return be32_to_cpu(vcpu->arch.shared->sr[nr]); | ||
517 | else | ||
518 | return le32_to_cpu(vcpu->arch.shared->sr[nr]); | ||
519 | } | ||
520 | |||
521 | static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val) | ||
522 | { | ||
523 | if (kvmppc_shared_big_endian(vcpu)) | ||
524 | vcpu->arch.shared->sr[nr] = cpu_to_be32(val); | ||
525 | else | ||
526 | vcpu->arch.shared->sr[nr] = cpu_to_le32(val); | ||
527 | } | ||
528 | |||
529 | /* | ||
452 | * Please call after prepare_to_enter. This function puts the lazy ee and irq | 530 | * Please call after prepare_to_enter. This function puts the lazy ee and irq |
453 | * disabled tracking state back to normal mode, without actually enabling | 531 | * disabled tracking state back to normal mode, without actually enabling |
454 | * interrupts. | 532 | * interrupts. |
@@ -485,7 +563,7 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb) | |||
485 | msr_64bit = MSR_SF; | 563 | msr_64bit = MSR_SF; |
486 | #endif | 564 | #endif |
487 | 565 | ||
488 | if (!(vcpu->arch.shared->msr & msr_64bit)) | 566 | if (!(kvmppc_get_msr(vcpu) & msr_64bit)) |
489 | ea = (uint32_t)ea; | 567 | ea = (uint32_t)ea; |
490 | 568 | ||
491 | return ea; | 569 | return ea; |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e5d2e0bc7e03..4852bcf270f3 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -670,18 +670,20 @@ | |||
670 | #define MMCR0_PROBLEM_DISABLE MMCR0_FCP | 670 | #define MMCR0_PROBLEM_DISABLE MMCR0_FCP |
671 | #define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */ | 671 | #define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */ |
672 | #define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */ | 672 | #define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */ |
673 | #define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */ | 673 | #define MMCR0_PMXE ASM_CONST(0x04000000) /* perf mon exception enable */ |
674 | #define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */ | 674 | #define MMCR0_FCECE ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */ |
675 | #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ | 675 | #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ |
676 | #define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */ | 676 | #define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */ |
677 | #define MMCR0_EBE 0x00100000UL /* Event based branch enable */ | 677 | #define MMCR0_EBE 0x00100000UL /* Event based branch enable */ |
678 | #define MMCR0_PMCC 0x000c0000UL /* PMC control */ | 678 | #define MMCR0_PMCC 0x000c0000UL /* PMC control */ |
679 | #define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */ | 679 | #define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */ |
680 | #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/ | 680 | #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/ |
681 | #define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/ | 681 | #define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/ |
682 | #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ | 682 | #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ |
683 | #define MMCR0_PMAO_SYNC 0x00000800UL /* PMU interrupt is synchronous */ | 683 | #define MMCR0_PMAO_SYNC ASM_CONST(0x00000800) /* PMU intr is synchronous */ |
684 | #define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */ | 684 | #define MMCR0_C56RUN ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */ |
685 | /* performance monitor alert has occurred, set to 0 after handling exception */ | ||
686 | #define MMCR0_PMAO ASM_CONST(0x00000080) | ||
685 | #define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */ | 687 | #define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */ |
686 | #define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */ | 688 | #define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */ |
687 | #define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */ | 689 | #define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */ |
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 163c3b05a76e..464f1089b532 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h | |||
@@ -583,6 +583,7 @@ | |||
583 | 583 | ||
584 | /* Bit definitions for L1CSR0. */ | 584 | /* Bit definitions for L1CSR0. */ |
585 | #define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ | 585 | #define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ |
586 | #define L1CSR0_CUL 0x00000400 /* Data Cache Unable to Lock */ | ||
586 | #define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */ | 587 | #define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */ |
587 | #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ | 588 | #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ |
588 | #define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */ | 589 | #define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */ |
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index a6665be4f3ab..2bc4a9409a93 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h | |||
@@ -545,7 +545,6 @@ struct kvm_get_htab_header { | |||
545 | #define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1) | 545 | #define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1) |
546 | #define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2) | 546 | #define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2) |
547 | #define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3) | 547 | #define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3) |
548 | #define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4) | ||
549 | 548 | ||
550 | #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) | 549 | #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) |
551 | #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) | 550 | #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) |
@@ -555,6 +554,7 @@ struct kvm_get_htab_header { | |||
555 | #define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7) | 554 | #define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7) |
556 | 555 | ||
557 | #define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8) | 556 | #define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8) |
557 | #define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9) | ||
558 | 558 | ||
559 | /* Transactional Memory checkpointed state: | 559 | /* Transactional Memory checkpointed state: |
560 | * This is all GPRs, all VSX regs and a subset of SPRs | 560 | * This is all GPRs, all VSX regs and a subset of SPRs |
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h index e3af3286a068..91e42f09b323 100644 --- a/arch/powerpc/include/uapi/asm/kvm_para.h +++ b/arch/powerpc/include/uapi/asm/kvm_para.h | |||
@@ -82,10 +82,16 @@ struct kvm_vcpu_arch_shared { | |||
82 | 82 | ||
83 | #define KVM_FEATURE_MAGIC_PAGE 1 | 83 | #define KVM_FEATURE_MAGIC_PAGE 1 |
84 | 84 | ||
85 | /* Magic page flags from host to guest */ | ||
86 | |||
85 | #define KVM_MAGIC_FEAT_SR (1 << 0) | 87 | #define KVM_MAGIC_FEAT_SR (1 << 0) |
86 | 88 | ||
87 | /* MASn, ESR, PIR, and high SPRGs */ | 89 | /* MASn, ESR, PIR, and high SPRGs */ |
88 | #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1) | 90 | #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1) |
89 | 91 | ||
92 | /* Magic page flags from guest to host */ | ||
93 | |||
94 | #define MAGIC_PAGE_FLAG_NOT_MAPPED_NX (1 << 0) | ||
95 | |||
90 | 96 | ||
91 | #endif /* _UAPI__POWERPC_KVM_PARA_H__ */ | 97 | #endif /* _UAPI__POWERPC_KVM_PARA_H__ */ |
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 94908af308d8..34f55524d456 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c | |||
@@ -25,14 +25,13 @@ | |||
25 | #include <asm/cputable.h> | 25 | #include <asm/cputable.h> |
26 | #include <asm/emulated_ops.h> | 26 | #include <asm/emulated_ops.h> |
27 | #include <asm/switch_to.h> | 27 | #include <asm/switch_to.h> |
28 | #include <asm/disassemble.h> | ||
28 | 29 | ||
29 | struct aligninfo { | 30 | struct aligninfo { |
30 | unsigned char len; | 31 | unsigned char len; |
31 | unsigned char flags; | 32 | unsigned char flags; |
32 | }; | 33 | }; |
33 | 34 | ||
34 | #define IS_XFORM(inst) (((inst) >> 26) == 31) | ||
35 | #define IS_DSFORM(inst) (((inst) >> 26) >= 56) | ||
36 | 35 | ||
37 | #define INVALID { 0, 0 } | 36 | #define INVALID { 0, 0 } |
38 | 37 | ||
@@ -192,37 +191,6 @@ static struct aligninfo aligninfo[128] = { | |||
192 | }; | 191 | }; |
193 | 192 | ||
194 | /* | 193 | /* |
195 | * Create a DSISR value from the instruction | ||
196 | */ | ||
197 | static inline unsigned make_dsisr(unsigned instr) | ||
198 | { | ||
199 | unsigned dsisr; | ||
200 | |||
201 | |||
202 | /* bits 6:15 --> 22:31 */ | ||
203 | dsisr = (instr & 0x03ff0000) >> 16; | ||
204 | |||
205 | if (IS_XFORM(instr)) { | ||
206 | /* bits 29:30 --> 15:16 */ | ||
207 | dsisr |= (instr & 0x00000006) << 14; | ||
208 | /* bit 25 --> 17 */ | ||
209 | dsisr |= (instr & 0x00000040) << 8; | ||
210 | /* bits 21:24 --> 18:21 */ | ||
211 | dsisr |= (instr & 0x00000780) << 3; | ||
212 | } else { | ||
213 | /* bit 5 --> 17 */ | ||
214 | dsisr |= (instr & 0x04000000) >> 12; | ||
215 | /* bits 1: 4 --> 18:21 */ | ||
216 | dsisr |= (instr & 0x78000000) >> 17; | ||
217 | /* bits 30:31 --> 12:13 */ | ||
218 | if (IS_DSFORM(instr)) | ||
219 | dsisr |= (instr & 0x00000003) << 18; | ||
220 | } | ||
221 | |||
222 | return dsisr; | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * The dcbz (data cache block zero) instruction | 194 | * The dcbz (data cache block zero) instruction |
227 | * gives an alignment fault if used on non-cacheable | 195 | * gives an alignment fault if used on non-cacheable |
228 | * memory. We handle the fault mainly for the | 196 | * memory. We handle the fault mainly for the |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index dba8140ebc20..93e1465c8496 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #endif | 54 | #endif |
55 | #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S) | 55 | #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S) |
56 | #include <asm/kvm_book3s.h> | 56 | #include <asm/kvm_book3s.h> |
57 | #include <asm/kvm_ppc.h> | ||
57 | #endif | 58 | #endif |
58 | 59 | ||
59 | #ifdef CONFIG_PPC32 | 60 | #ifdef CONFIG_PPC32 |
@@ -445,7 +446,9 @@ int main(void) | |||
445 | DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); | 446 | DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); |
446 | DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); | 447 | DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); |
447 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); | 448 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); |
449 | #ifdef CONFIG_PPC_BOOK3S | ||
448 | DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); | 450 | DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); |
451 | #endif | ||
449 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); | 452 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); |
450 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); | 453 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); |
451 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | 454 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
@@ -467,6 +470,9 @@ int main(void) | |||
467 | DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); | 470 | DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); |
468 | DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); | 471 | DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); |
469 | DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); | 472 | DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); |
473 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) | ||
474 | DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian)); | ||
475 | #endif | ||
470 | 476 | ||
471 | DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); | 477 | DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); |
472 | DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1)); | 478 | DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1)); |
@@ -493,7 +499,6 @@ int main(void) | |||
493 | DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); | 499 | DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); |
494 | DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); | 500 | DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); |
495 | DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); | 501 | DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); |
496 | DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); | ||
497 | #endif | 502 | #endif |
498 | #ifdef CONFIG_PPC_BOOK3S | 503 | #ifdef CONFIG_PPC_BOOK3S |
499 | DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); | 504 | DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); |
@@ -528,11 +533,13 @@ int main(void) | |||
528 | DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); | 533 | DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); |
529 | DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); | 534 | DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); |
530 | DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); | 535 | DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); |
536 | DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); | ||
531 | DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); | 537 | DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); |
532 | DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); | 538 | DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); |
533 | DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); | 539 | DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); |
534 | DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); | 540 | DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); |
535 | DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); | 541 | DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); |
542 | DEFINE(VCPU_SHADOW_FSCR, offsetof(struct kvm_vcpu, arch.shadow_fscr)); | ||
536 | DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); | 543 | DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); |
537 | DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); | 544 | DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); |
538 | DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); | 545 | DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); |
@@ -614,6 +621,7 @@ int main(void) | |||
614 | #ifdef CONFIG_PPC64 | 621 | #ifdef CONFIG_PPC64 |
615 | SVCPU_FIELD(SVCPU_SLB, slb); | 622 | SVCPU_FIELD(SVCPU_SLB, slb); |
616 | SVCPU_FIELD(SVCPU_SLB_MAX, slb_max); | 623 | SVCPU_FIELD(SVCPU_SLB_MAX, slb_max); |
624 | SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr); | ||
617 | #endif | 625 | #endif |
618 | 626 | ||
619 | HSTATE_FIELD(HSTATE_HOST_R1, host_r1); | 627 | HSTATE_FIELD(HSTATE_HOST_R1, host_r1); |
@@ -649,6 +657,7 @@ int main(void) | |||
649 | #ifdef CONFIG_PPC_BOOK3S_64 | 657 | #ifdef CONFIG_PPC_BOOK3S_64 |
650 | HSTATE_FIELD(HSTATE_CFAR, cfar); | 658 | HSTATE_FIELD(HSTATE_CFAR, cfar); |
651 | HSTATE_FIELD(HSTATE_PPR, ppr); | 659 | HSTATE_FIELD(HSTATE_PPR, ppr); |
660 | HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr); | ||
652 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 661 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
653 | 662 | ||
654 | #else /* CONFIG_PPC_BOOK3S */ | 663 | #else /* CONFIG_PPC_BOOK3S */ |
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 7898be90f2dc..d9b79358b833 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c | |||
@@ -47,9 +47,10 @@ static int __init early_init_dt_scan_epapr(unsigned long node, | |||
47 | return -1; | 47 | return -1; |
48 | 48 | ||
49 | for (i = 0; i < (len / 4); i++) { | 49 | for (i = 0; i < (len / 4); i++) { |
50 | patch_instruction(epapr_hypercall_start + i, insts[i]); | 50 | u32 inst = be32_to_cpu(insts[i]); |
51 | patch_instruction(epapr_hypercall_start + i, inst); | ||
51 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) | 52 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) |
52 | patch_instruction(epapr_ev_idle_start + i, insts[i]); | 53 | patch_instruction(epapr_ev_idle_start + i, inst); |
53 | #endif | 54 | #endif |
54 | } | 55 | } |
55 | 56 | ||
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 6a0175297b0d..5e6f24f894d9 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c | |||
@@ -417,7 +417,7 @@ static void kvm_map_magic_page(void *data) | |||
417 | ulong out[8]; | 417 | ulong out[8]; |
418 | 418 | ||
419 | in[0] = KVM_MAGIC_PAGE; | 419 | in[0] = KVM_MAGIC_PAGE; |
420 | in[1] = KVM_MAGIC_PAGE; | 420 | in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX; |
421 | 421 | ||
422 | epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)); | 422 | epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)); |
423 | 423 | ||
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index ad302f845e5d..d6e195e8cd4c 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -98,6 +98,9 @@ static inline void free_lppacas(void) { } | |||
98 | /* | 98 | /* |
99 | * 3 persistent SLBs are registered here. The buffer will be zero | 99 | * 3 persistent SLBs are registered here. The buffer will be zero |
100 | * initially, hence will all be invaild until we actually write them. | 100 | * initially, hence will all be invaild until we actually write them. |
101 | * | ||
102 | * If you make the number of persistent SLB entries dynamic, please also | ||
103 | * update PR KVM to flush and restore them accordingly. | ||
101 | */ | 104 | */ |
102 | static struct slb_shadow *slb_shadow; | 105 | static struct slb_shadow *slb_shadow; |
103 | 106 | ||
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 141b2027189a..d6a53b95de94 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig | |||
@@ -6,7 +6,6 @@ source "virt/kvm/Kconfig" | |||
6 | 6 | ||
7 | menuconfig VIRTUALIZATION | 7 | menuconfig VIRTUALIZATION |
8 | bool "Virtualization" | 8 | bool "Virtualization" |
9 | depends on !CPU_LITTLE_ENDIAN | ||
10 | ---help--- | 9 | ---help--- |
11 | Say Y here to get to see options for using your Linux host to run | 10 | Say Y here to get to see options for using your Linux host to run |
12 | other operating systems inside virtual machines (guests). | 11 | other operating systems inside virtual machines (guests). |
@@ -76,6 +75,7 @@ config KVM_BOOK3S_64 | |||
76 | config KVM_BOOK3S_64_HV | 75 | config KVM_BOOK3S_64_HV |
77 | tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" | 76 | tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" |
78 | depends on KVM_BOOK3S_64 | 77 | depends on KVM_BOOK3S_64 |
78 | depends on !CPU_LITTLE_ENDIAN | ||
79 | select KVM_BOOK3S_HV_POSSIBLE | 79 | select KVM_BOOK3S_HV_POSSIBLE |
80 | select MMU_NOTIFIER | 80 | select MMU_NOTIFIER |
81 | select CMA | 81 | select CMA |
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 94e597e6f15c..52c654dbd41a 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -85,9 +85,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, | |||
85 | if (is_kvmppc_hv_enabled(vcpu->kvm)) | 85 | if (is_kvmppc_hv_enabled(vcpu->kvm)) |
86 | return; | 86 | return; |
87 | if (pending_now) | 87 | if (pending_now) |
88 | vcpu->arch.shared->int_pending = 1; | 88 | kvmppc_set_int_pending(vcpu, 1); |
89 | else if (old_pending) | 89 | else if (old_pending) |
90 | vcpu->arch.shared->int_pending = 0; | 90 | kvmppc_set_int_pending(vcpu, 0); |
91 | } | 91 | } |
92 | 92 | ||
93 | static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) | 93 | static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) |
@@ -99,11 +99,11 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) | |||
99 | if (is_kvmppc_hv_enabled(vcpu->kvm)) | 99 | if (is_kvmppc_hv_enabled(vcpu->kvm)) |
100 | return false; | 100 | return false; |
101 | 101 | ||
102 | crit_raw = vcpu->arch.shared->critical; | 102 | crit_raw = kvmppc_get_critical(vcpu); |
103 | crit_r1 = kvmppc_get_gpr(vcpu, 1); | 103 | crit_r1 = kvmppc_get_gpr(vcpu, 1); |
104 | 104 | ||
105 | /* Truncate crit indicators in 32 bit mode */ | 105 | /* Truncate crit indicators in 32 bit mode */ |
106 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | 106 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { |
107 | crit_raw &= 0xffffffff; | 107 | crit_raw &= 0xffffffff; |
108 | crit_r1 &= 0xffffffff; | 108 | crit_r1 &= 0xffffffff; |
109 | } | 109 | } |
@@ -111,15 +111,15 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) | |||
111 | /* Critical section when crit == r1 */ | 111 | /* Critical section when crit == r1 */ |
112 | crit = (crit_raw == crit_r1); | 112 | crit = (crit_raw == crit_r1); |
113 | /* ... and we're in supervisor mode */ | 113 | /* ... and we're in supervisor mode */ |
114 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); | 114 | crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR); |
115 | 115 | ||
116 | return crit; | 116 | return crit; |
117 | } | 117 | } |
118 | 118 | ||
119 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) | 119 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) |
120 | { | 120 | { |
121 | vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); | 121 | kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); |
122 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; | 122 | kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); |
123 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); | 123 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); |
124 | vcpu->arch.mmu.reset_msr(vcpu); | 124 | vcpu->arch.mmu.reset_msr(vcpu); |
125 | } | 125 | } |
@@ -145,6 +145,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec) | |||
145 | case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; | 145 | case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; |
146 | case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; | 146 | case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; |
147 | case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; | 147 | case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; |
148 | case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break; | ||
148 | default: prio = BOOK3S_IRQPRIO_MAX; break; | 149 | default: prio = BOOK3S_IRQPRIO_MAX; break; |
149 | } | 150 | } |
150 | 151 | ||
@@ -225,12 +226,12 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | |||
225 | 226 | ||
226 | switch (priority) { | 227 | switch (priority) { |
227 | case BOOK3S_IRQPRIO_DECREMENTER: | 228 | case BOOK3S_IRQPRIO_DECREMENTER: |
228 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; | 229 | deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; |
229 | vec = BOOK3S_INTERRUPT_DECREMENTER; | 230 | vec = BOOK3S_INTERRUPT_DECREMENTER; |
230 | break; | 231 | break; |
231 | case BOOK3S_IRQPRIO_EXTERNAL: | 232 | case BOOK3S_IRQPRIO_EXTERNAL: |
232 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: | 233 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: |
233 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; | 234 | deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; |
234 | vec = BOOK3S_INTERRUPT_EXTERNAL; | 235 | vec = BOOK3S_INTERRUPT_EXTERNAL; |
235 | break; | 236 | break; |
236 | case BOOK3S_IRQPRIO_SYSTEM_RESET: | 237 | case BOOK3S_IRQPRIO_SYSTEM_RESET: |
@@ -275,6 +276,9 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | |||
275 | case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: | 276 | case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: |
276 | vec = BOOK3S_INTERRUPT_PERFMON; | 277 | vec = BOOK3S_INTERRUPT_PERFMON; |
277 | break; | 278 | break; |
279 | case BOOK3S_IRQPRIO_FAC_UNAVAIL: | ||
280 | vec = BOOK3S_INTERRUPT_FAC_UNAVAIL; | ||
281 | break; | ||
278 | default: | 282 | default: |
279 | deliver = 0; | 283 | deliver = 0; |
280 | printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); | 284 | printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); |
@@ -343,7 +347,7 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, | |||
343 | { | 347 | { |
344 | ulong mp_pa = vcpu->arch.magic_page_pa; | 348 | ulong mp_pa = vcpu->arch.magic_page_pa; |
345 | 349 | ||
346 | if (!(vcpu->arch.shared->msr & MSR_SF)) | 350 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) |
347 | mp_pa = (uint32_t)mp_pa; | 351 | mp_pa = (uint32_t)mp_pa; |
348 | 352 | ||
349 | /* Magic page override */ | 353 | /* Magic page override */ |
@@ -367,7 +371,7 @@ EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn); | |||
367 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, | 371 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, |
368 | bool iswrite, struct kvmppc_pte *pte) | 372 | bool iswrite, struct kvmppc_pte *pte) |
369 | { | 373 | { |
370 | int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); | 374 | int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR)); |
371 | int r; | 375 | int r; |
372 | 376 | ||
373 | if (relocated) { | 377 | if (relocated) { |
@@ -498,18 +502,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
498 | regs->ctr = kvmppc_get_ctr(vcpu); | 502 | regs->ctr = kvmppc_get_ctr(vcpu); |
499 | regs->lr = kvmppc_get_lr(vcpu); | 503 | regs->lr = kvmppc_get_lr(vcpu); |
500 | regs->xer = kvmppc_get_xer(vcpu); | 504 | regs->xer = kvmppc_get_xer(vcpu); |
501 | regs->msr = vcpu->arch.shared->msr; | 505 | regs->msr = kvmppc_get_msr(vcpu); |
502 | regs->srr0 = vcpu->arch.shared->srr0; | 506 | regs->srr0 = kvmppc_get_srr0(vcpu); |
503 | regs->srr1 = vcpu->arch.shared->srr1; | 507 | regs->srr1 = kvmppc_get_srr1(vcpu); |
504 | regs->pid = vcpu->arch.pid; | 508 | regs->pid = vcpu->arch.pid; |
505 | regs->sprg0 = vcpu->arch.shared->sprg0; | 509 | regs->sprg0 = kvmppc_get_sprg0(vcpu); |
506 | regs->sprg1 = vcpu->arch.shared->sprg1; | 510 | regs->sprg1 = kvmppc_get_sprg1(vcpu); |
507 | regs->sprg2 = vcpu->arch.shared->sprg2; | 511 | regs->sprg2 = kvmppc_get_sprg2(vcpu); |
508 | regs->sprg3 = vcpu->arch.shared->sprg3; | 512 | regs->sprg3 = kvmppc_get_sprg3(vcpu); |
509 | regs->sprg4 = vcpu->arch.shared->sprg4; | 513 | regs->sprg4 = kvmppc_get_sprg4(vcpu); |
510 | regs->sprg5 = vcpu->arch.shared->sprg5; | 514 | regs->sprg5 = kvmppc_get_sprg5(vcpu); |
511 | regs->sprg6 = vcpu->arch.shared->sprg6; | 515 | regs->sprg6 = kvmppc_get_sprg6(vcpu); |
512 | regs->sprg7 = vcpu->arch.shared->sprg7; | 516 | regs->sprg7 = kvmppc_get_sprg7(vcpu); |
513 | 517 | ||
514 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 518 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
515 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 519 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
@@ -527,16 +531,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
527 | kvmppc_set_lr(vcpu, regs->lr); | 531 | kvmppc_set_lr(vcpu, regs->lr); |
528 | kvmppc_set_xer(vcpu, regs->xer); | 532 | kvmppc_set_xer(vcpu, regs->xer); |
529 | kvmppc_set_msr(vcpu, regs->msr); | 533 | kvmppc_set_msr(vcpu, regs->msr); |
530 | vcpu->arch.shared->srr0 = regs->srr0; | 534 | kvmppc_set_srr0(vcpu, regs->srr0); |
531 | vcpu->arch.shared->srr1 = regs->srr1; | 535 | kvmppc_set_srr1(vcpu, regs->srr1); |
532 | vcpu->arch.shared->sprg0 = regs->sprg0; | 536 | kvmppc_set_sprg0(vcpu, regs->sprg0); |
533 | vcpu->arch.shared->sprg1 = regs->sprg1; | 537 | kvmppc_set_sprg1(vcpu, regs->sprg1); |
534 | vcpu->arch.shared->sprg2 = regs->sprg2; | 538 | kvmppc_set_sprg2(vcpu, regs->sprg2); |
535 | vcpu->arch.shared->sprg3 = regs->sprg3; | 539 | kvmppc_set_sprg3(vcpu, regs->sprg3); |
536 | vcpu->arch.shared->sprg4 = regs->sprg4; | 540 | kvmppc_set_sprg4(vcpu, regs->sprg4); |
537 | vcpu->arch.shared->sprg5 = regs->sprg5; | 541 | kvmppc_set_sprg5(vcpu, regs->sprg5); |
538 | vcpu->arch.shared->sprg6 = regs->sprg6; | 542 | kvmppc_set_sprg6(vcpu, regs->sprg6); |
539 | vcpu->arch.shared->sprg7 = regs->sprg7; | 543 | kvmppc_set_sprg7(vcpu, regs->sprg7); |
540 | 544 | ||
541 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 545 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
542 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 546 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
@@ -570,10 +574,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
570 | r = 0; | 574 | r = 0; |
571 | switch (reg->id) { | 575 | switch (reg->id) { |
572 | case KVM_REG_PPC_DAR: | 576 | case KVM_REG_PPC_DAR: |
573 | val = get_reg_val(reg->id, vcpu->arch.shared->dar); | 577 | val = get_reg_val(reg->id, kvmppc_get_dar(vcpu)); |
574 | break; | 578 | break; |
575 | case KVM_REG_PPC_DSISR: | 579 | case KVM_REG_PPC_DSISR: |
576 | val = get_reg_val(reg->id, vcpu->arch.shared->dsisr); | 580 | val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu)); |
577 | break; | 581 | break; |
578 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: | 582 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: |
579 | i = reg->id - KVM_REG_PPC_FPR0; | 583 | i = reg->id - KVM_REG_PPC_FPR0; |
@@ -627,6 +631,21 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
627 | val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu)); | 631 | val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu)); |
628 | break; | 632 | break; |
629 | #endif /* CONFIG_KVM_XICS */ | 633 | #endif /* CONFIG_KVM_XICS */ |
634 | case KVM_REG_PPC_FSCR: | ||
635 | val = get_reg_val(reg->id, vcpu->arch.fscr); | ||
636 | break; | ||
637 | case KVM_REG_PPC_TAR: | ||
638 | val = get_reg_val(reg->id, vcpu->arch.tar); | ||
639 | break; | ||
640 | case KVM_REG_PPC_EBBHR: | ||
641 | val = get_reg_val(reg->id, vcpu->arch.ebbhr); | ||
642 | break; | ||
643 | case KVM_REG_PPC_EBBRR: | ||
644 | val = get_reg_val(reg->id, vcpu->arch.ebbrr); | ||
645 | break; | ||
646 | case KVM_REG_PPC_BESCR: | ||
647 | val = get_reg_val(reg->id, vcpu->arch.bescr); | ||
648 | break; | ||
630 | default: | 649 | default: |
631 | r = -EINVAL; | 650 | r = -EINVAL; |
632 | break; | 651 | break; |
@@ -660,10 +679,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
660 | r = 0; | 679 | r = 0; |
661 | switch (reg->id) { | 680 | switch (reg->id) { |
662 | case KVM_REG_PPC_DAR: | 681 | case KVM_REG_PPC_DAR: |
663 | vcpu->arch.shared->dar = set_reg_val(reg->id, val); | 682 | kvmppc_set_dar(vcpu, set_reg_val(reg->id, val)); |
664 | break; | 683 | break; |
665 | case KVM_REG_PPC_DSISR: | 684 | case KVM_REG_PPC_DSISR: |
666 | vcpu->arch.shared->dsisr = set_reg_val(reg->id, val); | 685 | kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val)); |
667 | break; | 686 | break; |
668 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: | 687 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: |
669 | i = reg->id - KVM_REG_PPC_FPR0; | 688 | i = reg->id - KVM_REG_PPC_FPR0; |
@@ -716,6 +735,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
716 | set_reg_val(reg->id, val)); | 735 | set_reg_val(reg->id, val)); |
717 | break; | 736 | break; |
718 | #endif /* CONFIG_KVM_XICS */ | 737 | #endif /* CONFIG_KVM_XICS */ |
738 | case KVM_REG_PPC_FSCR: | ||
739 | vcpu->arch.fscr = set_reg_val(reg->id, val); | ||
740 | break; | ||
741 | case KVM_REG_PPC_TAR: | ||
742 | vcpu->arch.tar = set_reg_val(reg->id, val); | ||
743 | break; | ||
744 | case KVM_REG_PPC_EBBHR: | ||
745 | vcpu->arch.ebbhr = set_reg_val(reg->id, val); | ||
746 | break; | ||
747 | case KVM_REG_PPC_EBBRR: | ||
748 | vcpu->arch.ebbrr = set_reg_val(reg->id, val); | ||
749 | break; | ||
750 | case KVM_REG_PPC_BESCR: | ||
751 | vcpu->arch.bescr = set_reg_val(reg->id, val); | ||
752 | break; | ||
719 | default: | 753 | default: |
720 | r = -EINVAL; | 754 | r = -EINVAL; |
721 | break; | 755 | break; |
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 76a64ce6a5b6..93503bbdae43 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c | |||
@@ -91,7 +91,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
91 | 91 | ||
92 | static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) | 92 | static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) |
93 | { | 93 | { |
94 | return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf]; | 94 | return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf); |
95 | } | 95 | } |
96 | 96 | ||
97 | static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, | 97 | static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, |
@@ -131,7 +131,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu, | |||
131 | pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; | 131 | pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; |
132 | 132 | ||
133 | dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", | 133 | dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", |
134 | kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg, | 134 | kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg, |
135 | sr_vsid(sre)); | 135 | sr_vsid(sre)); |
136 | 136 | ||
137 | r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); | 137 | r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); |
@@ -160,7 +160,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
160 | else | 160 | else |
161 | bat = &vcpu_book3s->ibat[i]; | 161 | bat = &vcpu_book3s->ibat[i]; |
162 | 162 | ||
163 | if (vcpu->arch.shared->msr & MSR_PR) { | 163 | if (kvmppc_get_msr(vcpu) & MSR_PR) { |
164 | if (!bat->vp) | 164 | if (!bat->vp) |
165 | continue; | 165 | continue; |
166 | } else { | 166 | } else { |
@@ -208,6 +208,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
208 | u32 sre; | 208 | u32 sre; |
209 | hva_t ptegp; | 209 | hva_t ptegp; |
210 | u32 pteg[16]; | 210 | u32 pteg[16]; |
211 | u32 pte0, pte1; | ||
211 | u32 ptem = 0; | 212 | u32 ptem = 0; |
212 | int i; | 213 | int i; |
213 | int found = 0; | 214 | int found = 0; |
@@ -233,14 +234,16 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
233 | } | 234 | } |
234 | 235 | ||
235 | for (i=0; i<16; i+=2) { | 236 | for (i=0; i<16; i+=2) { |
236 | if (ptem == pteg[i]) { | 237 | pte0 = be32_to_cpu(pteg[i]); |
238 | pte1 = be32_to_cpu(pteg[i + 1]); | ||
239 | if (ptem == pte0) { | ||
237 | u8 pp; | 240 | u8 pp; |
238 | 241 | ||
239 | pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF); | 242 | pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF); |
240 | pp = pteg[i+1] & 3; | 243 | pp = pte1 & 3; |
241 | 244 | ||
242 | if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) || | 245 | if ((sr_kp(sre) && (kvmppc_get_msr(vcpu) & MSR_PR)) || |
243 | (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR))) | 246 | (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR))) |
244 | pp |= 4; | 247 | pp |= 4; |
245 | 248 | ||
246 | pte->may_write = false; | 249 | pte->may_write = false; |
@@ -260,7 +263,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
260 | } | 263 | } |
261 | 264 | ||
262 | dprintk_pte("MMU: Found PTE -> %x %x - %x\n", | 265 | dprintk_pte("MMU: Found PTE -> %x %x - %x\n", |
263 | pteg[i], pteg[i+1], pp); | 266 | pte0, pte1, pp); |
264 | found = 1; | 267 | found = 1; |
265 | break; | 268 | break; |
266 | } | 269 | } |
@@ -269,8 +272,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
269 | /* Update PTE C and A bits, so the guest's swapper knows we used the | 272 | /* Update PTE C and A bits, so the guest's swapper knows we used the |
270 | page */ | 273 | page */ |
271 | if (found) { | 274 | if (found) { |
272 | u32 pte_r = pteg[i+1]; | 275 | u32 pte_r = pte1; |
273 | char __user *addr = (char __user *) &pteg[i+1]; | 276 | char __user *addr = (char __user *) (ptegp + (i+1) * sizeof(u32)); |
274 | 277 | ||
275 | /* | 278 | /* |
276 | * Use single-byte writes to update the HPTE, to | 279 | * Use single-byte writes to update the HPTE, to |
@@ -296,7 +299,8 @@ no_page_found: | |||
296 | to_book3s(vcpu)->sdr1, ptegp); | 299 | to_book3s(vcpu)->sdr1, ptegp); |
297 | for (i=0; i<16; i+=2) { | 300 | for (i=0; i<16; i+=2) { |
298 | dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n", | 301 | dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n", |
299 | i, pteg[i], pteg[i+1], ptem); | 302 | i, be32_to_cpu(pteg[i]), |
303 | be32_to_cpu(pteg[i+1]), ptem); | ||
300 | } | 304 | } |
301 | } | 305 | } |
302 | 306 | ||
@@ -316,7 +320,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
316 | /* Magic page override */ | 320 | /* Magic page override */ |
317 | if (unlikely(mp_ea) && | 321 | if (unlikely(mp_ea) && |
318 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && | 322 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && |
319 | !(vcpu->arch.shared->msr & MSR_PR)) { | 323 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
320 | pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); | 324 | pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); |
321 | pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); | 325 | pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); |
322 | pte->raddr &= KVM_PAM; | 326 | pte->raddr &= KVM_PAM; |
@@ -341,13 +345,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
341 | 345 | ||
342 | static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) | 346 | static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) |
343 | { | 347 | { |
344 | return vcpu->arch.shared->sr[srnum]; | 348 | return kvmppc_get_sr(vcpu, srnum); |
345 | } | 349 | } |
346 | 350 | ||
347 | static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, | 351 | static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, |
348 | ulong value) | 352 | ulong value) |
349 | { | 353 | { |
350 | vcpu->arch.shared->sr[srnum] = value; | 354 | kvmppc_set_sr(vcpu, srnum, value); |
351 | kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); | 355 | kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); |
352 | } | 356 | } |
353 | 357 | ||
@@ -367,8 +371,9 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
367 | ulong ea = esid << SID_SHIFT; | 371 | ulong ea = esid << SID_SHIFT; |
368 | u32 sr; | 372 | u32 sr; |
369 | u64 gvsid = esid; | 373 | u64 gvsid = esid; |
374 | u64 msr = kvmppc_get_msr(vcpu); | ||
370 | 375 | ||
371 | if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 376 | if (msr & (MSR_DR|MSR_IR)) { |
372 | sr = find_sr(vcpu, ea); | 377 | sr = find_sr(vcpu, ea); |
373 | if (sr_valid(sr)) | 378 | if (sr_valid(sr)) |
374 | gvsid = sr_vsid(sr); | 379 | gvsid = sr_vsid(sr); |
@@ -377,7 +382,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
377 | /* In case we only have one of MSR_IR or MSR_DR set, let's put | 382 | /* In case we only have one of MSR_IR or MSR_DR set, let's put |
378 | that in the real-mode context (and hope RM doesn't access | 383 | that in the real-mode context (and hope RM doesn't access |
379 | high memory) */ | 384 | high memory) */ |
380 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 385 | switch (msr & (MSR_DR|MSR_IR)) { |
381 | case 0: | 386 | case 0: |
382 | *vsid = VSID_REAL | esid; | 387 | *vsid = VSID_REAL | esid; |
383 | break; | 388 | break; |
@@ -397,7 +402,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
397 | BUG(); | 402 | BUG(); |
398 | } | 403 | } |
399 | 404 | ||
400 | if (vcpu->arch.shared->msr & MSR_PR) | 405 | if (msr & MSR_PR) |
401 | *vsid |= VSID_PR; | 406 | *vsid |= VSID_PR; |
402 | 407 | ||
403 | return 0; | 408 | return 0; |
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 5fac89dfe4cd..678e75370495 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c | |||
@@ -92,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) | |||
92 | struct kvmppc_sid_map *map; | 92 | struct kvmppc_sid_map *map; |
93 | u16 sid_map_mask; | 93 | u16 sid_map_mask; |
94 | 94 | ||
95 | if (vcpu->arch.shared->msr & MSR_PR) | 95 | if (kvmppc_get_msr(vcpu) & MSR_PR) |
96 | gvsid |= VSID_PR; | 96 | gvsid |= VSID_PR; |
97 | 97 | ||
98 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); | 98 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); |
@@ -279,7 +279,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
279 | u16 sid_map_mask; | 279 | u16 sid_map_mask; |
280 | static int backwards_map = 0; | 280 | static int backwards_map = 0; |
281 | 281 | ||
282 | if (vcpu->arch.shared->msr & MSR_PR) | 282 | if (kvmppc_get_msr(vcpu) & MSR_PR) |
283 | gvsid |= VSID_PR; | 283 | gvsid |= VSID_PR; |
284 | 284 | ||
285 | /* We might get collisions that trap in preceding order, so let's | 285 | /* We might get collisions that trap in preceding order, so let's |
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 83da1f868fd5..774a253ca4e1 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) | 39 | static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) |
40 | { | 40 | { |
41 | kvmppc_set_msr(vcpu, MSR_SF); | 41 | kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); |
42 | } | 42 | } |
43 | 43 | ||
44 | static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( | 44 | static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( |
@@ -226,7 +226,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
226 | /* Magic page override */ | 226 | /* Magic page override */ |
227 | if (unlikely(mp_ea) && | 227 | if (unlikely(mp_ea) && |
228 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && | 228 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && |
229 | !(vcpu->arch.shared->msr & MSR_PR)) { | 229 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
230 | gpte->eaddr = eaddr; | 230 | gpte->eaddr = eaddr; |
231 | gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); | 231 | gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); |
232 | gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); | 232 | gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); |
@@ -269,18 +269,21 @@ do_second: | |||
269 | goto no_page_found; | 269 | goto no_page_found; |
270 | } | 270 | } |
271 | 271 | ||
272 | if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp) | 272 | if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp) |
273 | key = 4; | 273 | key = 4; |
274 | else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks) | 274 | else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks) |
275 | key = 4; | 275 | key = 4; |
276 | 276 | ||
277 | for (i=0; i<16; i+=2) { | 277 | for (i=0; i<16; i+=2) { |
278 | u64 pte0 = be64_to_cpu(pteg[i]); | ||
279 | u64 pte1 = be64_to_cpu(pteg[i + 1]); | ||
280 | |||
278 | /* Check all relevant fields of 1st dword */ | 281 | /* Check all relevant fields of 1st dword */ |
279 | if ((pteg[i] & v_mask) == v_val) { | 282 | if ((pte0 & v_mask) == v_val) { |
280 | /* If large page bit is set, check pgsize encoding */ | 283 | /* If large page bit is set, check pgsize encoding */ |
281 | if (slbe->large && | 284 | if (slbe->large && |
282 | (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { | 285 | (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { |
283 | pgsize = decode_pagesize(slbe, pteg[i+1]); | 286 | pgsize = decode_pagesize(slbe, pte1); |
284 | if (pgsize < 0) | 287 | if (pgsize < 0) |
285 | continue; | 288 | continue; |
286 | } | 289 | } |
@@ -297,8 +300,8 @@ do_second: | |||
297 | goto do_second; | 300 | goto do_second; |
298 | } | 301 | } |
299 | 302 | ||
300 | v = pteg[i]; | 303 | v = be64_to_cpu(pteg[i]); |
301 | r = pteg[i+1]; | 304 | r = be64_to_cpu(pteg[i+1]); |
302 | pp = (r & HPTE_R_PP) | key; | 305 | pp = (r & HPTE_R_PP) | key; |
303 | if (r & HPTE_R_PP0) | 306 | if (r & HPTE_R_PP0) |
304 | pp |= 8; | 307 | pp |= 8; |
@@ -310,6 +313,9 @@ do_second: | |||
310 | gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask); | 313 | gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask); |
311 | gpte->page_size = pgsize; | 314 | gpte->page_size = pgsize; |
312 | gpte->may_execute = ((r & HPTE_R_N) ? false : true); | 315 | gpte->may_execute = ((r & HPTE_R_N) ? false : true); |
316 | if (unlikely(vcpu->arch.disable_kernel_nx) && | ||
317 | !(kvmppc_get_msr(vcpu) & MSR_PR)) | ||
318 | gpte->may_execute = true; | ||
313 | gpte->may_read = false; | 319 | gpte->may_read = false; |
314 | gpte->may_write = false; | 320 | gpte->may_write = false; |
315 | 321 | ||
@@ -342,14 +348,14 @@ do_second: | |||
342 | * non-PAPR platforms such as mac99, and this is | 348 | * non-PAPR platforms such as mac99, and this is |
343 | * what real hardware does. | 349 | * what real hardware does. |
344 | */ | 350 | */ |
345 | char __user *addr = (char __user *) &pteg[i+1]; | 351 | char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); |
346 | r |= HPTE_R_R; | 352 | r |= HPTE_R_R; |
347 | put_user(r >> 8, addr + 6); | 353 | put_user(r >> 8, addr + 6); |
348 | } | 354 | } |
349 | if (iswrite && gpte->may_write && !(r & HPTE_R_C)) { | 355 | if (iswrite && gpte->may_write && !(r & HPTE_R_C)) { |
350 | /* Set the dirty flag */ | 356 | /* Set the dirty flag */ |
351 | /* Use a single byte write */ | 357 | /* Use a single byte write */ |
352 | char __user *addr = (char __user *) &pteg[i+1]; | 358 | char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); |
353 | r |= HPTE_R_C; | 359 | r |= HPTE_R_C; |
354 | put_user(r, addr + 7); | 360 | put_user(r, addr + 7); |
355 | } | 361 | } |
@@ -479,7 +485,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) | |||
479 | vcpu->arch.slb[i].origv = 0; | 485 | vcpu->arch.slb[i].origv = 0; |
480 | } | 486 | } |
481 | 487 | ||
482 | if (vcpu->arch.shared->msr & MSR_IR) { | 488 | if (kvmppc_get_msr(vcpu) & MSR_IR) { |
483 | kvmppc_mmu_flush_segments(vcpu); | 489 | kvmppc_mmu_flush_segments(vcpu); |
484 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | 490 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
485 | } | 491 | } |
@@ -563,7 +569,7 @@ static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid) | |||
563 | { | 569 | { |
564 | ulong mp_ea = vcpu->arch.magic_page_ea; | 570 | ulong mp_ea = vcpu->arch.magic_page_ea; |
565 | 571 | ||
566 | return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) && | 572 | return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) && |
567 | (mp_ea >> SID_SHIFT) == esid; | 573 | (mp_ea >> SID_SHIFT) == esid; |
568 | } | 574 | } |
569 | #endif | 575 | #endif |
@@ -576,8 +582,9 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
576 | u64 gvsid = esid; | 582 | u64 gvsid = esid; |
577 | ulong mp_ea = vcpu->arch.magic_page_ea; | 583 | ulong mp_ea = vcpu->arch.magic_page_ea; |
578 | int pagesize = MMU_PAGE_64K; | 584 | int pagesize = MMU_PAGE_64K; |
585 | u64 msr = kvmppc_get_msr(vcpu); | ||
579 | 586 | ||
580 | if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 587 | if (msr & (MSR_DR|MSR_IR)) { |
581 | slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); | 588 | slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); |
582 | if (slb) { | 589 | if (slb) { |
583 | gvsid = slb->vsid; | 590 | gvsid = slb->vsid; |
@@ -590,7 +597,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
590 | } | 597 | } |
591 | } | 598 | } |
592 | 599 | ||
593 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 600 | switch (msr & (MSR_DR|MSR_IR)) { |
594 | case 0: | 601 | case 0: |
595 | gvsid = VSID_REAL | esid; | 602 | gvsid = VSID_REAL | esid; |
596 | break; | 603 | break; |
@@ -623,7 +630,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
623 | gvsid |= VSID_64K; | 630 | gvsid |= VSID_64K; |
624 | #endif | 631 | #endif |
625 | 632 | ||
626 | if (vcpu->arch.shared->msr & MSR_PR) | 633 | if (kvmppc_get_msr(vcpu) & MSR_PR) |
627 | gvsid |= VSID_PR; | 634 | gvsid |= VSID_PR; |
628 | 635 | ||
629 | *vsid = gvsid; | 636 | *vsid = gvsid; |
@@ -633,7 +640,7 @@ no_slb: | |||
633 | /* Catch magic page case */ | 640 | /* Catch magic page case */ |
634 | if (unlikely(mp_ea) && | 641 | if (unlikely(mp_ea) && |
635 | unlikely(esid == (mp_ea >> SID_SHIFT)) && | 642 | unlikely(esid == (mp_ea >> SID_SHIFT)) && |
636 | !(vcpu->arch.shared->msr & MSR_PR)) { | 643 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
637 | *vsid = VSID_REAL | esid; | 644 | *vsid = VSID_REAL | esid; |
638 | return 0; | 645 | return 0; |
639 | } | 646 | } |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 0d513af62bba..0ac98392f363 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -58,7 +58,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) | |||
58 | struct kvmppc_sid_map *map; | 58 | struct kvmppc_sid_map *map; |
59 | u16 sid_map_mask; | 59 | u16 sid_map_mask; |
60 | 60 | ||
61 | if (vcpu->arch.shared->msr & MSR_PR) | 61 | if (kvmppc_get_msr(vcpu) & MSR_PR) |
62 | gvsid |= VSID_PR; | 62 | gvsid |= VSID_PR; |
63 | 63 | ||
64 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); | 64 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); |
@@ -230,7 +230,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
230 | u16 sid_map_mask; | 230 | u16 sid_map_mask; |
231 | static int backwards_map = 0; | 231 | static int backwards_map = 0; |
232 | 232 | ||
233 | if (vcpu->arch.shared->msr & MSR_PR) | 233 | if (kvmppc_get_msr(vcpu) & MSR_PR) |
234 | gvsid |= VSID_PR; | 234 | gvsid |= VSID_PR; |
235 | 235 | ||
236 | /* We might get collisions that trap in preceding order, so let's | 236 | /* We might get collisions that trap in preceding order, so let's |
@@ -271,11 +271,8 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) | |||
271 | int found_inval = -1; | 271 | int found_inval = -1; |
272 | int r; | 272 | int r; |
273 | 273 | ||
274 | if (!svcpu->slb_max) | ||
275 | svcpu->slb_max = 1; | ||
276 | |||
277 | /* Are we overwriting? */ | 274 | /* Are we overwriting? */ |
278 | for (i = 1; i < svcpu->slb_max; i++) { | 275 | for (i = 0; i < svcpu->slb_max; i++) { |
279 | if (!(svcpu->slb[i].esid & SLB_ESID_V)) | 276 | if (!(svcpu->slb[i].esid & SLB_ESID_V)) |
280 | found_inval = i; | 277 | found_inval = i; |
281 | else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { | 278 | else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { |
@@ -285,7 +282,7 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) | |||
285 | } | 282 | } |
286 | 283 | ||
287 | /* Found a spare entry that was invalidated before */ | 284 | /* Found a spare entry that was invalidated before */ |
288 | if (found_inval > 0) { | 285 | if (found_inval >= 0) { |
289 | r = found_inval; | 286 | r = found_inval; |
290 | goto out; | 287 | goto out; |
291 | } | 288 | } |
@@ -359,7 +356,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size) | |||
359 | ulong seg_mask = -seg_size; | 356 | ulong seg_mask = -seg_size; |
360 | int i; | 357 | int i; |
361 | 358 | ||
362 | for (i = 1; i < svcpu->slb_max; i++) { | 359 | for (i = 0; i < svcpu->slb_max; i++) { |
363 | if ((svcpu->slb[i].esid & SLB_ESID_V) && | 360 | if ((svcpu->slb[i].esid & SLB_ESID_V) && |
364 | (svcpu->slb[i].esid & seg_mask) == ea) { | 361 | (svcpu->slb[i].esid & seg_mask) == ea) { |
365 | /* Invalidate this entry */ | 362 | /* Invalidate this entry */ |
@@ -373,7 +370,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size) | |||
373 | void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | 370 | void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) |
374 | { | 371 | { |
375 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 372 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
376 | svcpu->slb_max = 1; | 373 | svcpu->slb_max = 0; |
377 | svcpu->slb[0].esid = 0; | 374 | svcpu->slb[0].esid = 0; |
378 | svcpu_put(svcpu); | 375 | svcpu_put(svcpu); |
379 | } | 376 | } |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index fb25ebc0af0c..80561074078d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -52,7 +52,7 @@ static void kvmppc_rmap_reset(struct kvm *kvm); | |||
52 | 52 | ||
53 | long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) | 53 | long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) |
54 | { | 54 | { |
55 | unsigned long hpt; | 55 | unsigned long hpt = 0; |
56 | struct revmap_entry *rev; | 56 | struct revmap_entry *rev; |
57 | struct page *page = NULL; | 57 | struct page *page = NULL; |
58 | long order = KVM_DEFAULT_HPT_ORDER; | 58 | long order = KVM_DEFAULT_HPT_ORDER; |
@@ -64,22 +64,11 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) | |||
64 | } | 64 | } |
65 | 65 | ||
66 | kvm->arch.hpt_cma_alloc = 0; | 66 | kvm->arch.hpt_cma_alloc = 0; |
67 | /* | 67 | VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER); |
68 | * try first to allocate it from the kernel page allocator. | 68 | page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT)); |
69 | * We keep the CMA reserved for failed allocation. | 69 | if (page) { |
70 | */ | 70 | hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); |
71 | hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT | | 71 | kvm->arch.hpt_cma_alloc = 1; |
72 | __GFP_NOWARN, order - PAGE_SHIFT); | ||
73 | |||
74 | /* Next try to allocate from the preallocated pool */ | ||
75 | if (!hpt) { | ||
76 | VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER); | ||
77 | page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT)); | ||
78 | if (page) { | ||
79 | hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); | ||
80 | kvm->arch.hpt_cma_alloc = 1; | ||
81 | } else | ||
82 | --order; | ||
83 | } | 72 | } |
84 | 73 | ||
85 | /* Lastly try successively smaller sizes from the page allocator */ | 74 | /* Lastly try successively smaller sizes from the page allocator */ |
@@ -596,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
596 | struct kvm *kvm = vcpu->kvm; | 585 | struct kvm *kvm = vcpu->kvm; |
597 | unsigned long *hptep, hpte[3], r; | 586 | unsigned long *hptep, hpte[3], r; |
598 | unsigned long mmu_seq, psize, pte_size; | 587 | unsigned long mmu_seq, psize, pte_size; |
588 | unsigned long gpa_base, gfn_base; | ||
599 | unsigned long gpa, gfn, hva, pfn; | 589 | unsigned long gpa, gfn, hva, pfn; |
600 | struct kvm_memory_slot *memslot; | 590 | struct kvm_memory_slot *memslot; |
601 | unsigned long *rmap; | 591 | unsigned long *rmap; |
@@ -634,7 +624,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
634 | 624 | ||
635 | /* Translate the logical address and get the page */ | 625 | /* Translate the logical address and get the page */ |
636 | psize = hpte_page_size(hpte[0], r); | 626 | psize = hpte_page_size(hpte[0], r); |
637 | gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1)); | 627 | gpa_base = r & HPTE_R_RPN & ~(psize - 1); |
628 | gfn_base = gpa_base >> PAGE_SHIFT; | ||
629 | gpa = gpa_base | (ea & (psize - 1)); | ||
638 | gfn = gpa >> PAGE_SHIFT; | 630 | gfn = gpa >> PAGE_SHIFT; |
639 | memslot = gfn_to_memslot(kvm, gfn); | 631 | memslot = gfn_to_memslot(kvm, gfn); |
640 | 632 | ||
@@ -646,6 +638,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
646 | if (!kvm->arch.using_mmu_notifiers) | 638 | if (!kvm->arch.using_mmu_notifiers) |
647 | return -EFAULT; /* should never get here */ | 639 | return -EFAULT; /* should never get here */ |
648 | 640 | ||
641 | /* | ||
642 | * This should never happen, because of the slot_is_aligned() | ||
643 | * check in kvmppc_do_h_enter(). | ||
644 | */ | ||
645 | if (gfn_base < memslot->base_gfn) | ||
646 | return -EFAULT; | ||
647 | |||
649 | /* used to check for invalidations in progress */ | 648 | /* used to check for invalidations in progress */ |
650 | mmu_seq = kvm->mmu_notifier_seq; | 649 | mmu_seq = kvm->mmu_notifier_seq; |
651 | smp_rmb(); | 650 | smp_rmb(); |
@@ -738,7 +737,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
738 | goto out_unlock; | 737 | goto out_unlock; |
739 | hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; | 738 | hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; |
740 | 739 | ||
741 | rmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; | 740 | /* Always put the HPTE in the rmap chain for the page base address */ |
741 | rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; | ||
742 | lock_rmap(rmap); | 742 | lock_rmap(rmap); |
743 | 743 | ||
744 | /* Check if we might have been invalidated; let the guest retry if so */ | 744 | /* Check if we might have been invalidated; let the guest retry if so */ |
@@ -1060,22 +1060,33 @@ void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) | |||
1060 | kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); | 1060 | kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); |
1061 | } | 1061 | } |
1062 | 1062 | ||
1063 | static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp) | 1063 | static int vcpus_running(struct kvm *kvm) |
1064 | { | ||
1065 | return atomic_read(&kvm->arch.vcpus_running) != 0; | ||
1066 | } | ||
1067 | |||
1068 | /* | ||
1069 | * Returns the number of system pages that are dirty. | ||
1070 | * This can be more than 1 if we find a huge-page HPTE. | ||
1071 | */ | ||
1072 | static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) | ||
1064 | { | 1073 | { |
1065 | struct revmap_entry *rev = kvm->arch.revmap; | 1074 | struct revmap_entry *rev = kvm->arch.revmap; |
1066 | unsigned long head, i, j; | 1075 | unsigned long head, i, j; |
1076 | unsigned long n; | ||
1077 | unsigned long v, r; | ||
1067 | unsigned long *hptep; | 1078 | unsigned long *hptep; |
1068 | int ret = 0; | 1079 | int npages_dirty = 0; |
1069 | 1080 | ||
1070 | retry: | 1081 | retry: |
1071 | lock_rmap(rmapp); | 1082 | lock_rmap(rmapp); |
1072 | if (*rmapp & KVMPPC_RMAP_CHANGED) { | 1083 | if (*rmapp & KVMPPC_RMAP_CHANGED) { |
1073 | *rmapp &= ~KVMPPC_RMAP_CHANGED; | 1084 | *rmapp &= ~KVMPPC_RMAP_CHANGED; |
1074 | ret = 1; | 1085 | npages_dirty = 1; |
1075 | } | 1086 | } |
1076 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { | 1087 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
1077 | unlock_rmap(rmapp); | 1088 | unlock_rmap(rmapp); |
1078 | return ret; | 1089 | return npages_dirty; |
1079 | } | 1090 | } |
1080 | 1091 | ||
1081 | i = head = *rmapp & KVMPPC_RMAP_INDEX; | 1092 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
@@ -1083,7 +1094,22 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp) | |||
1083 | hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); | 1094 | hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); |
1084 | j = rev[i].forw; | 1095 | j = rev[i].forw; |
1085 | 1096 | ||
1086 | if (!(hptep[1] & HPTE_R_C)) | 1097 | /* |
1098 | * Checking the C (changed) bit here is racy since there | ||
1099 | * is no guarantee about when the hardware writes it back. | ||
1100 | * If the HPTE is not writable then it is stable since the | ||
1101 | * page can't be written to, and we would have done a tlbie | ||
1102 | * (which forces the hardware to complete any writeback) | ||
1103 | * when making the HPTE read-only. | ||
1104 | * If vcpus are running then this call is racy anyway | ||
1105 | * since the page could get dirtied subsequently, so we | ||
1106 | * expect there to be a further call which would pick up | ||
1107 | * any delayed C bit writeback. | ||
1108 | * Otherwise we need to do the tlbie even if C==0 in | ||
1109 | * order to pick up any delayed writeback of C. | ||
1110 | */ | ||
1111 | if (!(hptep[1] & HPTE_R_C) && | ||
1112 | (!hpte_is_writable(hptep[1]) || vcpus_running(kvm))) | ||
1087 | continue; | 1113 | continue; |
1088 | 1114 | ||
1089 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { | 1115 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
@@ -1095,24 +1121,33 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp) | |||
1095 | } | 1121 | } |
1096 | 1122 | ||
1097 | /* Now check and modify the HPTE */ | 1123 | /* Now check and modify the HPTE */ |
1098 | if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) { | 1124 | if (!(hptep[0] & HPTE_V_VALID)) |
1099 | /* need to make it temporarily absent to clear C */ | 1125 | continue; |
1100 | hptep[0] |= HPTE_V_ABSENT; | 1126 | |
1101 | kvmppc_invalidate_hpte(kvm, hptep, i); | 1127 | /* need to make it temporarily absent so C is stable */ |
1102 | hptep[1] &= ~HPTE_R_C; | 1128 | hptep[0] |= HPTE_V_ABSENT; |
1103 | eieio(); | 1129 | kvmppc_invalidate_hpte(kvm, hptep, i); |
1104 | hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; | 1130 | v = hptep[0]; |
1131 | r = hptep[1]; | ||
1132 | if (r & HPTE_R_C) { | ||
1133 | hptep[1] = r & ~HPTE_R_C; | ||
1105 | if (!(rev[i].guest_rpte & HPTE_R_C)) { | 1134 | if (!(rev[i].guest_rpte & HPTE_R_C)) { |
1106 | rev[i].guest_rpte |= HPTE_R_C; | 1135 | rev[i].guest_rpte |= HPTE_R_C; |
1107 | note_hpte_modification(kvm, &rev[i]); | 1136 | note_hpte_modification(kvm, &rev[i]); |
1108 | } | 1137 | } |
1109 | ret = 1; | 1138 | n = hpte_page_size(v, r); |
1139 | n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
1140 | if (n > npages_dirty) | ||
1141 | npages_dirty = n; | ||
1142 | eieio(); | ||
1110 | } | 1143 | } |
1111 | hptep[0] &= ~HPTE_V_HVLOCK; | 1144 | v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK); |
1145 | v |= HPTE_V_VALID; | ||
1146 | hptep[0] = v; | ||
1112 | } while ((i = j) != head); | 1147 | } while ((i = j) != head); |
1113 | 1148 | ||
1114 | unlock_rmap(rmapp); | 1149 | unlock_rmap(rmapp); |
1115 | return ret; | 1150 | return npages_dirty; |
1116 | } | 1151 | } |
1117 | 1152 | ||
1118 | static void harvest_vpa_dirty(struct kvmppc_vpa *vpa, | 1153 | static void harvest_vpa_dirty(struct kvmppc_vpa *vpa, |
@@ -1136,15 +1171,22 @@ static void harvest_vpa_dirty(struct kvmppc_vpa *vpa, | |||
1136 | long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, | 1171 | long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, |
1137 | unsigned long *map) | 1172 | unsigned long *map) |
1138 | { | 1173 | { |
1139 | unsigned long i; | 1174 | unsigned long i, j; |
1140 | unsigned long *rmapp; | 1175 | unsigned long *rmapp; |
1141 | struct kvm_vcpu *vcpu; | 1176 | struct kvm_vcpu *vcpu; |
1142 | 1177 | ||
1143 | preempt_disable(); | 1178 | preempt_disable(); |
1144 | rmapp = memslot->arch.rmap; | 1179 | rmapp = memslot->arch.rmap; |
1145 | for (i = 0; i < memslot->npages; ++i) { | 1180 | for (i = 0; i < memslot->npages; ++i) { |
1146 | if (kvm_test_clear_dirty(kvm, rmapp) && map) | 1181 | int npages = kvm_test_clear_dirty_npages(kvm, rmapp); |
1147 | __set_bit_le(i, map); | 1182 | /* |
1183 | * Note that if npages > 0 then i must be a multiple of npages, | ||
1184 | * since we always put huge-page HPTEs in the rmap chain | ||
1185 | * corresponding to their page base address. | ||
1186 | */ | ||
1187 | if (npages && map) | ||
1188 | for (j = i; npages; ++j, --npages) | ||
1189 | __set_bit_le(j, map); | ||
1148 | ++rmapp; | 1190 | ++rmapp; |
1149 | } | 1191 | } |
1150 | 1192 | ||
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index 4f12e8f0c718..3589c4e3d49b 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S | |||
@@ -17,30 +17,9 @@ | |||
17 | * Authors: Alexander Graf <agraf@suse.de> | 17 | * Authors: Alexander Graf <agraf@suse.de> |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifdef __LITTLE_ENDIAN__ | 20 | #define SHADOW_SLB_ENTRY_LEN 0x10 |
21 | #error Need to fix SLB shadow accesses in little endian mode | 21 | #define OFFSET_ESID(x) (SHADOW_SLB_ENTRY_LEN * x) |
22 | #endif | 22 | #define OFFSET_VSID(x) ((SHADOW_SLB_ENTRY_LEN * x) + 8) |
23 | |||
24 | #define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10)) | ||
25 | #define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8) | ||
26 | #define UNBOLT_SLB_ENTRY(num) \ | ||
27 | ld r9, SHADOW_SLB_ESID(num)(r12); \ | ||
28 | /* Invalid? Skip. */; \ | ||
29 | rldicl. r0, r9, 37, 63; \ | ||
30 | beq slb_entry_skip_ ## num; \ | ||
31 | xoris r9, r9, SLB_ESID_V@h; \ | ||
32 | std r9, SHADOW_SLB_ESID(num)(r12); \ | ||
33 | slb_entry_skip_ ## num: | ||
34 | |||
35 | #define REBOLT_SLB_ENTRY(num) \ | ||
36 | ld r10, SHADOW_SLB_ESID(num)(r11); \ | ||
37 | cmpdi r10, 0; \ | ||
38 | beq slb_exit_skip_ ## num; \ | ||
39 | oris r10, r10, SLB_ESID_V@h; \ | ||
40 | ld r9, SHADOW_SLB_VSID(num)(r11); \ | ||
41 | slbmte r9, r10; \ | ||
42 | std r10, SHADOW_SLB_ESID(num)(r11); \ | ||
43 | slb_exit_skip_ ## num: | ||
44 | 23 | ||
45 | /****************************************************************************** | 24 | /****************************************************************************** |
46 | * * | 25 | * * |
@@ -64,20 +43,15 @@ slb_exit_skip_ ## num: | |||
64 | * SVCPU[LR] = guest LR | 43 | * SVCPU[LR] = guest LR |
65 | */ | 44 | */ |
66 | 45 | ||
67 | /* Remove LPAR shadow entries */ | 46 | BEGIN_FW_FTR_SECTION |
68 | 47 | ||
69 | #if SLB_NUM_BOLTED == 3 | 48 | /* Declare SLB shadow as 0 entries big */ |
70 | 49 | ||
71 | ld r12, PACA_SLBSHADOWPTR(r13) | 50 | ld r11, PACA_SLBSHADOWPTR(r13) |
51 | li r8, 0 | ||
52 | stb r8, 3(r11) | ||
72 | 53 | ||
73 | /* Remove bolted entries */ | 54 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR) |
74 | UNBOLT_SLB_ENTRY(0) | ||
75 | UNBOLT_SLB_ENTRY(1) | ||
76 | UNBOLT_SLB_ENTRY(2) | ||
77 | |||
78 | #else | ||
79 | #error unknown number of bolted entries | ||
80 | #endif | ||
81 | 55 | ||
82 | /* Flush SLB */ | 56 | /* Flush SLB */ |
83 | 57 | ||
@@ -100,7 +74,7 @@ slb_loop_enter: | |||
100 | 74 | ||
101 | ld r10, 0(r11) | 75 | ld r10, 0(r11) |
102 | 76 | ||
103 | rldicl. r0, r10, 37, 63 | 77 | andis. r9, r10, SLB_ESID_V@h |
104 | beq slb_loop_enter_skip | 78 | beq slb_loop_enter_skip |
105 | 79 | ||
106 | ld r9, 8(r11) | 80 | ld r9, 8(r11) |
@@ -137,23 +111,42 @@ slb_do_enter: | |||
137 | * | 111 | * |
138 | */ | 112 | */ |
139 | 113 | ||
140 | /* Restore bolted entries from the shadow and fix it along the way */ | 114 | /* Remove all SLB entries that are in use. */ |
141 | 115 | ||
142 | /* We don't store anything in entry 0, so we don't need to take care of it */ | 116 | li r0, r0 |
117 | slbmte r0, r0 | ||
143 | slbia | 118 | slbia |
144 | isync | ||
145 | 119 | ||
146 | #if SLB_NUM_BOLTED == 3 | 120 | /* Restore bolted entries from the shadow */ |
147 | 121 | ||
148 | ld r11, PACA_SLBSHADOWPTR(r13) | 122 | ld r11, PACA_SLBSHADOWPTR(r13) |
149 | 123 | ||
150 | REBOLT_SLB_ENTRY(0) | 124 | BEGIN_FW_FTR_SECTION |
151 | REBOLT_SLB_ENTRY(1) | 125 | |
152 | REBOLT_SLB_ENTRY(2) | 126 | /* Declare SLB shadow as SLB_NUM_BOLTED entries big */ |
153 | 127 | ||
154 | #else | 128 | li r8, SLB_NUM_BOLTED |
155 | #error unknown number of bolted entries | 129 | stb r8, 3(r11) |
156 | #endif | 130 | |
131 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR) | ||
132 | |||
133 | /* Manually load all entries from shadow SLB */ | ||
134 | |||
135 | li r8, SLBSHADOW_SAVEAREA | ||
136 | li r7, SLBSHADOW_SAVEAREA + 8 | ||
137 | |||
138 | .rept SLB_NUM_BOLTED | ||
139 | LDX_BE r10, r11, r8 | ||
140 | cmpdi r10, 0 | ||
141 | beq 1f | ||
142 | LDX_BE r9, r11, r7 | ||
143 | slbmte r9, r10 | ||
144 | 1: addi r7, r7, SHADOW_SLB_ENTRY_LEN | ||
145 | addi r8, r8, SHADOW_SLB_ENTRY_LEN | ||
146 | .endr | ||
147 | |||
148 | isync | ||
149 | sync | ||
157 | 150 | ||
158 | slb_do_exit: | 151 | slb_do_exit: |
159 | 152 | ||
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 99d40f8977e8..3f295269af37 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
@@ -80,7 +80,7 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) | |||
80 | return false; | 80 | return false; |
81 | 81 | ||
82 | /* Limit user space to its own small SPR set */ | 82 | /* Limit user space to its own small SPR set */ |
83 | if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM) | 83 | if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM) |
84 | return false; | 84 | return false; |
85 | 85 | ||
86 | return true; | 86 | return true; |
@@ -94,14 +94,31 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
94 | int rs = get_rs(inst); | 94 | int rs = get_rs(inst); |
95 | int ra = get_ra(inst); | 95 | int ra = get_ra(inst); |
96 | int rb = get_rb(inst); | 96 | int rb = get_rb(inst); |
97 | u32 inst_sc = 0x44000002; | ||
97 | 98 | ||
98 | switch (get_op(inst)) { | 99 | switch (get_op(inst)) { |
100 | case 0: | ||
101 | emulated = EMULATE_FAIL; | ||
102 | if ((kvmppc_get_msr(vcpu) & MSR_LE) && | ||
103 | (inst == swab32(inst_sc))) { | ||
104 | /* | ||
105 | * This is the byte reversed syscall instruction of our | ||
106 | * hypercall handler. Early versions of LE Linux didn't | ||
107 | * swap the instructions correctly and ended up in | ||
108 | * illegal instructions. | ||
109 | * Just always fail hypercalls on these broken systems. | ||
110 | */ | ||
111 | kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED); | ||
112 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); | ||
113 | emulated = EMULATE_DONE; | ||
114 | } | ||
115 | break; | ||
99 | case 19: | 116 | case 19: |
100 | switch (get_xop(inst)) { | 117 | switch (get_xop(inst)) { |
101 | case OP_19_XOP_RFID: | 118 | case OP_19_XOP_RFID: |
102 | case OP_19_XOP_RFI: | 119 | case OP_19_XOP_RFI: |
103 | kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0); | 120 | kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); |
104 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); | 121 | kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); |
105 | *advance = 0; | 122 | *advance = 0; |
106 | break; | 123 | break; |
107 | 124 | ||
@@ -113,16 +130,16 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
113 | case 31: | 130 | case 31: |
114 | switch (get_xop(inst)) { | 131 | switch (get_xop(inst)) { |
115 | case OP_31_XOP_MFMSR: | 132 | case OP_31_XOP_MFMSR: |
116 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); | 133 | kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); |
117 | break; | 134 | break; |
118 | case OP_31_XOP_MTMSRD: | 135 | case OP_31_XOP_MTMSRD: |
119 | { | 136 | { |
120 | ulong rs_val = kvmppc_get_gpr(vcpu, rs); | 137 | ulong rs_val = kvmppc_get_gpr(vcpu, rs); |
121 | if (inst & 0x10000) { | 138 | if (inst & 0x10000) { |
122 | ulong new_msr = vcpu->arch.shared->msr; | 139 | ulong new_msr = kvmppc_get_msr(vcpu); |
123 | new_msr &= ~(MSR_RI | MSR_EE); | 140 | new_msr &= ~(MSR_RI | MSR_EE); |
124 | new_msr |= rs_val & (MSR_RI | MSR_EE); | 141 | new_msr |= rs_val & (MSR_RI | MSR_EE); |
125 | vcpu->arch.shared->msr = new_msr; | 142 | kvmppc_set_msr_fast(vcpu, new_msr); |
126 | } else | 143 | } else |
127 | kvmppc_set_msr(vcpu, rs_val); | 144 | kvmppc_set_msr(vcpu, rs_val); |
128 | break; | 145 | break; |
@@ -179,7 +196,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
179 | ulong cmd = kvmppc_get_gpr(vcpu, 3); | 196 | ulong cmd = kvmppc_get_gpr(vcpu, 3); |
180 | int i; | 197 | int i; |
181 | 198 | ||
182 | if ((vcpu->arch.shared->msr & MSR_PR) || | 199 | if ((kvmppc_get_msr(vcpu) & MSR_PR) || |
183 | !vcpu->arch.papr_enabled) { | 200 | !vcpu->arch.papr_enabled) { |
184 | emulated = EMULATE_FAIL; | 201 | emulated = EMULATE_FAIL; |
185 | break; | 202 | break; |
@@ -261,14 +278,14 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
261 | ra_val = kvmppc_get_gpr(vcpu, ra); | 278 | ra_val = kvmppc_get_gpr(vcpu, ra); |
262 | 279 | ||
263 | addr = (ra_val + rb_val) & ~31ULL; | 280 | addr = (ra_val + rb_val) & ~31ULL; |
264 | if (!(vcpu->arch.shared->msr & MSR_SF)) | 281 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) |
265 | addr &= 0xffffffff; | 282 | addr &= 0xffffffff; |
266 | vaddr = addr; | 283 | vaddr = addr; |
267 | 284 | ||
268 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); | 285 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); |
269 | if ((r == -ENOENT) || (r == -EPERM)) { | 286 | if ((r == -ENOENT) || (r == -EPERM)) { |
270 | *advance = 0; | 287 | *advance = 0; |
271 | vcpu->arch.shared->dar = vaddr; | 288 | kvmppc_set_dar(vcpu, vaddr); |
272 | vcpu->arch.fault_dar = vaddr; | 289 | vcpu->arch.fault_dar = vaddr; |
273 | 290 | ||
274 | dsisr = DSISR_ISSTORE; | 291 | dsisr = DSISR_ISSTORE; |
@@ -277,7 +294,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
277 | else if (r == -EPERM) | 294 | else if (r == -EPERM) |
278 | dsisr |= DSISR_PROTFAULT; | 295 | dsisr |= DSISR_PROTFAULT; |
279 | 296 | ||
280 | vcpu->arch.shared->dsisr = dsisr; | 297 | kvmppc_set_dsisr(vcpu, dsisr); |
281 | vcpu->arch.fault_dsisr = dsisr; | 298 | vcpu->arch.fault_dsisr = dsisr; |
282 | 299 | ||
283 | kvmppc_book3s_queue_irqprio(vcpu, | 300 | kvmppc_book3s_queue_irqprio(vcpu, |
@@ -356,10 +373,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
356 | to_book3s(vcpu)->sdr1 = spr_val; | 373 | to_book3s(vcpu)->sdr1 = spr_val; |
357 | break; | 374 | break; |
358 | case SPRN_DSISR: | 375 | case SPRN_DSISR: |
359 | vcpu->arch.shared->dsisr = spr_val; | 376 | kvmppc_set_dsisr(vcpu, spr_val); |
360 | break; | 377 | break; |
361 | case SPRN_DAR: | 378 | case SPRN_DAR: |
362 | vcpu->arch.shared->dar = spr_val; | 379 | kvmppc_set_dar(vcpu, spr_val); |
363 | break; | 380 | break; |
364 | case SPRN_HIOR: | 381 | case SPRN_HIOR: |
365 | to_book3s(vcpu)->hior = spr_val; | 382 | to_book3s(vcpu)->hior = spr_val; |
@@ -438,6 +455,31 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
438 | case SPRN_GQR7: | 455 | case SPRN_GQR7: |
439 | to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; | 456 | to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; |
440 | break; | 457 | break; |
458 | case SPRN_FSCR: | ||
459 | vcpu->arch.fscr = spr_val; | ||
460 | break; | ||
461 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
462 | case SPRN_BESCR: | ||
463 | vcpu->arch.bescr = spr_val; | ||
464 | break; | ||
465 | case SPRN_EBBHR: | ||
466 | vcpu->arch.ebbhr = spr_val; | ||
467 | break; | ||
468 | case SPRN_EBBRR: | ||
469 | vcpu->arch.ebbrr = spr_val; | ||
470 | break; | ||
471 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
472 | case SPRN_TFHAR: | ||
473 | vcpu->arch.tfhar = spr_val; | ||
474 | break; | ||
475 | case SPRN_TEXASR: | ||
476 | vcpu->arch.texasr = spr_val; | ||
477 | break; | ||
478 | case SPRN_TFIAR: | ||
479 | vcpu->arch.tfiar = spr_val; | ||
480 | break; | ||
481 | #endif | ||
482 | #endif | ||
441 | case SPRN_ICTC: | 483 | case SPRN_ICTC: |
442 | case SPRN_THRM1: | 484 | case SPRN_THRM1: |
443 | case SPRN_THRM2: | 485 | case SPRN_THRM2: |
@@ -455,6 +497,13 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
455 | case SPRN_WPAR_GEKKO: | 497 | case SPRN_WPAR_GEKKO: |
456 | case SPRN_MSSSR0: | 498 | case SPRN_MSSSR0: |
457 | case SPRN_DABR: | 499 | case SPRN_DABR: |
500 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
501 | case SPRN_MMCRS: | ||
502 | case SPRN_MMCRA: | ||
503 | case SPRN_MMCR0: | ||
504 | case SPRN_MMCR1: | ||
505 | case SPRN_MMCR2: | ||
506 | #endif | ||
458 | break; | 507 | break; |
459 | unprivileged: | 508 | unprivileged: |
460 | default: | 509 | default: |
@@ -493,10 +542,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val | |||
493 | *spr_val = to_book3s(vcpu)->sdr1; | 542 | *spr_val = to_book3s(vcpu)->sdr1; |
494 | break; | 543 | break; |
495 | case SPRN_DSISR: | 544 | case SPRN_DSISR: |
496 | *spr_val = vcpu->arch.shared->dsisr; | 545 | *spr_val = kvmppc_get_dsisr(vcpu); |
497 | break; | 546 | break; |
498 | case SPRN_DAR: | 547 | case SPRN_DAR: |
499 | *spr_val = vcpu->arch.shared->dar; | 548 | *spr_val = kvmppc_get_dar(vcpu); |
500 | break; | 549 | break; |
501 | case SPRN_HIOR: | 550 | case SPRN_HIOR: |
502 | *spr_val = to_book3s(vcpu)->hior; | 551 | *spr_val = to_book3s(vcpu)->hior; |
@@ -538,6 +587,31 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val | |||
538 | case SPRN_GQR7: | 587 | case SPRN_GQR7: |
539 | *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; | 588 | *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; |
540 | break; | 589 | break; |
590 | case SPRN_FSCR: | ||
591 | *spr_val = vcpu->arch.fscr; | ||
592 | break; | ||
593 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
594 | case SPRN_BESCR: | ||
595 | *spr_val = vcpu->arch.bescr; | ||
596 | break; | ||
597 | case SPRN_EBBHR: | ||
598 | *spr_val = vcpu->arch.ebbhr; | ||
599 | break; | ||
600 | case SPRN_EBBRR: | ||
601 | *spr_val = vcpu->arch.ebbrr; | ||
602 | break; | ||
603 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
604 | case SPRN_TFHAR: | ||
605 | *spr_val = vcpu->arch.tfhar; | ||
606 | break; | ||
607 | case SPRN_TEXASR: | ||
608 | *spr_val = vcpu->arch.texasr; | ||
609 | break; | ||
610 | case SPRN_TFIAR: | ||
611 | *spr_val = vcpu->arch.tfiar; | ||
612 | break; | ||
613 | #endif | ||
614 | #endif | ||
541 | case SPRN_THRM1: | 615 | case SPRN_THRM1: |
542 | case SPRN_THRM2: | 616 | case SPRN_THRM2: |
543 | case SPRN_THRM3: | 617 | case SPRN_THRM3: |
@@ -553,6 +627,14 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val | |||
553 | case SPRN_WPAR_GEKKO: | 627 | case SPRN_WPAR_GEKKO: |
554 | case SPRN_MSSSR0: | 628 | case SPRN_MSSSR0: |
555 | case SPRN_DABR: | 629 | case SPRN_DABR: |
630 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
631 | case SPRN_MMCRS: | ||
632 | case SPRN_MMCRA: | ||
633 | case SPRN_MMCR0: | ||
634 | case SPRN_MMCR1: | ||
635 | case SPRN_MMCR2: | ||
636 | case SPRN_TIR: | ||
637 | #endif | ||
556 | *spr_val = 0; | 638 | *spr_val = 0; |
557 | break; | 639 | break; |
558 | default: | 640 | default: |
@@ -569,48 +651,17 @@ unprivileged: | |||
569 | 651 | ||
570 | u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) | 652 | u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) |
571 | { | 653 | { |
572 | u32 dsisr = 0; | 654 | return make_dsisr(inst); |
573 | |||
574 | /* | ||
575 | * This is what the spec says about DSISR bits (not mentioned = 0): | ||
576 | * | ||
577 | * 12:13 [DS] Set to bits 30:31 | ||
578 | * 15:16 [X] Set to bits 29:30 | ||
579 | * 17 [X] Set to bit 25 | ||
580 | * [D/DS] Set to bit 5 | ||
581 | * 18:21 [X] Set to bits 21:24 | ||
582 | * [D/DS] Set to bits 1:4 | ||
583 | * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS) | ||
584 | * 27:31 Set to bits 11:15 (RA) | ||
585 | */ | ||
586 | |||
587 | switch (get_op(inst)) { | ||
588 | /* D-form */ | ||
589 | case OP_LFS: | ||
590 | case OP_LFD: | ||
591 | case OP_STFD: | ||
592 | case OP_STFS: | ||
593 | dsisr |= (inst >> 12) & 0x4000; /* bit 17 */ | ||
594 | dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */ | ||
595 | break; | ||
596 | /* X-form */ | ||
597 | case 31: | ||
598 | dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */ | ||
599 | dsisr |= (inst << 8) & 0x04000; /* bit 17 */ | ||
600 | dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */ | ||
601 | break; | ||
602 | default: | ||
603 | printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); | ||
604 | break; | ||
605 | } | ||
606 | |||
607 | dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */ | ||
608 | |||
609 | return dsisr; | ||
610 | } | 655 | } |
611 | 656 | ||
612 | ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) | 657 | ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) |
613 | { | 658 | { |
659 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
660 | /* | ||
661 | * Linux's fix_alignment() assumes that DAR is valid, so can we | ||
662 | */ | ||
663 | return vcpu->arch.fault_dar; | ||
664 | #else | ||
614 | ulong dar = 0; | 665 | ulong dar = 0; |
615 | ulong ra = get_ra(inst); | 666 | ulong ra = get_ra(inst); |
616 | ulong rb = get_rb(inst); | 667 | ulong rb = get_rb(inst); |
@@ -635,4 +686,5 @@ ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) | |||
635 | } | 686 | } |
636 | 687 | ||
637 | return dar; | 688 | return dar; |
689 | #endif | ||
638 | } | 690 | } |
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c index 20d4ea8e656d..0d013fbc2e13 100644 --- a/arch/powerpc/kvm/book3s_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <asm/kvm_ppc.h> | ||
21 | #include <asm/kvm_book3s.h> | 22 | #include <asm/kvm_book3s.h> |
22 | 23 | ||
23 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | 24 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8227dba5af0f..aba05bbb3e74 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -879,24 +879,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
879 | case KVM_REG_PPC_IAMR: | 879 | case KVM_REG_PPC_IAMR: |
880 | *val = get_reg_val(id, vcpu->arch.iamr); | 880 | *val = get_reg_val(id, vcpu->arch.iamr); |
881 | break; | 881 | break; |
882 | case KVM_REG_PPC_FSCR: | ||
883 | *val = get_reg_val(id, vcpu->arch.fscr); | ||
884 | break; | ||
885 | case KVM_REG_PPC_PSPB: | 882 | case KVM_REG_PPC_PSPB: |
886 | *val = get_reg_val(id, vcpu->arch.pspb); | 883 | *val = get_reg_val(id, vcpu->arch.pspb); |
887 | break; | 884 | break; |
888 | case KVM_REG_PPC_EBBHR: | ||
889 | *val = get_reg_val(id, vcpu->arch.ebbhr); | ||
890 | break; | ||
891 | case KVM_REG_PPC_EBBRR: | ||
892 | *val = get_reg_val(id, vcpu->arch.ebbrr); | ||
893 | break; | ||
894 | case KVM_REG_PPC_BESCR: | ||
895 | *val = get_reg_val(id, vcpu->arch.bescr); | ||
896 | break; | ||
897 | case KVM_REG_PPC_TAR: | ||
898 | *val = get_reg_val(id, vcpu->arch.tar); | ||
899 | break; | ||
900 | case KVM_REG_PPC_DPDES: | 885 | case KVM_REG_PPC_DPDES: |
901 | *val = get_reg_val(id, vcpu->arch.vcore->dpdes); | 886 | *val = get_reg_val(id, vcpu->arch.vcore->dpdes); |
902 | break; | 887 | break; |
@@ -1091,24 +1076,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
1091 | case KVM_REG_PPC_IAMR: | 1076 | case KVM_REG_PPC_IAMR: |
1092 | vcpu->arch.iamr = set_reg_val(id, *val); | 1077 | vcpu->arch.iamr = set_reg_val(id, *val); |
1093 | break; | 1078 | break; |
1094 | case KVM_REG_PPC_FSCR: | ||
1095 | vcpu->arch.fscr = set_reg_val(id, *val); | ||
1096 | break; | ||
1097 | case KVM_REG_PPC_PSPB: | 1079 | case KVM_REG_PPC_PSPB: |
1098 | vcpu->arch.pspb = set_reg_val(id, *val); | 1080 | vcpu->arch.pspb = set_reg_val(id, *val); |
1099 | break; | 1081 | break; |
1100 | case KVM_REG_PPC_EBBHR: | ||
1101 | vcpu->arch.ebbhr = set_reg_val(id, *val); | ||
1102 | break; | ||
1103 | case KVM_REG_PPC_EBBRR: | ||
1104 | vcpu->arch.ebbrr = set_reg_val(id, *val); | ||
1105 | break; | ||
1106 | case KVM_REG_PPC_BESCR: | ||
1107 | vcpu->arch.bescr = set_reg_val(id, *val); | ||
1108 | break; | ||
1109 | case KVM_REG_PPC_TAR: | ||
1110 | vcpu->arch.tar = set_reg_val(id, *val); | ||
1111 | break; | ||
1112 | case KVM_REG_PPC_DPDES: | 1082 | case KVM_REG_PPC_DPDES: |
1113 | vcpu->arch.vcore->dpdes = set_reg_val(id, *val); | 1083 | vcpu->arch.vcore->dpdes = set_reg_val(id, *val); |
1114 | break; | 1084 | break; |
@@ -1280,6 +1250,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, | |||
1280 | goto free_vcpu; | 1250 | goto free_vcpu; |
1281 | 1251 | ||
1282 | vcpu->arch.shared = &vcpu->arch.shregs; | 1252 | vcpu->arch.shared = &vcpu->arch.shregs; |
1253 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | ||
1254 | /* | ||
1255 | * The shared struct is never shared on HV, | ||
1256 | * so we can always use host endianness | ||
1257 | */ | ||
1258 | #ifdef __BIG_ENDIAN__ | ||
1259 | vcpu->arch.shared_big_endian = true; | ||
1260 | #else | ||
1261 | vcpu->arch.shared_big_endian = false; | ||
1262 | #endif | ||
1263 | #endif | ||
1283 | vcpu->arch.mmcr[0] = MMCR0_FC; | 1264 | vcpu->arch.mmcr[0] = MMCR0_FC; |
1284 | vcpu->arch.ctrl = CTRL_RUNLATCH; | 1265 | vcpu->arch.ctrl = CTRL_RUNLATCH; |
1285 | /* default to host PVR, since we can't spoof it */ | 1266 | /* default to host PVR, since we can't spoof it */ |
@@ -1949,6 +1930,13 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, | |||
1949 | * support pte_enc here | 1930 | * support pte_enc here |
1950 | */ | 1931 | */ |
1951 | (*sps)->enc[0].pte_enc = def->penc[linux_psize]; | 1932 | (*sps)->enc[0].pte_enc = def->penc[linux_psize]; |
1933 | /* | ||
1934 | * Add 16MB MPSS support if host supports it | ||
1935 | */ | ||
1936 | if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) { | ||
1937 | (*sps)->enc[1].page_shift = 24; | ||
1938 | (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M]; | ||
1939 | } | ||
1952 | (*sps)++; | 1940 | (*sps)++; |
1953 | } | 1941 | } |
1954 | 1942 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 1d6c56ad5b60..ac840c6dfa9b 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
@@ -42,13 +42,14 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags) | |||
42 | 42 | ||
43 | /* | 43 | /* |
44 | * If there is only one vcore, and it's currently running, | 44 | * If there is only one vcore, and it's currently running, |
45 | * as indicated by local_paca->kvm_hstate.kvm_vcpu being set, | ||
45 | * we can use tlbiel as long as we mark all other physical | 46 | * we can use tlbiel as long as we mark all other physical |
46 | * cores as potentially having stale TLB entries for this lpid. | 47 | * cores as potentially having stale TLB entries for this lpid. |
47 | * If we're not using MMU notifiers, we never take pages away | 48 | * If we're not using MMU notifiers, we never take pages away |
48 | * from the guest, so we can use tlbiel if requested. | 49 | * from the guest, so we can use tlbiel if requested. |
49 | * Otherwise, don't use tlbiel. | 50 | * Otherwise, don't use tlbiel. |
50 | */ | 51 | */ |
51 | if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore) | 52 | if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu) |
52 | global = 0; | 53 | global = 0; |
53 | else if (kvm->arch.using_mmu_notifiers) | 54 | else if (kvm->arch.using_mmu_notifiers) |
54 | global = 1; | 55 | global = 1; |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ffbb871c2bd8..220aefbcb7ca 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -86,6 +86,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | |||
86 | lbz r4, LPPACA_PMCINUSE(r3) | 86 | lbz r4, LPPACA_PMCINUSE(r3) |
87 | cmpwi r4, 0 | 87 | cmpwi r4, 0 |
88 | beq 23f /* skip if not */ | 88 | beq 23f /* skip if not */ |
89 | BEGIN_FTR_SECTION | ||
90 | ld r3, HSTATE_MMCR(r13) | ||
91 | andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO | ||
92 | cmpwi r4, MMCR0_PMAO | ||
93 | beql kvmppc_fix_pmao | ||
94 | END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) | ||
89 | lwz r3, HSTATE_PMC(r13) | 95 | lwz r3, HSTATE_PMC(r13) |
90 | lwz r4, HSTATE_PMC + 4(r13) | 96 | lwz r4, HSTATE_PMC + 4(r13) |
91 | lwz r5, HSTATE_PMC + 8(r13) | 97 | lwz r5, HSTATE_PMC + 8(r13) |
@@ -726,6 +732,12 @@ skip_tm: | |||
726 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | 732 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ |
727 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | 733 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ |
728 | isync | 734 | isync |
735 | BEGIN_FTR_SECTION | ||
736 | ld r3, VCPU_MMCR(r4) | ||
737 | andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO | ||
738 | cmpwi r5, MMCR0_PMAO | ||
739 | beql kvmppc_fix_pmao | ||
740 | END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) | ||
729 | lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ | 741 | lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ |
730 | lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ | 742 | lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ |
731 | lwz r6, VCPU_PMC + 8(r4) | 743 | lwz r6, VCPU_PMC + 8(r4) |
@@ -1324,6 +1336,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
1324 | 25: | 1336 | 25: |
1325 | /* Save PMU registers if requested */ | 1337 | /* Save PMU registers if requested */ |
1326 | /* r8 and cr0.eq are live here */ | 1338 | /* r8 and cr0.eq are live here */ |
1339 | BEGIN_FTR_SECTION | ||
1340 | /* | ||
1341 | * POWER8 seems to have a hardware bug where setting | ||
1342 | * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] | ||
1343 | * when some counters are already negative doesn't seem | ||
1344 | * to cause a performance monitor alert (and hence interrupt). | ||
1345 | * The effect of this is that when saving the PMU state, | ||
1346 | * if there is no PMU alert pending when we read MMCR0 | ||
1347 | * before freezing the counters, but one becomes pending | ||
1348 | * before we read the counters, we lose it. | ||
1349 | * To work around this, we need a way to freeze the counters | ||
1350 | * before reading MMCR0. Normally, freezing the counters | ||
1351 | * is done by writing MMCR0 (to set MMCR0[FC]) which | ||
1352 | * unavoidably writes MMCR0[PMA0] as well. On POWER8, | ||
1353 | * we can also freeze the counters using MMCR2, by writing | ||
1354 | * 1s to all the counter freeze condition bits (there are | ||
1355 | * 9 bits each for 6 counters). | ||
1356 | */ | ||
1357 | li r3, -1 /* set all freeze bits */ | ||
1358 | clrrdi r3, r3, 10 | ||
1359 | mfspr r10, SPRN_MMCR2 | ||
1360 | mtspr SPRN_MMCR2, r3 | ||
1361 | isync | ||
1362 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | ||
1327 | li r3, 1 | 1363 | li r3, 1 |
1328 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | 1364 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ |
1329 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ | 1365 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ |
@@ -1347,6 +1383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
1347 | std r4, VCPU_MMCR(r9) | 1383 | std r4, VCPU_MMCR(r9) |
1348 | std r5, VCPU_MMCR + 8(r9) | 1384 | std r5, VCPU_MMCR + 8(r9) |
1349 | std r6, VCPU_MMCR + 16(r9) | 1385 | std r6, VCPU_MMCR + 16(r9) |
1386 | BEGIN_FTR_SECTION | ||
1387 | std r10, VCPU_MMCR + 24(r9) | ||
1388 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | ||
1350 | std r7, VCPU_SIAR(r9) | 1389 | std r7, VCPU_SIAR(r9) |
1351 | std r8, VCPU_SDAR(r9) | 1390 | std r8, VCPU_SDAR(r9) |
1352 | mfspr r3, SPRN_PMC1 | 1391 | mfspr r3, SPRN_PMC1 |
@@ -1370,12 +1409,10 @@ BEGIN_FTR_SECTION | |||
1370 | stw r11, VCPU_PMC + 28(r9) | 1409 | stw r11, VCPU_PMC + 28(r9) |
1371 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | 1410 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
1372 | BEGIN_FTR_SECTION | 1411 | BEGIN_FTR_SECTION |
1373 | mfspr r4, SPRN_MMCR2 | ||
1374 | mfspr r5, SPRN_SIER | 1412 | mfspr r5, SPRN_SIER |
1375 | mfspr r6, SPRN_SPMC1 | 1413 | mfspr r6, SPRN_SPMC1 |
1376 | mfspr r7, SPRN_SPMC2 | 1414 | mfspr r7, SPRN_SPMC2 |
1377 | mfspr r8, SPRN_MMCRS | 1415 | mfspr r8, SPRN_MMCRS |
1378 | std r4, VCPU_MMCR + 24(r9) | ||
1379 | std r5, VCPU_SIER(r9) | 1416 | std r5, VCPU_SIER(r9) |
1380 | stw r6, VCPU_PMC + 24(r9) | 1417 | stw r6, VCPU_PMC + 24(r9) |
1381 | stw r7, VCPU_PMC + 28(r9) | 1418 | stw r7, VCPU_PMC + 28(r9) |
@@ -2107,6 +2144,7 @@ machine_check_realmode: | |||
2107 | beq mc_cont | 2144 | beq mc_cont |
2108 | /* If not, deliver a machine check. SRR0/1 are already set */ | 2145 | /* If not, deliver a machine check. SRR0/1 are already set */ |
2109 | li r10, BOOK3S_INTERRUPT_MACHINE_CHECK | 2146 | li r10, BOOK3S_INTERRUPT_MACHINE_CHECK |
2147 | ld r11, VCPU_MSR(r9) | ||
2110 | bl kvmppc_msr_interrupt | 2148 | bl kvmppc_msr_interrupt |
2111 | b fast_interrupt_c_return | 2149 | b fast_interrupt_c_return |
2112 | 2150 | ||
@@ -2311,3 +2349,21 @@ kvmppc_msr_interrupt: | |||
2311 | li r0, 1 | 2349 | li r0, 1 |
2312 | 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG | 2350 | 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG |
2313 | blr | 2351 | blr |
2352 | |||
2353 | /* | ||
2354 | * This works around a hardware bug on POWER8E processors, where | ||
2355 | * writing a 1 to the MMCR0[PMAO] bit doesn't generate a | ||
2356 | * performance monitor interrupt. Instead, when we need to have | ||
2357 | * an interrupt pending, we have to arrange for a counter to overflow. | ||
2358 | */ | ||
2359 | kvmppc_fix_pmao: | ||
2360 | li r3, 0 | ||
2361 | mtspr SPRN_MMCR2, r3 | ||
2362 | lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h | ||
2363 | ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN | ||
2364 | mtspr SPRN_MMCR0, r3 | ||
2365 | lis r3, 0x7fff | ||
2366 | ori r3, r3, 0xffff | ||
2367 | mtspr SPRN_PMC6, r3 | ||
2368 | isync | ||
2369 | blr | ||
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 3533c999194a..e2c29e381dc7 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
@@ -104,8 +104,27 @@ kvm_start_lightweight: | |||
104 | stb r3, HSTATE_RESTORE_HID5(r13) | 104 | stb r3, HSTATE_RESTORE_HID5(r13) |
105 | 105 | ||
106 | /* Load up guest SPRG3 value, since it's user readable */ | 106 | /* Load up guest SPRG3 value, since it's user readable */ |
107 | ld r3, VCPU_SHARED(r4) | 107 | lwz r3, VCPU_SHAREDBE(r4) |
108 | ld r3, VCPU_SHARED_SPRG3(r3) | 108 | cmpwi r3, 0 |
109 | ld r5, VCPU_SHARED(r4) | ||
110 | beq sprg3_little_endian | ||
111 | sprg3_big_endian: | ||
112 | #ifdef __BIG_ENDIAN__ | ||
113 | ld r3, VCPU_SHARED_SPRG3(r5) | ||
114 | #else | ||
115 | addi r5, r5, VCPU_SHARED_SPRG3 | ||
116 | ldbrx r3, 0, r5 | ||
117 | #endif | ||
118 | b after_sprg3_load | ||
119 | sprg3_little_endian: | ||
120 | #ifdef __LITTLE_ENDIAN__ | ||
121 | ld r3, VCPU_SHARED_SPRG3(r5) | ||
122 | #else | ||
123 | addi r5, r5, VCPU_SHARED_SPRG3 | ||
124 | ldbrx r3, 0, r5 | ||
125 | #endif | ||
126 | |||
127 | after_sprg3_load: | ||
109 | mtspr SPRN_SPRG3, r3 | 128 | mtspr SPRN_SPRG3, r3 |
110 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 129 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
111 | 130 | ||
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index c1abd95063f4..6c8011fd57e6 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c | |||
@@ -165,16 +165,18 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) | |||
165 | 165 | ||
166 | static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) | 166 | static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) |
167 | { | 167 | { |
168 | u64 dsisr; | 168 | u32 dsisr; |
169 | struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; | 169 | u64 msr = kvmppc_get_msr(vcpu); |
170 | 170 | ||
171 | shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0); | 171 | msr = kvmppc_set_field(msr, 33, 36, 0); |
172 | shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0); | 172 | msr = kvmppc_set_field(msr, 42, 47, 0); |
173 | shared->dar = eaddr; | 173 | kvmppc_set_msr(vcpu, msr); |
174 | kvmppc_set_dar(vcpu, eaddr); | ||
174 | /* Page Fault */ | 175 | /* Page Fault */ |
175 | dsisr = kvmppc_set_field(0, 33, 33, 1); | 176 | dsisr = kvmppc_set_field(0, 33, 33, 1); |
176 | if (is_store) | 177 | if (is_store) |
177 | shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); | 178 | dsisr = kvmppc_set_field(dsisr, 38, 38, 1); |
179 | kvmppc_set_dsisr(vcpu, dsisr); | ||
178 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); | 180 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); |
179 | } | 181 | } |
180 | 182 | ||
@@ -660,7 +662,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
660 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) | 662 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) |
661 | return EMULATE_FAIL; | 663 | return EMULATE_FAIL; |
662 | 664 | ||
663 | if (!(vcpu->arch.shared->msr & MSR_FP)) { | 665 | if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { |
664 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); | 666 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); |
665 | return EMULATE_AGAIN; | 667 | return EMULATE_AGAIN; |
666 | } | 668 | } |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index c5c052a9729c..23367a7e44c3 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -53,6 +53,7 @@ | |||
53 | 53 | ||
54 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | 54 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, |
55 | ulong msr); | 55 | ulong msr); |
56 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | ||
56 | 57 | ||
57 | /* Some compatibility defines */ | 58 | /* Some compatibility defines */ |
58 | #ifdef CONFIG_PPC_BOOK3S_32 | 59 | #ifdef CONFIG_PPC_BOOK3S_32 |
@@ -89,6 +90,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
89 | #endif | 90 | #endif |
90 | 91 | ||
91 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); | 92 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
93 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | ||
92 | vcpu->cpu = -1; | 94 | vcpu->cpu = -1; |
93 | } | 95 | } |
94 | 96 | ||
@@ -115,6 +117,9 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | |||
115 | svcpu->ctr = vcpu->arch.ctr; | 117 | svcpu->ctr = vcpu->arch.ctr; |
116 | svcpu->lr = vcpu->arch.lr; | 118 | svcpu->lr = vcpu->arch.lr; |
117 | svcpu->pc = vcpu->arch.pc; | 119 | svcpu->pc = vcpu->arch.pc; |
120 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
121 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; | ||
122 | #endif | ||
118 | svcpu->in_use = true; | 123 | svcpu->in_use = true; |
119 | } | 124 | } |
120 | 125 | ||
@@ -158,6 +163,9 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | |||
158 | vcpu->arch.fault_dar = svcpu->fault_dar; | 163 | vcpu->arch.fault_dar = svcpu->fault_dar; |
159 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 164 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
160 | vcpu->arch.last_inst = svcpu->last_inst; | 165 | vcpu->arch.last_inst = svcpu->last_inst; |
166 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
167 | vcpu->arch.shadow_fscr = svcpu->shadow_fscr; | ||
168 | #endif | ||
161 | svcpu->in_use = false; | 169 | svcpu->in_use = false; |
162 | 170 | ||
163 | out: | 171 | out: |
@@ -246,14 +254,15 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) | |||
246 | 254 | ||
247 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | 255 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) |
248 | { | 256 | { |
249 | ulong smsr = vcpu->arch.shared->msr; | 257 | ulong guest_msr = kvmppc_get_msr(vcpu); |
258 | ulong smsr = guest_msr; | ||
250 | 259 | ||
251 | /* Guest MSR values */ | 260 | /* Guest MSR values */ |
252 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE; | 261 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; |
253 | /* Process MSR values */ | 262 | /* Process MSR values */ |
254 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; | 263 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; |
255 | /* External providers the guest reserved */ | 264 | /* External providers the guest reserved */ |
256 | smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext); | 265 | smsr |= (guest_msr & vcpu->arch.guest_owned_ext); |
257 | /* 64-bit Process MSR values */ | 266 | /* 64-bit Process MSR values */ |
258 | #ifdef CONFIG_PPC_BOOK3S_64 | 267 | #ifdef CONFIG_PPC_BOOK3S_64 |
259 | smsr |= MSR_ISF | MSR_HV; | 268 | smsr |= MSR_ISF | MSR_HV; |
@@ -263,14 +272,14 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | |||
263 | 272 | ||
264 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | 273 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) |
265 | { | 274 | { |
266 | ulong old_msr = vcpu->arch.shared->msr; | 275 | ulong old_msr = kvmppc_get_msr(vcpu); |
267 | 276 | ||
268 | #ifdef EXIT_DEBUG | 277 | #ifdef EXIT_DEBUG |
269 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); | 278 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); |
270 | #endif | 279 | #endif |
271 | 280 | ||
272 | msr &= to_book3s(vcpu)->msr_mask; | 281 | msr &= to_book3s(vcpu)->msr_mask; |
273 | vcpu->arch.shared->msr = msr; | 282 | kvmppc_set_msr_fast(vcpu, msr); |
274 | kvmppc_recalc_shadow_msr(vcpu); | 283 | kvmppc_recalc_shadow_msr(vcpu); |
275 | 284 | ||
276 | if (msr & MSR_POW) { | 285 | if (msr & MSR_POW) { |
@@ -281,11 +290,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | |||
281 | 290 | ||
282 | /* Unset POW bit after we woke up */ | 291 | /* Unset POW bit after we woke up */ |
283 | msr &= ~MSR_POW; | 292 | msr &= ~MSR_POW; |
284 | vcpu->arch.shared->msr = msr; | 293 | kvmppc_set_msr_fast(vcpu, msr); |
285 | } | 294 | } |
286 | } | 295 | } |
287 | 296 | ||
288 | if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) != | 297 | if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != |
289 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { | 298 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { |
290 | kvmppc_mmu_flush_segments(vcpu); | 299 | kvmppc_mmu_flush_segments(vcpu); |
291 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | 300 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
@@ -317,7 +326,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | |||
317 | } | 326 | } |
318 | 327 | ||
319 | /* Preload FPU if it's enabled */ | 328 | /* Preload FPU if it's enabled */ |
320 | if (vcpu->arch.shared->msr & MSR_FP) | 329 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
321 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 330 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
322 | } | 331 | } |
323 | 332 | ||
@@ -427,8 +436,8 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | |||
427 | 436 | ||
428 | /* patch dcbz into reserved instruction, so we trap */ | 437 | /* patch dcbz into reserved instruction, so we trap */ |
429 | for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) | 438 | for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) |
430 | if ((page[i] & 0xff0007ff) == INS_DCBZ) | 439 | if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ) |
431 | page[i] &= 0xfffffff7; | 440 | page[i] &= cpu_to_be32(0xfffffff7); |
432 | 441 | ||
433 | kunmap_atomic(page); | 442 | kunmap_atomic(page); |
434 | put_page(hpage); | 443 | put_page(hpage); |
@@ -438,7 +447,7 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) | |||
438 | { | 447 | { |
439 | ulong mp_pa = vcpu->arch.magic_page_pa; | 448 | ulong mp_pa = vcpu->arch.magic_page_pa; |
440 | 449 | ||
441 | if (!(vcpu->arch.shared->msr & MSR_SF)) | 450 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) |
442 | mp_pa = (uint32_t)mp_pa; | 451 | mp_pa = (uint32_t)mp_pa; |
443 | 452 | ||
444 | if (unlikely(mp_pa) && | 453 | if (unlikely(mp_pa) && |
@@ -459,8 +468,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
459 | int page_found = 0; | 468 | int page_found = 0; |
460 | struct kvmppc_pte pte; | 469 | struct kvmppc_pte pte; |
461 | bool is_mmio = false; | 470 | bool is_mmio = false; |
462 | bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false; | 471 | bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; |
463 | bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false; | 472 | bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; |
464 | u64 vsid; | 473 | u64 vsid; |
465 | 474 | ||
466 | relocated = data ? dr : ir; | 475 | relocated = data ? dr : ir; |
@@ -480,7 +489,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
480 | pte.page_size = MMU_PAGE_64K; | 489 | pte.page_size = MMU_PAGE_64K; |
481 | } | 490 | } |
482 | 491 | ||
483 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 492 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { |
484 | case 0: | 493 | case 0: |
485 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); | 494 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); |
486 | break; | 495 | break; |
@@ -488,7 +497,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
488 | case MSR_IR: | 497 | case MSR_IR: |
489 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); | 498 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); |
490 | 499 | ||
491 | if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR) | 500 | if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) |
492 | pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); | 501 | pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); |
493 | else | 502 | else |
494 | pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); | 503 | pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); |
@@ -511,22 +520,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
511 | 520 | ||
512 | if (page_found == -ENOENT) { | 521 | if (page_found == -ENOENT) { |
513 | /* Page not found in guest PTE entries */ | 522 | /* Page not found in guest PTE entries */ |
514 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); | 523 | u64 ssrr1 = vcpu->arch.shadow_srr1; |
515 | vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr; | 524 | u64 msr = kvmppc_get_msr(vcpu); |
516 | vcpu->arch.shared->msr |= | 525 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
517 | vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; | 526 | kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); |
527 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | ||
518 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 528 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
519 | } else if (page_found == -EPERM) { | 529 | } else if (page_found == -EPERM) { |
520 | /* Storage protection */ | 530 | /* Storage protection */ |
521 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); | 531 | u32 dsisr = vcpu->arch.fault_dsisr; |
522 | vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; | 532 | u64 ssrr1 = vcpu->arch.shadow_srr1; |
523 | vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; | 533 | u64 msr = kvmppc_get_msr(vcpu); |
524 | vcpu->arch.shared->msr |= | 534 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
525 | vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; | 535 | dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT; |
536 | kvmppc_set_dsisr(vcpu, dsisr); | ||
537 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | ||
526 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 538 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
527 | } else if (page_found == -EINVAL) { | 539 | } else if (page_found == -EINVAL) { |
528 | /* Page not found in guest SLB */ | 540 | /* Page not found in guest SLB */ |
529 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); | 541 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
530 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); | 542 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); |
531 | } else if (!is_mmio && | 543 | } else if (!is_mmio && |
532 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { | 544 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { |
@@ -606,6 +618,25 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) | |||
606 | kvmppc_recalc_shadow_msr(vcpu); | 618 | kvmppc_recalc_shadow_msr(vcpu); |
607 | } | 619 | } |
608 | 620 | ||
621 | /* Give up facility (TAR / EBB / DSCR) */ | ||
622 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) | ||
623 | { | ||
624 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
625 | if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { | ||
626 | /* Facility not available to the guest, ignore giveup request*/ | ||
627 | return; | ||
628 | } | ||
629 | |||
630 | switch (fac) { | ||
631 | case FSCR_TAR_LG: | ||
632 | vcpu->arch.tar = mfspr(SPRN_TAR); | ||
633 | mtspr(SPRN_TAR, current->thread.tar); | ||
634 | vcpu->arch.shadow_fscr &= ~FSCR_TAR; | ||
635 | break; | ||
636 | } | ||
637 | #endif | ||
638 | } | ||
639 | |||
609 | static int kvmppc_read_inst(struct kvm_vcpu *vcpu) | 640 | static int kvmppc_read_inst(struct kvm_vcpu *vcpu) |
610 | { | 641 | { |
611 | ulong srr0 = kvmppc_get_pc(vcpu); | 642 | ulong srr0 = kvmppc_get_pc(vcpu); |
@@ -614,11 +645,12 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu) | |||
614 | 645 | ||
615 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); | 646 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); |
616 | if (ret == -ENOENT) { | 647 | if (ret == -ENOENT) { |
617 | ulong msr = vcpu->arch.shared->msr; | 648 | ulong msr = kvmppc_get_msr(vcpu); |
618 | 649 | ||
619 | msr = kvmppc_set_field(msr, 33, 33, 1); | 650 | msr = kvmppc_set_field(msr, 33, 33, 1); |
620 | msr = kvmppc_set_field(msr, 34, 36, 0); | 651 | msr = kvmppc_set_field(msr, 34, 36, 0); |
621 | vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0); | 652 | msr = kvmppc_set_field(msr, 42, 47, 0); |
653 | kvmppc_set_msr_fast(vcpu, msr); | ||
622 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); | 654 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); |
623 | return EMULATE_AGAIN; | 655 | return EMULATE_AGAIN; |
624 | } | 656 | } |
@@ -651,7 +683,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |||
651 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) | 683 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) |
652 | return RESUME_GUEST; | 684 | return RESUME_GUEST; |
653 | 685 | ||
654 | if (!(vcpu->arch.shared->msr & msr)) { | 686 | if (!(kvmppc_get_msr(vcpu) & msr)) { |
655 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 687 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
656 | return RESUME_GUEST; | 688 | return RESUME_GUEST; |
657 | } | 689 | } |
@@ -683,16 +715,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |||
683 | #endif | 715 | #endif |
684 | 716 | ||
685 | if (msr & MSR_FP) { | 717 | if (msr & MSR_FP) { |
718 | preempt_disable(); | ||
686 | enable_kernel_fp(); | 719 | enable_kernel_fp(); |
687 | load_fp_state(&vcpu->arch.fp); | 720 | load_fp_state(&vcpu->arch.fp); |
688 | t->fp_save_area = &vcpu->arch.fp; | 721 | t->fp_save_area = &vcpu->arch.fp; |
722 | preempt_enable(); | ||
689 | } | 723 | } |
690 | 724 | ||
691 | if (msr & MSR_VEC) { | 725 | if (msr & MSR_VEC) { |
692 | #ifdef CONFIG_ALTIVEC | 726 | #ifdef CONFIG_ALTIVEC |
727 | preempt_disable(); | ||
693 | enable_kernel_altivec(); | 728 | enable_kernel_altivec(); |
694 | load_vr_state(&vcpu->arch.vr); | 729 | load_vr_state(&vcpu->arch.vr); |
695 | t->vr_save_area = &vcpu->arch.vr; | 730 | t->vr_save_area = &vcpu->arch.vr; |
731 | preempt_enable(); | ||
696 | #endif | 732 | #endif |
697 | } | 733 | } |
698 | 734 | ||
@@ -716,18 +752,90 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) | |||
716 | return; | 752 | return; |
717 | 753 | ||
718 | if (lost_ext & MSR_FP) { | 754 | if (lost_ext & MSR_FP) { |
755 | preempt_disable(); | ||
719 | enable_kernel_fp(); | 756 | enable_kernel_fp(); |
720 | load_fp_state(&vcpu->arch.fp); | 757 | load_fp_state(&vcpu->arch.fp); |
758 | preempt_enable(); | ||
721 | } | 759 | } |
722 | #ifdef CONFIG_ALTIVEC | 760 | #ifdef CONFIG_ALTIVEC |
723 | if (lost_ext & MSR_VEC) { | 761 | if (lost_ext & MSR_VEC) { |
762 | preempt_disable(); | ||
724 | enable_kernel_altivec(); | 763 | enable_kernel_altivec(); |
725 | load_vr_state(&vcpu->arch.vr); | 764 | load_vr_state(&vcpu->arch.vr); |
765 | preempt_enable(); | ||
726 | } | 766 | } |
727 | #endif | 767 | #endif |
728 | current->thread.regs->msr |= lost_ext; | 768 | current->thread.regs->msr |= lost_ext; |
729 | } | 769 | } |
730 | 770 | ||
771 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
772 | |||
773 | static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) | ||
774 | { | ||
775 | /* Inject the Interrupt Cause field and trigger a guest interrupt */ | ||
776 | vcpu->arch.fscr &= ~(0xffULL << 56); | ||
777 | vcpu->arch.fscr |= (fac << 56); | ||
778 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); | ||
779 | } | ||
780 | |||
781 | static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) | ||
782 | { | ||
783 | enum emulation_result er = EMULATE_FAIL; | ||
784 | |||
785 | if (!(kvmppc_get_msr(vcpu) & MSR_PR)) | ||
786 | er = kvmppc_emulate_instruction(vcpu->run, vcpu); | ||
787 | |||
788 | if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { | ||
789 | /* Couldn't emulate, trigger interrupt in guest */ | ||
790 | kvmppc_trigger_fac_interrupt(vcpu, fac); | ||
791 | } | ||
792 | } | ||
793 | |||
794 | /* Enable facilities (TAR, EBB, DSCR) for the guest */ | ||
795 | static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) | ||
796 | { | ||
797 | bool guest_fac_enabled; | ||
798 | BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S)); | ||
799 | |||
800 | /* | ||
801 | * Not every facility is enabled by FSCR bits, check whether the | ||
802 | * guest has this facility enabled at all. | ||
803 | */ | ||
804 | switch (fac) { | ||
805 | case FSCR_TAR_LG: | ||
806 | case FSCR_EBB_LG: | ||
807 | guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); | ||
808 | break; | ||
809 | case FSCR_TM_LG: | ||
810 | guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; | ||
811 | break; | ||
812 | default: | ||
813 | guest_fac_enabled = false; | ||
814 | break; | ||
815 | } | ||
816 | |||
817 | if (!guest_fac_enabled) { | ||
818 | /* Facility not enabled by the guest */ | ||
819 | kvmppc_trigger_fac_interrupt(vcpu, fac); | ||
820 | return RESUME_GUEST; | ||
821 | } | ||
822 | |||
823 | switch (fac) { | ||
824 | case FSCR_TAR_LG: | ||
825 | /* TAR switching isn't lazy in Linux yet */ | ||
826 | current->thread.tar = mfspr(SPRN_TAR); | ||
827 | mtspr(SPRN_TAR, vcpu->arch.tar); | ||
828 | vcpu->arch.shadow_fscr |= FSCR_TAR; | ||
829 | break; | ||
830 | default: | ||
831 | kvmppc_emulate_fac(vcpu, fac); | ||
832 | break; | ||
833 | } | ||
834 | |||
835 | return RESUME_GUEST; | ||
836 | } | ||
837 | #endif | ||
838 | |||
731 | int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | 839 | int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
732 | unsigned int exit_nr) | 840 | unsigned int exit_nr) |
733 | { | 841 | { |
@@ -784,7 +892,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
784 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); | 892 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); |
785 | r = RESUME_GUEST; | 893 | r = RESUME_GUEST; |
786 | } else { | 894 | } else { |
787 | vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000; | 895 | u64 msr = kvmppc_get_msr(vcpu); |
896 | msr |= shadow_srr1 & 0x58000000; | ||
897 | kvmppc_set_msr_fast(vcpu, msr); | ||
788 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 898 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
789 | r = RESUME_GUEST; | 899 | r = RESUME_GUEST; |
790 | } | 900 | } |
@@ -824,8 +934,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
824 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); | 934 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
825 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | 935 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
826 | } else { | 936 | } else { |
827 | vcpu->arch.shared->dar = dar; | 937 | kvmppc_set_dar(vcpu, dar); |
828 | vcpu->arch.shared->dsisr = fault_dsisr; | 938 | kvmppc_set_dsisr(vcpu, fault_dsisr); |
829 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 939 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
830 | r = RESUME_GUEST; | 940 | r = RESUME_GUEST; |
831 | } | 941 | } |
@@ -833,7 +943,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
833 | } | 943 | } |
834 | case BOOK3S_INTERRUPT_DATA_SEGMENT: | 944 | case BOOK3S_INTERRUPT_DATA_SEGMENT: |
835 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { | 945 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { |
836 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); | 946 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
837 | kvmppc_book3s_queue_irqprio(vcpu, | 947 | kvmppc_book3s_queue_irqprio(vcpu, |
838 | BOOK3S_INTERRUPT_DATA_SEGMENT); | 948 | BOOK3S_INTERRUPT_DATA_SEGMENT); |
839 | } | 949 | } |
@@ -871,7 +981,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
871 | program_interrupt: | 981 | program_interrupt: |
872 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; | 982 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; |
873 | 983 | ||
874 | if (vcpu->arch.shared->msr & MSR_PR) { | 984 | if (kvmppc_get_msr(vcpu) & MSR_PR) { |
875 | #ifdef EXIT_DEBUG | 985 | #ifdef EXIT_DEBUG |
876 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); | 986 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); |
877 | #endif | 987 | #endif |
@@ -913,7 +1023,7 @@ program_interrupt: | |||
913 | case BOOK3S_INTERRUPT_SYSCALL: | 1023 | case BOOK3S_INTERRUPT_SYSCALL: |
914 | if (vcpu->arch.papr_enabled && | 1024 | if (vcpu->arch.papr_enabled && |
915 | (kvmppc_get_last_sc(vcpu) == 0x44000022) && | 1025 | (kvmppc_get_last_sc(vcpu) == 0x44000022) && |
916 | !(vcpu->arch.shared->msr & MSR_PR)) { | 1026 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
917 | /* SC 1 papr hypercalls */ | 1027 | /* SC 1 papr hypercalls */ |
918 | ulong cmd = kvmppc_get_gpr(vcpu, 3); | 1028 | ulong cmd = kvmppc_get_gpr(vcpu, 3); |
919 | int i; | 1029 | int i; |
@@ -945,7 +1055,7 @@ program_interrupt: | |||
945 | gprs[i] = kvmppc_get_gpr(vcpu, i); | 1055 | gprs[i] = kvmppc_get_gpr(vcpu, i); |
946 | vcpu->arch.osi_needed = 1; | 1056 | vcpu->arch.osi_needed = 1; |
947 | r = RESUME_HOST_NV; | 1057 | r = RESUME_HOST_NV; |
948 | } else if (!(vcpu->arch.shared->msr & MSR_PR) && | 1058 | } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && |
949 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | 1059 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { |
950 | /* KVM PV hypercalls */ | 1060 | /* KVM PV hypercalls */ |
951 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | 1061 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); |
@@ -986,14 +1096,26 @@ program_interrupt: | |||
986 | } | 1096 | } |
987 | case BOOK3S_INTERRUPT_ALIGNMENT: | 1097 | case BOOK3S_INTERRUPT_ALIGNMENT: |
988 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { | 1098 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { |
989 | vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu, | 1099 | u32 last_inst = kvmppc_get_last_inst(vcpu); |
990 | kvmppc_get_last_inst(vcpu)); | 1100 | u32 dsisr; |
991 | vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu, | 1101 | u64 dar; |
992 | kvmppc_get_last_inst(vcpu)); | 1102 | |
1103 | dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); | ||
1104 | dar = kvmppc_alignment_dar(vcpu, last_inst); | ||
1105 | |||
1106 | kvmppc_set_dsisr(vcpu, dsisr); | ||
1107 | kvmppc_set_dar(vcpu, dar); | ||
1108 | |||
993 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 1109 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
994 | } | 1110 | } |
995 | r = RESUME_GUEST; | 1111 | r = RESUME_GUEST; |
996 | break; | 1112 | break; |
1113 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
1114 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: | ||
1115 | kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); | ||
1116 | r = RESUME_GUEST; | ||
1117 | break; | ||
1118 | #endif | ||
997 | case BOOK3S_INTERRUPT_MACHINE_CHECK: | 1119 | case BOOK3S_INTERRUPT_MACHINE_CHECK: |
998 | case BOOK3S_INTERRUPT_TRACE: | 1120 | case BOOK3S_INTERRUPT_TRACE: |
999 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 1121 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
@@ -1054,7 +1176,7 @@ static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, | |||
1054 | } | 1176 | } |
1055 | } else { | 1177 | } else { |
1056 | for (i = 0; i < 16; i++) | 1178 | for (i = 0; i < 16; i++) |
1057 | sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i]; | 1179 | sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); |
1058 | 1180 | ||
1059 | for (i = 0; i < 8; i++) { | 1181 | for (i = 0; i < 8; i++) { |
1060 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; | 1182 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; |
@@ -1110,6 +1232,15 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
1110 | case KVM_REG_PPC_HIOR: | 1232 | case KVM_REG_PPC_HIOR: |
1111 | *val = get_reg_val(id, to_book3s(vcpu)->hior); | 1233 | *val = get_reg_val(id, to_book3s(vcpu)->hior); |
1112 | break; | 1234 | break; |
1235 | case KVM_REG_PPC_LPCR: | ||
1236 | /* | ||
1237 | * We are only interested in the LPCR_ILE bit | ||
1238 | */ | ||
1239 | if (vcpu->arch.intr_msr & MSR_LE) | ||
1240 | *val = get_reg_val(id, LPCR_ILE); | ||
1241 | else | ||
1242 | *val = get_reg_val(id, 0); | ||
1243 | break; | ||
1113 | default: | 1244 | default: |
1114 | r = -EINVAL; | 1245 | r = -EINVAL; |
1115 | break; | 1246 | break; |
@@ -1118,6 +1249,14 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
1118 | return r; | 1249 | return r; |
1119 | } | 1250 | } |
1120 | 1251 | ||
1252 | static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) | ||
1253 | { | ||
1254 | if (new_lpcr & LPCR_ILE) | ||
1255 | vcpu->arch.intr_msr |= MSR_LE; | ||
1256 | else | ||
1257 | vcpu->arch.intr_msr &= ~MSR_LE; | ||
1258 | } | ||
1259 | |||
1121 | static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | 1260 | static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1122 | union kvmppc_one_reg *val) | 1261 | union kvmppc_one_reg *val) |
1123 | { | 1262 | { |
@@ -1128,6 +1267,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
1128 | to_book3s(vcpu)->hior = set_reg_val(id, *val); | 1267 | to_book3s(vcpu)->hior = set_reg_val(id, *val); |
1129 | to_book3s(vcpu)->hior_explicit = true; | 1268 | to_book3s(vcpu)->hior_explicit = true; |
1130 | break; | 1269 | break; |
1270 | case KVM_REG_PPC_LPCR: | ||
1271 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); | ||
1272 | break; | ||
1131 | default: | 1273 | default: |
1132 | r = -EINVAL; | 1274 | r = -EINVAL; |
1133 | break; | 1275 | break; |
@@ -1170,8 +1312,14 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, | |||
1170 | goto uninit_vcpu; | 1312 | goto uninit_vcpu; |
1171 | /* the real shared page fills the last 4k of our page */ | 1313 | /* the real shared page fills the last 4k of our page */ |
1172 | vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); | 1314 | vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); |
1173 | |||
1174 | #ifdef CONFIG_PPC_BOOK3S_64 | 1315 | #ifdef CONFIG_PPC_BOOK3S_64 |
1316 | /* Always start the shared struct in native endian mode */ | ||
1317 | #ifdef __BIG_ENDIAN__ | ||
1318 | vcpu->arch.shared_big_endian = true; | ||
1319 | #else | ||
1320 | vcpu->arch.shared_big_endian = false; | ||
1321 | #endif | ||
1322 | |||
1175 | /* | 1323 | /* |
1176 | * Default to the same as the host if we're on sufficiently | 1324 | * Default to the same as the host if we're on sufficiently |
1177 | * recent machine that we have 1TB segments; | 1325 | * recent machine that we have 1TB segments; |
@@ -1180,6 +1328,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, | |||
1180 | vcpu->arch.pvr = 0x3C0301; | 1328 | vcpu->arch.pvr = 0x3C0301; |
1181 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | 1329 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
1182 | vcpu->arch.pvr = mfspr(SPRN_PVR); | 1330 | vcpu->arch.pvr = mfspr(SPRN_PVR); |
1331 | vcpu->arch.intr_msr = MSR_SF; | ||
1183 | #else | 1332 | #else |
1184 | /* default to book3s_32 (750) */ | 1333 | /* default to book3s_32 (750) */ |
1185 | vcpu->arch.pvr = 0x84202; | 1334 | vcpu->arch.pvr = 0x84202; |
@@ -1187,7 +1336,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, | |||
1187 | kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); | 1336 | kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); |
1188 | vcpu->arch.slb_nr = 64; | 1337 | vcpu->arch.slb_nr = 64; |
1189 | 1338 | ||
1190 | vcpu->arch.shadow_msr = MSR_USER64; | 1339 | vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; |
1191 | 1340 | ||
1192 | err = kvmppc_mmu_init(vcpu); | 1341 | err = kvmppc_mmu_init(vcpu); |
1193 | if (err < 0) | 1342 | if (err < 0) |
@@ -1264,7 +1413,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
1264 | #endif | 1413 | #endif |
1265 | 1414 | ||
1266 | /* Preload FPU if it's enabled */ | 1415 | /* Preload FPU if it's enabled */ |
1267 | if (vcpu->arch.shared->msr & MSR_FP) | 1416 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
1268 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 1417 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
1269 | 1418 | ||
1270 | kvmppc_fix_ee_before_entry(); | 1419 | kvmppc_fix_ee_before_entry(); |
@@ -1277,6 +1426,9 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
1277 | /* Make sure we save the guest FPU/Altivec/VSX state */ | 1426 | /* Make sure we save the guest FPU/Altivec/VSX state */ |
1278 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); | 1427 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
1279 | 1428 | ||
1429 | /* Make sure we save the guest TAR/EBB/DSCR state */ | ||
1430 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | ||
1431 | |||
1280 | out: | 1432 | out: |
1281 | vcpu->mode = OUTSIDE_GUEST_MODE; | 1433 | vcpu->mode = OUTSIDE_GUEST_MODE; |
1282 | return ret; | 1434 | return ret; |
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index 5efa97b993d8..52a63bfe3f07 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c | |||
@@ -57,7 +57,7 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) | |||
57 | for (i = 0; ; ++i) { | 57 | for (i = 0; ; ++i) { |
58 | if (i == 8) | 58 | if (i == 8) |
59 | goto done; | 59 | goto done; |
60 | if ((*hpte & HPTE_V_VALID) == 0) | 60 | if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0) |
61 | break; | 61 | break; |
62 | hpte += 2; | 62 | hpte += 2; |
63 | } | 63 | } |
@@ -67,8 +67,8 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) | |||
67 | goto done; | 67 | goto done; |
68 | } | 68 | } |
69 | 69 | ||
70 | hpte[0] = kvmppc_get_gpr(vcpu, 6); | 70 | hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); |
71 | hpte[1] = kvmppc_get_gpr(vcpu, 7); | 71 | hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); |
72 | pteg_addr += i * HPTE_SIZE; | 72 | pteg_addr += i * HPTE_SIZE; |
73 | copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); | 73 | copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); |
74 | kvmppc_set_gpr(vcpu, 4, pte_index | i); | 74 | kvmppc_set_gpr(vcpu, 4, pte_index | i); |
@@ -93,6 +93,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) | |||
93 | pteg = get_pteg_addr(vcpu, pte_index); | 93 | pteg = get_pteg_addr(vcpu, pte_index); |
94 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | 94 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
95 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 95 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
96 | pte[0] = be64_to_cpu(pte[0]); | ||
97 | pte[1] = be64_to_cpu(pte[1]); | ||
96 | 98 | ||
97 | ret = H_NOT_FOUND; | 99 | ret = H_NOT_FOUND; |
98 | if ((pte[0] & HPTE_V_VALID) == 0 || | 100 | if ((pte[0] & HPTE_V_VALID) == 0 || |
@@ -169,6 +171,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | |||
169 | 171 | ||
170 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); | 172 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); |
171 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 173 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
174 | pte[0] = be64_to_cpu(pte[0]); | ||
175 | pte[1] = be64_to_cpu(pte[1]); | ||
172 | 176 | ||
173 | /* tsl = AVPN */ | 177 | /* tsl = AVPN */ |
174 | flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; | 178 | flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; |
@@ -207,6 +211,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
207 | pteg = get_pteg_addr(vcpu, pte_index); | 211 | pteg = get_pteg_addr(vcpu, pte_index); |
208 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | 212 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
209 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 213 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
214 | pte[0] = be64_to_cpu(pte[0]); | ||
215 | pte[1] = be64_to_cpu(pte[1]); | ||
210 | 216 | ||
211 | ret = H_NOT_FOUND; | 217 | ret = H_NOT_FOUND; |
212 | if ((pte[0] & HPTE_V_VALID) == 0 || | 218 | if ((pte[0] & HPTE_V_VALID) == 0 || |
@@ -225,6 +231,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
225 | 231 | ||
226 | rb = compute_tlbie_rb(v, r, pte_index); | 232 | rb = compute_tlbie_rb(v, r, pte_index); |
227 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | 233 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
234 | pte[0] = cpu_to_be64(pte[0]); | ||
235 | pte[1] = cpu_to_be64(pte[1]); | ||
228 | copy_to_user((void __user *)pteg, pte, sizeof(pte)); | 236 | copy_to_user((void __user *)pteg, pte, sizeof(pte)); |
229 | ret = H_SUCCESS; | 237 | ret = H_SUCCESS; |
230 | 238 | ||
@@ -270,7 +278,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | |||
270 | case H_PUT_TCE: | 278 | case H_PUT_TCE: |
271 | return kvmppc_h_pr_put_tce(vcpu); | 279 | return kvmppc_h_pr_put_tce(vcpu); |
272 | case H_CEDE: | 280 | case H_CEDE: |
273 | vcpu->arch.shared->msr |= MSR_EE; | 281 | kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); |
274 | kvm_vcpu_block(vcpu); | 282 | kvm_vcpu_block(vcpu); |
275 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | 283 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
276 | vcpu->stat.halt_wakeup++; | 284 | vcpu->stat.halt_wakeup++; |
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 7a053157483b..edb14ba992b3 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c | |||
@@ -205,6 +205,32 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) | |||
205 | return rc; | 205 | return rc; |
206 | } | 206 | } |
207 | 207 | ||
208 | static void kvmppc_rtas_swap_endian_in(struct rtas_args *args) | ||
209 | { | ||
210 | #ifdef __LITTLE_ENDIAN__ | ||
211 | int i; | ||
212 | |||
213 | args->token = be32_to_cpu(args->token); | ||
214 | args->nargs = be32_to_cpu(args->nargs); | ||
215 | args->nret = be32_to_cpu(args->nret); | ||
216 | for (i = 0; i < args->nargs; i++) | ||
217 | args->args[i] = be32_to_cpu(args->args[i]); | ||
218 | #endif | ||
219 | } | ||
220 | |||
221 | static void kvmppc_rtas_swap_endian_out(struct rtas_args *args) | ||
222 | { | ||
223 | #ifdef __LITTLE_ENDIAN__ | ||
224 | int i; | ||
225 | |||
226 | for (i = 0; i < args->nret; i++) | ||
227 | args->args[i] = cpu_to_be32(args->args[i]); | ||
228 | args->token = cpu_to_be32(args->token); | ||
229 | args->nargs = cpu_to_be32(args->nargs); | ||
230 | args->nret = cpu_to_be32(args->nret); | ||
231 | #endif | ||
232 | } | ||
233 | |||
208 | int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) | 234 | int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) |
209 | { | 235 | { |
210 | struct rtas_token_definition *d; | 236 | struct rtas_token_definition *d; |
@@ -223,6 +249,8 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) | |||
223 | if (rc) | 249 | if (rc) |
224 | goto fail; | 250 | goto fail; |
225 | 251 | ||
252 | kvmppc_rtas_swap_endian_in(&args); | ||
253 | |||
226 | /* | 254 | /* |
227 | * args->rets is a pointer into args->args. Now that we've | 255 | * args->rets is a pointer into args->args. Now that we've |
228 | * copied args we need to fix it up to point into our copy, | 256 | * copied args we need to fix it up to point into our copy, |
@@ -247,6 +275,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) | |||
247 | 275 | ||
248 | if (rc == 0) { | 276 | if (rc == 0) { |
249 | args.rets = orig_rets; | 277 | args.rets = orig_rets; |
278 | kvmppc_rtas_swap_endian_out(&args); | ||
250 | rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); | 279 | rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); |
251 | if (rc) | 280 | if (rc) |
252 | goto fail; | 281 | goto fail; |
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 1e0cc2adfd40..acee37cde840 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
@@ -90,6 +90,15 @@ kvmppc_handler_trampoline_enter: | |||
90 | LOAD_GUEST_SEGMENTS | 90 | LOAD_GUEST_SEGMENTS |
91 | 91 | ||
92 | #ifdef CONFIG_PPC_BOOK3S_64 | 92 | #ifdef CONFIG_PPC_BOOK3S_64 |
93 | BEGIN_FTR_SECTION | ||
94 | /* Save host FSCR */ | ||
95 | mfspr r8, SPRN_FSCR | ||
96 | std r8, HSTATE_HOST_FSCR(r13) | ||
97 | /* Set FSCR during guest execution */ | ||
98 | ld r9, SVCPU_SHADOW_FSCR(r13) | ||
99 | mtspr SPRN_FSCR, r9 | ||
100 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | ||
101 | |||
93 | /* Some guests may need to have dcbz set to 32 byte length. | 102 | /* Some guests may need to have dcbz set to 32 byte length. |
94 | * | 103 | * |
95 | * Usually we ensure that by patching the guest's instructions | 104 | * Usually we ensure that by patching the guest's instructions |
@@ -255,6 +264,10 @@ BEGIN_FTR_SECTION | |||
255 | cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST | 264 | cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST |
256 | beq- ld_last_inst | 265 | beq- ld_last_inst |
257 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | 266 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) |
267 | BEGIN_FTR_SECTION | ||
268 | cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL | ||
269 | beq- ld_last_inst | ||
270 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | ||
258 | #endif | 271 | #endif |
259 | 272 | ||
260 | b no_ld_last_inst | 273 | b no_ld_last_inst |
@@ -311,6 +324,18 @@ no_ld_last_inst: | |||
311 | 324 | ||
312 | no_dcbz32_off: | 325 | no_dcbz32_off: |
313 | 326 | ||
327 | BEGIN_FTR_SECTION | ||
328 | /* Save guest FSCR on a FAC_UNAVAIL interrupt */ | ||
329 | cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL | ||
330 | bne+ no_fscr_save | ||
331 | mfspr r7, SPRN_FSCR | ||
332 | std r7, SVCPU_SHADOW_FSCR(r13) | ||
333 | no_fscr_save: | ||
334 | /* Restore host FSCR */ | ||
335 | ld r8, HSTATE_HOST_FSCR(r13) | ||
336 | mtspr SPRN_FSCR, r8 | ||
337 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | ||
338 | |||
314 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 339 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
315 | 340 | ||
316 | /* | 341 | /* |
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 89b7f821f6c4..002d51764143 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include "booke.h" | 19 | #include "booke.h" |
20 | #include "e500.h" | 20 | #include "e500.h" |
21 | 21 | ||
22 | #define XOP_DCBTLS 166 | ||
22 | #define XOP_MSGSND 206 | 23 | #define XOP_MSGSND 206 |
23 | #define XOP_MSGCLR 238 | 24 | #define XOP_MSGCLR 238 |
24 | #define XOP_TLBIVAX 786 | 25 | #define XOP_TLBIVAX 786 |
@@ -103,6 +104,15 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
103 | return emulated; | 104 | return emulated; |
104 | } | 105 | } |
105 | 106 | ||
107 | static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu) | ||
108 | { | ||
109 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
110 | |||
111 | /* Always fail to lock the cache */ | ||
112 | vcpu_e500->l1csr0 |= L1CSR0_CUL; | ||
113 | return EMULATE_DONE; | ||
114 | } | ||
115 | |||
106 | int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, | 116 | int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, |
107 | unsigned int inst, int *advance) | 117 | unsigned int inst, int *advance) |
108 | { | 118 | { |
@@ -116,6 +126,10 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
116 | case 31: | 126 | case 31: |
117 | switch (get_xop(inst)) { | 127 | switch (get_xop(inst)) { |
118 | 128 | ||
129 | case XOP_DCBTLS: | ||
130 | emulated = kvmppc_e500_emul_dcbtls(vcpu); | ||
131 | break; | ||
132 | |||
119 | #ifdef CONFIG_KVM_E500MC | 133 | #ifdef CONFIG_KVM_E500MC |
120 | case XOP_MSGSND: | 134 | case XOP_MSGSND: |
121 | emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); | 135 | emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); |
@@ -222,6 +236,7 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va | |||
222 | break; | 236 | break; |
223 | case SPRN_L1CSR1: | 237 | case SPRN_L1CSR1: |
224 | vcpu_e500->l1csr1 = spr_val; | 238 | vcpu_e500->l1csr1 = spr_val; |
239 | vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR); | ||
225 | break; | 240 | break; |
226 | case SPRN_HID0: | 241 | case SPRN_HID0: |
227 | vcpu_e500->hid0 = spr_val; | 242 | vcpu_e500->hid0 = spr_val; |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index c2b887be2c29..da86d9ba3476 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -97,10 +97,10 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
97 | 97 | ||
98 | switch (sprn) { | 98 | switch (sprn) { |
99 | case SPRN_SRR0: | 99 | case SPRN_SRR0: |
100 | vcpu->arch.shared->srr0 = spr_val; | 100 | kvmppc_set_srr0(vcpu, spr_val); |
101 | break; | 101 | break; |
102 | case SPRN_SRR1: | 102 | case SPRN_SRR1: |
103 | vcpu->arch.shared->srr1 = spr_val; | 103 | kvmppc_set_srr1(vcpu, spr_val); |
104 | break; | 104 | break; |
105 | 105 | ||
106 | /* XXX We need to context-switch the timebase for | 106 | /* XXX We need to context-switch the timebase for |
@@ -114,16 +114,16 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
114 | break; | 114 | break; |
115 | 115 | ||
116 | case SPRN_SPRG0: | 116 | case SPRN_SPRG0: |
117 | vcpu->arch.shared->sprg0 = spr_val; | 117 | kvmppc_set_sprg0(vcpu, spr_val); |
118 | break; | 118 | break; |
119 | case SPRN_SPRG1: | 119 | case SPRN_SPRG1: |
120 | vcpu->arch.shared->sprg1 = spr_val; | 120 | kvmppc_set_sprg1(vcpu, spr_val); |
121 | break; | 121 | break; |
122 | case SPRN_SPRG2: | 122 | case SPRN_SPRG2: |
123 | vcpu->arch.shared->sprg2 = spr_val; | 123 | kvmppc_set_sprg2(vcpu, spr_val); |
124 | break; | 124 | break; |
125 | case SPRN_SPRG3: | 125 | case SPRN_SPRG3: |
126 | vcpu->arch.shared->sprg3 = spr_val; | 126 | kvmppc_set_sprg3(vcpu, spr_val); |
127 | break; | 127 | break; |
128 | 128 | ||
129 | /* PIR can legally be written, but we ignore it */ | 129 | /* PIR can legally be written, but we ignore it */ |
@@ -150,10 +150,10 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
150 | 150 | ||
151 | switch (sprn) { | 151 | switch (sprn) { |
152 | case SPRN_SRR0: | 152 | case SPRN_SRR0: |
153 | spr_val = vcpu->arch.shared->srr0; | 153 | spr_val = kvmppc_get_srr0(vcpu); |
154 | break; | 154 | break; |
155 | case SPRN_SRR1: | 155 | case SPRN_SRR1: |
156 | spr_val = vcpu->arch.shared->srr1; | 156 | spr_val = kvmppc_get_srr1(vcpu); |
157 | break; | 157 | break; |
158 | case SPRN_PVR: | 158 | case SPRN_PVR: |
159 | spr_val = vcpu->arch.pvr; | 159 | spr_val = vcpu->arch.pvr; |
@@ -173,16 +173,16 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
173 | break; | 173 | break; |
174 | 174 | ||
175 | case SPRN_SPRG0: | 175 | case SPRN_SPRG0: |
176 | spr_val = vcpu->arch.shared->sprg0; | 176 | spr_val = kvmppc_get_sprg0(vcpu); |
177 | break; | 177 | break; |
178 | case SPRN_SPRG1: | 178 | case SPRN_SPRG1: |
179 | spr_val = vcpu->arch.shared->sprg1; | 179 | spr_val = kvmppc_get_sprg1(vcpu); |
180 | break; | 180 | break; |
181 | case SPRN_SPRG2: | 181 | case SPRN_SPRG2: |
182 | spr_val = vcpu->arch.shared->sprg2; | 182 | spr_val = kvmppc_get_sprg2(vcpu); |
183 | break; | 183 | break; |
184 | case SPRN_SPRG3: | 184 | case SPRN_SPRG3: |
185 | spr_val = vcpu->arch.shared->sprg3; | 185 | spr_val = kvmppc_get_sprg3(vcpu); |
186 | break; | 186 | break; |
187 | /* Note: SPRG4-7 are user-readable, so we don't get | 187 | /* Note: SPRG4-7 are user-readable, so we don't get |
188 | * a trap. */ | 188 | * a trap. */ |
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c index efbd9962a209..b68d0dc9479a 100644 --- a/arch/powerpc/kvm/mpic.c +++ b/arch/powerpc/kvm/mpic.c | |||
@@ -126,6 +126,8 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr, | |||
126 | u32 val, int idx); | 126 | u32 val, int idx); |
127 | static int openpic_cpu_read_internal(void *opaque, gpa_t addr, | 127 | static int openpic_cpu_read_internal(void *opaque, gpa_t addr, |
128 | u32 *ptr, int idx); | 128 | u32 *ptr, int idx); |
129 | static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ, | ||
130 | uint32_t val); | ||
129 | 131 | ||
130 | enum irq_type { | 132 | enum irq_type { |
131 | IRQ_TYPE_NORMAL = 0, | 133 | IRQ_TYPE_NORMAL = 0, |
@@ -528,7 +530,6 @@ static void openpic_reset(struct openpic *opp) | |||
528 | /* Initialise IRQ sources */ | 530 | /* Initialise IRQ sources */ |
529 | for (i = 0; i < opp->max_irq; i++) { | 531 | for (i = 0; i < opp->max_irq; i++) { |
530 | opp->src[i].ivpr = opp->ivpr_reset; | 532 | opp->src[i].ivpr = opp->ivpr_reset; |
531 | opp->src[i].idr = opp->idr_reset; | ||
532 | 533 | ||
533 | switch (opp->src[i].type) { | 534 | switch (opp->src[i].type) { |
534 | case IRQ_TYPE_NORMAL: | 535 | case IRQ_TYPE_NORMAL: |
@@ -543,6 +544,8 @@ static void openpic_reset(struct openpic *opp) | |||
543 | case IRQ_TYPE_FSLSPECIAL: | 544 | case IRQ_TYPE_FSLSPECIAL: |
544 | break; | 545 | break; |
545 | } | 546 | } |
547 | |||
548 | write_IRQreg_idr(opp, i, opp->idr_reset); | ||
546 | } | 549 | } |
547 | /* Initialise IRQ destinations */ | 550 | /* Initialise IRQ destinations */ |
548 | for (i = 0; i < MAX_CPU; i++) { | 551 | for (i = 0; i < MAX_CPU; i++) { |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 3cf541a53e2a..bab20f410443 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -125,6 +125,27 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) | |||
125 | } | 125 | } |
126 | EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); | 126 | EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); |
127 | 127 | ||
128 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) | ||
129 | static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) | ||
130 | { | ||
131 | struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; | ||
132 | int i; | ||
133 | |||
134 | shared->sprg0 = swab64(shared->sprg0); | ||
135 | shared->sprg1 = swab64(shared->sprg1); | ||
136 | shared->sprg2 = swab64(shared->sprg2); | ||
137 | shared->sprg3 = swab64(shared->sprg3); | ||
138 | shared->srr0 = swab64(shared->srr0); | ||
139 | shared->srr1 = swab64(shared->srr1); | ||
140 | shared->dar = swab64(shared->dar); | ||
141 | shared->msr = swab64(shared->msr); | ||
142 | shared->dsisr = swab32(shared->dsisr); | ||
143 | shared->int_pending = swab32(shared->int_pending); | ||
144 | for (i = 0; i < ARRAY_SIZE(shared->sr); i++) | ||
145 | shared->sr[i] = swab32(shared->sr[i]); | ||
146 | } | ||
147 | #endif | ||
148 | |||
128 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | 149 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) |
129 | { | 150 | { |
130 | int nr = kvmppc_get_gpr(vcpu, 11); | 151 | int nr = kvmppc_get_gpr(vcpu, 11); |
@@ -135,7 +156,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | |||
135 | unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); | 156 | unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); |
136 | unsigned long r2 = 0; | 157 | unsigned long r2 = 0; |
137 | 158 | ||
138 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | 159 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { |
139 | /* 32 bit mode */ | 160 | /* 32 bit mode */ |
140 | param1 &= 0xffffffff; | 161 | param1 &= 0xffffffff; |
141 | param2 &= 0xffffffff; | 162 | param2 &= 0xffffffff; |
@@ -146,8 +167,28 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | |||
146 | switch (nr) { | 167 | switch (nr) { |
147 | case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): | 168 | case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): |
148 | { | 169 | { |
149 | vcpu->arch.magic_page_pa = param1; | 170 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) |
150 | vcpu->arch.magic_page_ea = param2; | 171 | /* Book3S can be little endian, find it out here */ |
172 | int shared_big_endian = true; | ||
173 | if (vcpu->arch.intr_msr & MSR_LE) | ||
174 | shared_big_endian = false; | ||
175 | if (shared_big_endian != vcpu->arch.shared_big_endian) | ||
176 | kvmppc_swab_shared(vcpu); | ||
177 | vcpu->arch.shared_big_endian = shared_big_endian; | ||
178 | #endif | ||
179 | |||
180 | if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) { | ||
181 | /* | ||
182 | * Older versions of the Linux magic page code had | ||
183 | * a bug where they would map their trampoline code | ||
184 | * NX. If that's the case, remove !PR NX capability. | ||
185 | */ | ||
186 | vcpu->arch.disable_kernel_nx = true; | ||
187 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | ||
188 | } | ||
189 | |||
190 | vcpu->arch.magic_page_pa = param1 & ~0xfffULL; | ||
191 | vcpu->arch.magic_page_ea = param2 & ~0xfffULL; | ||
151 | 192 | ||
152 | r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; | 193 | r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; |
153 | 194 | ||
@@ -375,6 +416,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
375 | case KVM_CAP_SPAPR_TCE: | 416 | case KVM_CAP_SPAPR_TCE: |
376 | case KVM_CAP_PPC_ALLOC_HTAB: | 417 | case KVM_CAP_PPC_ALLOC_HTAB: |
377 | case KVM_CAP_PPC_RTAS: | 418 | case KVM_CAP_PPC_RTAS: |
419 | case KVM_CAP_PPC_FIXUP_HCALL: | ||
378 | #ifdef CONFIG_KVM_XICS | 420 | #ifdef CONFIG_KVM_XICS |
379 | case KVM_CAP_IRQ_XICS: | 421 | case KVM_CAP_IRQ_XICS: |
380 | #endif | 422 | #endif |
@@ -1015,10 +1057,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) | |||
1015 | u32 inst_nop = 0x60000000; | 1057 | u32 inst_nop = 0x60000000; |
1016 | #ifdef CONFIG_KVM_BOOKE_HV | 1058 | #ifdef CONFIG_KVM_BOOKE_HV |
1017 | u32 inst_sc1 = 0x44000022; | 1059 | u32 inst_sc1 = 0x44000022; |
1018 | pvinfo->hcall[0] = inst_sc1; | 1060 | pvinfo->hcall[0] = cpu_to_be32(inst_sc1); |
1019 | pvinfo->hcall[1] = inst_nop; | 1061 | pvinfo->hcall[1] = cpu_to_be32(inst_nop); |
1020 | pvinfo->hcall[2] = inst_nop; | 1062 | pvinfo->hcall[2] = cpu_to_be32(inst_nop); |
1021 | pvinfo->hcall[3] = inst_nop; | 1063 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
1022 | #else | 1064 | #else |
1023 | u32 inst_lis = 0x3c000000; | 1065 | u32 inst_lis = 0x3c000000; |
1024 | u32 inst_ori = 0x60000000; | 1066 | u32 inst_ori = 0x60000000; |
@@ -1034,10 +1076,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) | |||
1034 | * sc | 1076 | * sc |
1035 | * nop | 1077 | * nop |
1036 | */ | 1078 | */ |
1037 | pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); | 1079 | pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); |
1038 | pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); | 1080 | pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); |
1039 | pvinfo->hcall[2] = inst_sc; | 1081 | pvinfo->hcall[2] = cpu_to_be32(inst_sc); |
1040 | pvinfo->hcall[3] = inst_nop; | 1082 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
1041 | #endif | 1083 | #endif |
1042 | 1084 | ||
1043 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; | 1085 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; |
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h index 8b22e4748344..e1357cd8dc1f 100644 --- a/arch/powerpc/kvm/trace_pr.h +++ b/arch/powerpc/kvm/trace_pr.h | |||
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_exit, | |||
255 | __entry->exit_nr = exit_nr; | 255 | __entry->exit_nr = exit_nr; |
256 | __entry->pc = kvmppc_get_pc(vcpu); | 256 | __entry->pc = kvmppc_get_pc(vcpu); |
257 | __entry->dar = kvmppc_get_fault_dar(vcpu); | 257 | __entry->dar = kvmppc_get_fault_dar(vcpu); |
258 | __entry->msr = vcpu->arch.shared->msr; | 258 | __entry->msr = kvmppc_get_msr(vcpu); |
259 | __entry->srr1 = vcpu->arch.shadow_srr1; | 259 | __entry->srr1 = vcpu->arch.shadow_srr1; |
260 | __entry->last_inst = vcpu->arch.last_inst; | 260 | __entry->last_inst = vcpu->arch.last_inst; |
261 | ), | 261 | ), |
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 9d1d33cd2be5..964a5f61488a 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -97,7 +97,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, | |||
97 | static void __slb_flush_and_rebolt(void) | 97 | static void __slb_flush_and_rebolt(void) |
98 | { | 98 | { |
99 | /* If you change this make sure you change SLB_NUM_BOLTED | 99 | /* If you change this make sure you change SLB_NUM_BOLTED |
100 | * appropriately too. */ | 100 | * and PR KVM appropriately too. */ |
101 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; | 101 | unsigned long linear_llp, vmalloc_llp, lflags, vflags; |
102 | unsigned long ksp_esid_data, ksp_vsid_data; | 102 | unsigned long ksp_esid_data, ksp_vsid_data; |
103 | 103 | ||