 28 files changed, 2103 insertions(+), 81 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index b2511360b8f5..e8875fef3eb8 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1532,6 +1532,23 @@ Userspace can now handle the hypercall and when it's done modify the gprs as
 necessary. Upon guest entry all guest GPRs will then be replaced by the values
 in this struct.
 
+		/* KVM_EXIT_PAPR_HCALL */
+		struct {
+			__u64 nr;
+			__u64 ret;
+			__u64 args[9];
+		} papr_hcall;
+
+This is used on 64-bit PowerPC when emulating a pSeries partition,
+e.g. with the 'pseries' machine type in qemu.  It occurs when the
+guest does a hypercall using the 'sc 1' instruction.  The 'nr' field
+contains the hypercall number (from the guest R3), and 'args' contains
+the arguments (from the guest R4 - R12).  Userspace should put the
+return code in 'ret' and any extra returned values in args[].
+The possible hypercalls are defined in the Power Architecture Platform
+Requirements (PAPR) document available from www.power.org (free
+developer registration required to access it).
+
 		/* Fix the size of the union. */
 		char padding[256];
 	};
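
The exit documented above is consumed from a VMM's vcpu run loop.  A minimal
hedged sketch of the userspace side follows; only the kvm_run fields come from
this patch, while vcpu_loop() and handle_hcall() are hypothetical VMM code:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Hypothetical hypercall dispatcher provided by the VMM. */
	extern __u64 handle_hcall(__u64 nr, __u64 *args);

	/* 'run' is the mmap()ed struct kvm_run for this vcpu. */
	static void vcpu_loop(int vcpu_fd, struct kvm_run *run)
	{
		while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
			if (run->exit_reason != KVM_EXIT_PAPR_HCALL)
				break;		/* other exits handled elsewhere */
			/* nr came from guest R3, args[] from R4 - R12 */
			run->papr_hcall.ret = handle_hcall(run->papr_hcall.nr,
							   run->papr_hcall.args);
			/* on the next KVM_RUN, 'ret' is copied back to the guest */
		}
	}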
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 296c9b66c04a..69435da8f2ba 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -134,6 +134,17 @@ do_kvm_##n:						\
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
+#ifdef CONFIG_KVM_BOOK3S_PR
+#define KVMTEST_PR(n)			__KVMTEST(n)
+#define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST_PR(n)
+#define KVM_HANDLER_PR(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)
+#endif
+
 #define NOTEST(n)
 
 /*
@@ -210,7 +221,7 @@ label##_pSeries:					\
 	HMT_MEDIUM;						\
 	SET_SCRATCH0(r13);		/* save r13 */		\
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
-				 EXC_STD, KVMTEST, vec)
+				 EXC_STD, KVMTEST_PR, vec)
 
 #define STD_EXCEPTION_HV(loc, vec, label)			\
 	. = loc;						\
@@ -227,8 +238,8 @@ label##_hv:						\
 	beq	masked_##h##interrupt
 #define _SOFTEN_TEST(h)	__SOFTEN_TEST(h)
 
-#define SOFTEN_TEST(vec)					\
-	KVMTEST(vec);						\
+#define SOFTEN_TEST_PR(vec)					\
+	KVMTEST_PR(vec);					\
 	_SOFTEN_TEST(EXC_STD)
 
 #define SOFTEN_TEST_HV(vec)					\
@@ -248,7 +259,7 @@ label##_hv:						\
 	.globl label##_pSeries;					\
 label##_pSeries:						\
 	_MASKABLE_EXCEPTION_PSERIES(vec, label,			\
-				    EXC_STD, SOFTEN_TEST)
+				    EXC_STD, SOFTEN_TEST_PR)
 
 #define MASKABLE_EXCEPTION_HV(loc, vec, label)			\
 	. = loc;						\
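
The _PR macro variants follow the usual compile-out idiom: with
CONFIG_KVM_BOOK3S_PR unset they expand to nothing, so the first-level
exception handlers contain no PR-KVM test or handler stub in an HV-only
build.  The same idiom in miniature (all names hypothetical):

	#ifdef CONFIG_FEATURE_X
	#define FEATURE_X_HOOK(n)	do_feature_x_hook(n)
	#else
	#define FEATURE_X_HOOK(n)	/* expands to nothing; call site vanishes */
	#endif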
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 0951b17f4eb5..7b1f0e0fc653 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -64,8 +64,12 @@
 #define BOOK3S_INTERRUPT_PROGRAM	0x700
 #define BOOK3S_INTERRUPT_FP_UNAVAIL	0x800
 #define BOOK3S_INTERRUPT_DECREMENTER	0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER	0x980
 #define BOOK3S_INTERRUPT_SYSCALL	0xc00
 #define BOOK3S_INTERRUPT_TRACE		0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE	0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE	0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40
 #define BOOK3S_INTERRUPT_PERFMON	0xf00
 #define BOOK3S_INTERRUPT_ALTIVEC	0xf20
 #define BOOK3S_INTERRUPT_VSX		0xf40
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 480fff6090db..5537c45d626c 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -116,6 +116,7 @@ extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
 extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -127,10 +128,12 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern int kvmppc_mmu_hpte_sysinit(void);
 extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
@@ -140,6 +143,7 @@ extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 extern void kvmppc_handler_lowmem_trampoline(void);
 extern void kvmppc_handler_trampoline_enter(void);
 extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_hv_entry_trampoline(void);
 extern void kvmppc_load_up_fpu(void);
 extern void kvmppc_load_up_altivec(void);
 extern void kvmppc_load_up_vsx(void);
@@ -151,6 +155,19 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
 }
 
+extern void kvm_return_point(void);
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
 	return to_book3s(vcpu)->hior;
@@ -165,16 +182,6 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 	vcpu->arch.shared->int_pending = 0;
 }
 
-static inline ulong dsisr(void)
-{
-	ulong r;
-	asm ( "mfdsisr %0" : "=r" (r) );
-	return r;
-}
-
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	if ( num < 14 ) {
@@ -281,6 +288,108 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 	return crit;
 }
+#else /* CONFIG_KVM_BOOK3S_PR */
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	/* Recalculate LPCR:MER based on the presence of
+	 * a pending external interrupt
+	 */
+	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &pending_now) ||
+	    test_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &pending_now))
+		vcpu->arch.lpcr |= LPCR_MER;
+	else
+		vcpu->arch.lpcr &= ~((u64)LPCR_MER);
+}
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu);
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+	return vcpu->arch.last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault_dar;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+#endif
 
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
@@ -289,12 +398,4 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 #define INS_DCBZ			0x7c0007ec
 
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
-
 #endif /* __ASM_KVM_BOOK3S_H__ */
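
The reason the !PR branch above duplicates every accessor is that common
Book3S code can then stay source-identical across the two backends: under
KVM_BOOK3S_PR the GPR accessors go through the shadow vcpu in the PACA,
while under HV they read and write vcpu->arch directly.  A small hedged
illustration (the swap helper itself is hypothetical):

	/* Hypothetical helper: common code like this compiles unchanged
	 * for both backends because it only uses the accessors. */
	static void swap_r3_r4(struct kvm_vcpu *vcpu)
	{
		ulong r3 = kvmppc_get_gpr(vcpu, 3);
		ulong r4 = kvmppc_get_gpr(vcpu, 4);

		kvmppc_set_gpr(vcpu, 3, r4);
		kvmppc_set_gpr(vcpu, 4, r3);
	}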
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4cadd612d575..5f73388ea0af 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,9 +20,11 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
+#ifdef CONFIG_KVM_BOOK3S_PR
 static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
 {
 	return &get_paca()->shadow_vcpu;
 }
+#endif
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 312617529864..b7b039532fbc 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -70,10 +70,22 @@ kvmppc_resume_\intno:
 struct kvmppc_host_state {
 	ulong host_r1;
 	ulong host_r2;
+	ulong host_msr;
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
 	u8 in_guest;
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	struct kvm_vcpu *kvm_vcpu;
+	u64 dabr;
+	u64 host_mmcr[3];
+	u32 host_pmc[6];
+	u64 host_purr;
+	u64 host_spurr;
+	u64 host_dscr;
+	u64 dec_expires;
+#endif
 };
 
 struct kvmppc_book3s_shadow_vcpu {
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 9c9ba3d59b1b..a90e09188777 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -93,4 +93,8 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dear;
 }
 
+static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.shared->msr;
+}
 #endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 069eb9fc6c41..4a3f790d5fc4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,7 +33,9 @@
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
 
+#ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#endif
 
 /* We don't currently support large pages. */
 #define KVM_HPAGE_GFN_SHIFT(x)	0
@@ -133,7 +135,26 @@ struct kvmppc_exit_timing {
 	};
 };
 
+struct kvmppc_pginfo {
+	unsigned long pfn;
+	atomic_t refcnt;
+};
+
 struct kvm_arch {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	unsigned long hpt_virt;
+	unsigned long ram_npages;
+	unsigned long ram_psize;
+	unsigned long ram_porder;
+	struct kvmppc_pginfo *ram_pginfo;
+	unsigned int lpid;
+	unsigned int host_lpid;
+	unsigned long host_lpcr;
+	unsigned long sdr1;
+	unsigned long host_sdr1;
+	int tlbie_lock;
+	unsigned short last_vcpu[NR_CPUS];
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
 };
 
 struct kvmppc_pte {
@@ -190,7 +211,7 @@ struct kvm_vcpu_arch {
 	ulong rmcall;
 	ulong host_paca_phys;
 	struct kvmppc_slb slb[64];
-	int slb_max;		/* # valid entries in slb[] */
+	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
 #endif
@@ -212,7 +233,7 @@ struct kvm_vcpu_arch {
 #endif
 
 #ifdef CONFIG_VSX
-	u64 vsr[32];
+	u64 vsr[64];
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S
@@ -220,18 +241,24 @@ struct kvm_vcpu_arch {
 	u32 qpr[32];
 #endif
 
-#ifdef CONFIG_BOOKE
 	ulong pc;
 	ulong ctr;
 	ulong lr;
 
 	ulong xer;
 	u32 cr;
-#endif
 
 #ifdef CONFIG_PPC_BOOK3S
 	ulong hflags;
 	ulong guest_owned_ext;
+	ulong purr;
+	ulong spurr;
+	ulong lpcr;
+	ulong dscr;
+	ulong amr;
+	ulong uamor;
+	u32 ctrl;
+	ulong dabr;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -270,6 +297,9 @@ struct kvm_vcpu_arch {
 	u32 dbcr1;
 	u32 dbsr;
 
+	u64 mmcr[3];
+	u32 pmc[6];
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
 	struct kvmppc_exit_timing timing_exit;
@@ -284,8 +314,12 @@ struct kvm_vcpu_arch {
 	struct dentry *debugfs_exit_timing;
 #endif
 
+#ifdef CONFIG_PPC_BOOK3S
+	ulong fault_dar;
+	u32 fault_dsisr;
+#endif
+
 #ifdef CONFIG_BOOKE
-	u32 last_inst;
 	ulong fault_dear;
 	ulong fault_esr;
 	ulong queued_dear;
@@ -300,16 +334,25 @@ struct kvm_vcpu_arch {
 	u8 dcr_is_write;
 	u8 osi_needed;
 	u8 osi_enabled;
+	u8 hcall_needed;
 
 	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
 
 	struct hrtimer dec_timer;
 	struct tasklet_struct tasklet;
 	u64 dec_jiffies;
+	u64 dec_expires;
 	unsigned long pending_exceptions;
+	u16 last_cpu;
+	u32 last_inst;
+	int trap;
 	struct kvm_vcpu_arch_shared *shared;
 	unsigned long magic_page_pa; /* phys addr to map the magic page to */
 	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	struct kvm_vcpu_arch_shared shregs;
+#endif
 };
 
 #endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 48b7ab76de2d..0dafd53c30ed 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -112,6 +112,12 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
 extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
 
+extern long kvmppc_alloc_hpt(struct kvm *kvm);
+extern void kvmppc_free_hpt(struct kvm *kvm);
+extern long kvmppc_prepare_vrma(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm *kvm,
+			    struct kvm_userspace_memory_region *mem);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d865bd909c7d..b445e0af4c2b 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -90,13 +90,19 @@ extern char initial_stab[];
 
 #define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
 #define HPTE_R_TS		ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
 #define HPTE_R_RPN_SHIFT	12
-#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
+#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
+#define HPTE_R_G		ASM_CONST(0x0000000000000008)
+#define HPTE_R_M		ASM_CONST(0x0000000000000010)
+#define HPTE_R_I		ASM_CONST(0x0000000000000020)
+#define HPTE_R_W		ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
 #define HPTE_R_C		ASM_CONST(0x0000000000000080)
 #define HPTE_R_R		ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
 
 #define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
 #define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
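
With the WIMG bits and storage-key fields broken out individually (instead
of the old catch-all HPTE_R_FLAGS mask), the second HPTE doubleword can be
assembled field by field.  A hedged sketch of that composition; the helper
name and the chosen protection value are illustrative, not from this patch:

	/* Illustrative only: compose HPTE dword 1 for a cacheable,
	 * coherency-required page at physical address 'pa'. */
	static unsigned long make_hpte_r(unsigned long pa)
	{
		unsigned long r = pa & HPTE_R_RPN;	/* real page number bits */

		r |= HPTE_R_M;			/* M: memory coherence required */
		r |= HPTE_R_PP & 0x2;		/* PP: illustrative protection value */
		return r;
	}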
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 58f4a18ef60c..a6da12859959 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -147,8 +147,10 @@ struct paca_struct {
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
 	/* We use this to store guest state in */
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+#endif
 	struct kvmppc_host_state kvm_hstate;
 #endif
 };
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d879a6b91635..36a611b398c5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -189,6 +189,9 @@
 #define SPRN_CTR	0x009	/* Count Register */
 #define SPRN_DSCR	0x11
 #define SPRN_CFAR	0x1c	/* Come From Address Register */
+#define SPRN_AMR	0x1d	/* Authority Mask Register */
+#define SPRN_UAMOR	0x9d	/* User Authority Mask Override Register */
+#define SPRN_AMOR	0x15d	/* Authority Mask Override Register */
 #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
 #define SPRN_CTRLF	0x088
 #define SPRN_CTRLT	0x098
@@ -252,6 +255,7 @@
 #define   LPCR_RMI	0x00000002	/* real mode is cache inhibit */
 #define   LPCR_HDICE	0x00000001	/* Hyp Decr enable (HV,PR,EE) */
 #define SPRN_LPID	0x13F	/* Logical Partition Identifier */
+#define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */
 #define	SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
 #define SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index dabfb7346f36..936267462cae 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -187,6 +187,7 @@ int main(void)
 	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
 	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+	DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
 	DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
 	DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
 #endif /* CONFIG_PPC_STD_MMU_64 */
@@ -392,6 +393,29 @@ int main(void)
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+	DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+	DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
@@ -403,17 +427,60 @@ int main(void)
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
 	/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+	DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+	DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+	DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
 #ifdef CONFIG_PPC_BOOK3S
+	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
+	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+	DEFINE(VCPU_LPCR, offsetof(struct kvm_vcpu, arch.lpcr));
+	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+	DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
 # define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
 #else	/* 32-bit */
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
@@ -453,11 +520,23 @@ int main(void)
 
 	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
 	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+	HSTATE_FIELD(HSTATE_PMC, host_pmc);
+	HSTATE_FIELD(HSTATE_PURR, host_purr);
+	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+	HSTATE_FIELD(HSTATE_DABR, dabr);
+	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
 #else /* CONFIG_PPC_BOOK3S */
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6da00550afea..163c041cec24 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -87,14 +87,14 @@ data_access_not_stab:
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 #endif
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
-				 KVMTEST, 0x300)
+				 KVMTEST_PR, 0x300)
 
 	. = 0x380
 	.globl data_access_slb_pSeries
 data_access_slb_pSeries:
 	HMT_MEDIUM
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_DAR
 #ifdef __DISABLED__
@@ -125,7 +125,7 @@ data_access_slb_pSeries:
 instruction_access_slb_pSeries:
 	HMT_MEDIUM
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
 #ifdef __DISABLED__
@@ -153,32 +153,32 @@ instruction_access_slb_pSeries:
 hardware_interrupt_pSeries:
 hardware_interrupt_hv:
 	BEGIN_FTR_SECTION
-		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-					    EXC_STD, SOFTEN_TEST)
-		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
-	FTR_SECTION_ELSE
 		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
 					    EXC_HV, SOFTEN_TEST_HV)
 		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
-	ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+	FTR_SECTION_ELSE
+		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+					    EXC_STD, SOFTEN_TEST_PR)
+		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE_206)
 
 	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
 
 	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
 
 	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 
 	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
 	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
 
 	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
 
 	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
 
 	. = 0xc00
 	.globl	system_call_pSeries
@@ -219,7 +219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	b	.
 
 	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
 
 	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
 	 * out of line to handle them
@@ -254,23 +254,23 @@ vsx_unavailable_pSeries_1:
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
 
 	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
 
 	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
 #endif /* CONFIG_CBE_RAS */
 
 	. = 0x3000
@@ -297,7 +297,7 @@ data_access_check_stab:
 	mfspr	r9,SPRN_DSISR
 	srdi	r10,r10,60
 	rlwimi	r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
 	lbz	r9,HSTATE_IN_GUEST(r13)
 	rlwimi	r10,r9,8,0x300
 #endif
@@ -316,11 +316,11 @@ do_stab_bolted_pSeries:
 	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
 #endif /* CONFIG_POWER4_ONLY */
 
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
-	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
-	KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+	KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
+	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
 	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
 
 	.align	7
@@ -336,11 +336,11 @@ do_stab_bolted_pSeries:
 
 	/* moved from 0xf00 */
 	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
 	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
 	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
 
 /*
  * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -417,7 +417,11 @@ slb_miss_user_pseries:
 /* KVM's trampoline code needs to be close to the interrupt handlers */
 
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
 #include "../kvm/book3s_rmhandlers.S"
+#else
+#include "../kvm/book3s_hv_rmhandlers.S"
+#endif
 #endif
 
 	.align	7
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 60ac2a9251db..ec2d0edeb134 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
 
 void enable_kernel_fp(void)
 {
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b65..22051ef04bd9 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
 
 int threads_per_core, threads_shift;
 cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
 
 static void __init cpu_init_thread_core_maps(int tpc)
 {
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98d..09a85a9045d6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,6 +243,7 @@ void smp_send_reschedule(int cpu)
 	if (likely(smp_ops))
 		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b7baff78f90c..5d9b78ebbaa6 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
 	bool
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
-	select KVM_MMIO
 
 config KVM_BOOK3S_HANDLER
 	bool
@@ -28,16 +27,22 @@ config KVM_BOOK3S_HANDLER
 config KVM_BOOK3S_32_HANDLER
 	bool
 	select KVM_BOOK3S_HANDLER
+	select KVM_MMIO
 
 config KVM_BOOK3S_64_HANDLER
 	bool
 	select KVM_BOOK3S_HANDLER
 
+config KVM_BOOK3S_PR
+	bool
+	select KVM_MMIO
+
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
 	depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
 	select KVM
 	select KVM_BOOK3S_32_HANDLER
+	select KVM_BOOK3S_PR
 	---help---
 	  Support running unmodified book3s_32 guest kernels
 	  in virtual machines on book3s_32 host processors.
@@ -50,8 +55,8 @@ config KVM_BOOK3S_32
 config KVM_BOOK3S_64
 	tristate "KVM support for PowerPC book3s_64 processors"
 	depends on EXPERIMENTAL && PPC_BOOK3S_64
-	select KVM
 	select KVM_BOOK3S_64_HANDLER
+	select KVM
 	---help---
 	  Support running unmodified book3s_64 and book3s_32 guest kernels
 	  in virtual machines on book3s_64 host processors.
@@ -61,10 +66,37 @@ config KVM_BOOK3S_64
 
 	  If unsure, say N.
 
+config KVM_BOOK3S_64_HV
+	bool "KVM support for POWER7 using hypervisor mode in host"
+	depends on KVM_BOOK3S_64
+	---help---
+	  Support running unmodified book3s_64 guest kernels in
+	  virtual machines on POWER7 processors that have hypervisor
+	  mode available to the host.
+
+	  If you say Y here, KVM will use the hardware virtualization
+	  facilities of POWER7 (and later) processors, meaning that
+	  guest operating systems will run at full hardware speed
+	  using supervisor and user modes.  However, this also means
+	  that KVM is not usable under PowerVM (pHyp), is only usable
+	  on POWER7 (or later) processors, and can only emulate
+	  POWER5+, POWER6 and POWER7 processors.
+
+	  This module provides access to the hardware capabilities through
+	  a character device node named /dev/kvm.
+
+	  If unsure, say N.
+
+config KVM_BOOK3S_64_PR
+	def_bool y
+	depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
+	select KVM_BOOK3S_PR
+
 config KVM_440
 	bool "KVM support for PowerPC 440 processors"
 	depends on EXPERIMENTAL && 44x
 	select KVM
+	select KVM_MMIO
 	---help---
 	  Support running unmodified 440 guest kernels in virtual machines on
 	  440 host processors.
@@ -89,6 +121,7 @@ config KVM_E500
 	bool "KVM support for PowerPC E500 processors"
 	depends on EXPERIMENTAL && E500
 	select KVM
+	select KVM_MMIO
 	---help---
 	  Support running unmodified E500 guest kernels in virtual machines on
 	  E500 host processors.
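
With these rules the PR and HV backends are mutually exclusive for a given
build: KVM_BOOK3S_64_PR defaults to y exactly when KVM_BOOK3S_64_HV is not
set.  A hedged sketch of the resulting .config fragment for an HV build
(illustrative, not a complete configuration):

	CONFIG_KVM_BOOK3S_HANDLER=y
	CONFIG_KVM_BOOK3S_64_HANDLER=y
	CONFIG_KVM_BOOK3S_64=y
	CONFIG_KVM_BOOK3S_64_HV=y
	# CONFIG_KVM_BOOK3S_64_PR is not set, so KVM_BOOK3S_PR and
	# KVM_MMIO are not selected in this configuration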
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index bf9854fa7f63..8a435a6da665 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -38,11 +38,10 @@ kvm-e500-objs := \
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
 
-kvm-book3s_64-objs := \
-	$(common-objs-y) \
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+	../../../virt/kvm/coalesced_mmio.o \
 	fpu.o \
 	book3s_paired_singles.o \
-	book3s.o \
 	book3s_pr.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
@@ -50,6 +49,18 @@ kvm-book3s_64-objs := \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
+
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+	book3s_hv.o \
+	book3s_hv_interrupts.o \
+	book3s_64_mmu_hv.o
+
+kvm-book3s_64-objs := \
+	../../../virt/kvm/kvm_main.o \
+	powerpc.o \
+	emulate.o \
+	book3s.o \
+	$(kvm-book3s_64-objs-y)
 kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
 
 kvm-book3s_32-objs := \
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c new file mode 100644 index 000000000000..4a4fbec61a17 --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -0,0 +1,258 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
16 | */ | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/kvm.h> | ||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/highmem.h> | ||
23 | #include <linux/gfp.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/hugetlb.h> | ||
26 | |||
27 | #include <asm/tlbflush.h> | ||
28 | #include <asm/kvm_ppc.h> | ||
29 | #include <asm/kvm_book3s.h> | ||
30 | #include <asm/mmu-hash64.h> | ||
31 | #include <asm/hvcall.h> | ||
32 | #include <asm/synch.h> | ||
33 | #include <asm/ppc-opcode.h> | ||
34 | #include <asm/cputable.h> | ||
35 | |||
36 | /* For now use fixed-size 16MB page table */ | ||
37 | #define HPT_ORDER 24 | ||
38 | #define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */ | ||
39 | #define HPT_HASH_MASK (HPT_NPTEG - 1) | ||
40 | |||
41 | /* Pages in the VRMA are 16MB pages */ | ||
42 | #define VRMA_PAGE_ORDER 24 | ||
43 | #define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */ | ||
44 | |||
45 | #define NR_LPIDS (LPID_RSVD + 1) | ||
46 | unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)]; | ||
47 | |||
48 | long kvmppc_alloc_hpt(struct kvm *kvm) | ||
49 | { | ||
50 | unsigned long hpt; | ||
51 | unsigned long lpid; | ||
52 | |||
53 | hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN, | ||
54 | HPT_ORDER - PAGE_SHIFT); | ||
55 | if (!hpt) { | ||
56 | pr_err("kvmppc_alloc_hpt: Couldn't alloc HPT\n"); | ||
57 | return -ENOMEM; | ||
58 | } | ||
59 | kvm->arch.hpt_virt = hpt; | ||
60 | |||
61 | do { | ||
62 | lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS); | ||
63 | if (lpid >= NR_LPIDS) { | ||
64 | pr_err("kvmppc_alloc_hpt: No LPIDs free\n"); | ||
65 | free_pages(hpt, HPT_ORDER - PAGE_SHIFT); | ||
66 | return -ENOMEM; | ||
67 | } | ||
68 | } while (test_and_set_bit(lpid, lpid_inuse)); | ||
69 | |||
70 | kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); | ||
71 | kvm->arch.lpid = lpid; | ||
72 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); | ||
73 | kvm->arch.host_lpid = mfspr(SPRN_LPID); | ||
74 | kvm->arch.host_lpcr = mfspr(SPRN_LPCR); | ||
75 | |||
76 | pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid); | ||
77 | return 0; | ||
78 | } | ||
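
The SDR1 value written above packs the HPT's physical base together with its size encoding. As a rough illustration (assuming the architected encoding where HTABSIZE = log2(bytes) - 18, and reusing this file's constants), the 16MB table works out to 2^17 PTEGs:

	/* Illustrative sketch only; mirrors the SDR1 sizing arithmetic above. */
	#include <stdio.h>

	#define HPT_ORDER	24				/* 16MB HPT */
	#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */

	int main(void)
	{
		unsigned long htabsize = HPT_ORDER - 18;	/* SDR1 HTABSIZE field */

		/* The minimum HPT is 2^18 bytes = 2^11 PTEGs, so a table with
		 * HTABSIZE = n holds 2^(11 + n) PTEGs. */
		printf("HTABSIZE=%lu -> %lu ptegs (HPT_NPTEG=%lu)\n",
		       htabsize, 1ul << (11 + htabsize), HPT_NPTEG);
		return 0;
	}
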
79 | |||
80 | void kvmppc_free_hpt(struct kvm *kvm) | ||
81 | { | ||
82 | unsigned long i; | ||
83 | struct kvmppc_pginfo *pginfo; | ||
84 | |||
85 | clear_bit(kvm->arch.lpid, lpid_inuse); | ||
86 | free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT); | ||
87 | |||
88 | if (kvm->arch.ram_pginfo) { | ||
89 | pginfo = kvm->arch.ram_pginfo; | ||
90 | kvm->arch.ram_pginfo = NULL; | ||
91 | for (i = 0; i < kvm->arch.ram_npages; ++i) | ||
92 | put_page(pfn_to_page(pginfo[i].pfn)); | ||
93 | kfree(pginfo); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | static unsigned long user_page_size(unsigned long addr) | ||
98 | { | ||
99 | struct vm_area_struct *vma; | ||
100 | unsigned long size = PAGE_SIZE; | ||
101 | |||
102 | down_read(¤t->mm->mmap_sem); | ||
103 | vma = find_vma(current->mm, addr); | ||
104 | if (vma) | ||
105 | size = vma_kernel_pagesize(vma); | ||
106 | up_read(¤t->mm->mmap_sem); | ||
107 | return size; | ||
108 | } | ||
109 | |||
110 | static pfn_t hva_to_pfn(unsigned long addr) | ||
111 | { | ||
112 | struct page *page[1]; | ||
113 | int npages; | ||
114 | |||
115 | might_sleep(); | ||
116 | |||
117 | npages = get_user_pages_fast(addr, 1, 1, page); | ||
118 | |||
119 | if (unlikely(npages != 1)) | ||
120 | return 0; | ||
121 | |||
122 | return page_to_pfn(page[0]); | ||
123 | } | ||
124 | |||
125 | long kvmppc_prepare_vrma(struct kvm *kvm, | ||
126 | struct kvm_userspace_memory_region *mem) | ||
127 | { | ||
128 | unsigned long psize, porder; | ||
129 | unsigned long i, npages; | ||
130 | struct kvmppc_pginfo *pginfo; | ||
131 | pfn_t pfn; | ||
132 | unsigned long hva; | ||
133 | |||
134 | /* First see what page size we have */ | ||
135 | psize = user_page_size(mem->userspace_addr); | ||
136 | /* For now, only allow 16MB pages */ | ||
137 | if (psize != 1ul << VRMA_PAGE_ORDER || (mem->memory_size & (psize - 1))) { | ||
138 | pr_err("bad psize=%lx memory_size=%llx @ %llx\n", | ||
139 | psize, mem->memory_size, mem->userspace_addr); | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | porder = __ilog2(psize); | ||
143 | |||
144 | npages = mem->memory_size >> porder; | ||
145 | pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo), GFP_KERNEL); | ||
146 | if (!pginfo) { | ||
147 | pr_err("kvmppc_prepare_vrma: couldn't alloc %lu bytes\n", | ||
148 | npages * sizeof(struct kvmppc_pginfo)); | ||
149 | return -ENOMEM; | ||
150 | } | ||
151 | |||
152 | for (i = 0; i < npages; ++i) { | ||
153 | hva = mem->userspace_addr + (i << porder); | ||
154 | if (user_page_size(hva) != psize) | ||
155 | goto err; | ||
156 | pfn = hva_to_pfn(hva); | ||
157 | if (pfn == 0) { | ||
158 | pr_err("oops, no pfn for hva %lx\n", hva); | ||
159 | goto err; | ||
160 | } | ||
161 | if (pfn & ((1ul << (porder - PAGE_SHIFT)) - 1)) { | ||
162 | pr_err("oops, unaligned pfn %llx\n", pfn); | ||
163 | put_page(pfn_to_page(pfn)); | ||
164 | goto err; | ||
165 | } | ||
166 | pginfo[i].pfn = pfn; | ||
167 | } | ||
168 | |||
169 | kvm->arch.ram_npages = npages; | ||
170 | kvm->arch.ram_psize = psize; | ||
171 | kvm->arch.ram_porder = porder; | ||
172 | kvm->arch.ram_pginfo = pginfo; | ||
173 | |||
174 | return 0; | ||
175 | |||
176 | err: | ||
177 | kfree(pginfo); | ||
178 | return -EINVAL; | ||
179 | } | ||
180 | |||
181 | void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem) | ||
182 | { | ||
183 | unsigned long i; | ||
184 | unsigned long npages = kvm->arch.ram_npages; | ||
185 | unsigned long pfn; | ||
186 | unsigned long *hpte; | ||
187 | unsigned long hash; | ||
188 | struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo; | ||
189 | |||
190 | if (!pginfo) | ||
191 | return; | ||
192 | |||
193 | /* VRMA can't be > 1TB */ | ||
194 | if (npages > 1ul << (40 - kvm->arch.ram_porder)) | ||
195 | npages = 1ul << (40 - kvm->arch.ram_porder); | ||
196 | /* Can't use more than 1 HPTE per HPTEG */ | ||
197 | if (npages > HPT_NPTEG) | ||
198 | npages = HPT_NPTEG; | ||
199 | |||
200 | for (i = 0; i < npages; ++i) { | ||
201 | pfn = pginfo[i].pfn; | ||
202 | /* can't use hpt_hash since va > 64 bits */ | ||
203 | hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK; | ||
204 | /* | ||
205 | * We assume that the hash table is empty and no | ||
206 | * vcpus are using it at this stage. Since we create | ||
207 | * at most one HPTE per HPTEG, we just assume entry 7 | ||
208 | * is available and use it. | ||
209 | */ | ||
210 | hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7)); | ||
211 | hpte += 7 * 2; | ||
212 | /* HPTE low word - RPN, protection, etc. */ | ||
213 | hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C | | ||
214 | HPTE_R_M | PP_RWXX; | ||
215 | wmb(); | ||
216 | hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | | ||
217 | (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED | | ||
218 | HPTE_V_LARGE | HPTE_V_VALID; | ||
219 | } | ||
220 | } | ||
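
Since the hash folding in the loop above is easy to misread, here is a C restatement of where the bolted HPTE for VRMA page i lands (vrma_hpte_addr is a hypothetical helper for illustration, not part of the patch): hash << 7 selects the 128-byte PTEG and 7 * 16 bytes selects its last slot.

	#define HPT_ORDER	24
	#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))
	#define HPT_HASH_MASK	(HPT_NPTEG - 1)
	#define VRMA_VSID	0x1ffffffUL

	/* Hypothetical helper, for illustration only. */
	static unsigned long vrma_hpte_addr(unsigned long hpt_virt, unsigned long i)
	{
		unsigned long hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) &
				     HPT_HASH_MASK;

		/* hash << 7: 128B per PTEG; 7 * 16: entry 7 of 8, 2 dwords each */
		return hpt_virt + (hash << 7) + 7 * 16;
	}
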
221 | |||
222 | int kvmppc_mmu_hv_init(void) | ||
223 | { | ||
224 | if (!cpu_has_feature(CPU_FTR_HVMODE_206)) | ||
225 | return -EINVAL; | ||
226 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | ||
227 | set_bit(mfspr(SPRN_LPID), lpid_inuse); | ||
228 | set_bit(LPID_RSVD, lpid_inuse); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | ||
234 | { | ||
235 | } | ||
236 | |||
237 | static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) | ||
238 | { | ||
239 | kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); | ||
240 | } | ||
241 | |||
242 | static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
243 | struct kvmppc_pte *gpte, bool data) | ||
244 | { | ||
245 | return -ENOENT; | ||
246 | } | ||
247 | |||
248 | void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) | ||
249 | { | ||
250 | struct kvmppc_mmu *mmu = &vcpu->arch.mmu; | ||
251 | |||
252 | vcpu->arch.slb_nr = 32; /* Assume POWER7 for now */ | ||
253 | |||
254 | mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; | ||
255 | mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr; | ||
256 | |||
257 | vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; | ||
258 | } | ||
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c index f94fd9a4c69a..88c8f26add02 100644 --- a/arch/powerpc/kvm/book3s_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c | |||
@@ -20,6 +20,9 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <asm/kvm_book3s.h> | 21 | #include <asm/kvm_book3s.h> |
22 | 22 | ||
23 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
24 | EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); | ||
25 | #else | ||
23 | EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter); | 26 | EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter); |
24 | EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline); | 27 | EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline); |
25 | EXPORT_SYMBOL_GPL(kvmppc_rmcall); | 28 | EXPORT_SYMBOL_GPL(kvmppc_rmcall); |
@@ -30,3 +33,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); | |||
30 | #ifdef CONFIG_VSX | 33 | #ifdef CONFIG_VSX |
31 | EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); | 34 | EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); |
32 | #endif | 35 | #endif |
36 | #endif | ||
37 | |||
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c new file mode 100644 index 000000000000..60b7300568c8 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -0,0 +1,445 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
3 | * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. | ||
4 | * | ||
5 | * Authors: | ||
6 | * Paul Mackerras <paulus@au1.ibm.com> | ||
7 | * Alexander Graf <agraf@suse.de> | ||
8 | * Kevin Wolf <mail@kevin-wolf.de> | ||
9 | * | ||
10 | * Description: KVM functions specific to running on Book 3S | ||
11 | * processors in hypervisor mode (specifically POWER7 and later). | ||
12 | * | ||
13 | * This file is derived from arch/powerpc/kvm/book3s.c, | ||
14 | * by Alexander Graf <agraf@suse.de>. | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License, version 2, as | ||
18 | * published by the Free Software Foundation. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/preempt.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/fs.h> | ||
28 | #include <linux/anon_inodes.h> | ||
29 | #include <linux/cpumask.h> | ||
30 | |||
31 | #include <asm/reg.h> | ||
32 | #include <asm/cputable.h> | ||
33 | #include <asm/cacheflush.h> | ||
34 | #include <asm/tlbflush.h> | ||
35 | #include <asm/uaccess.h> | ||
36 | #include <asm/io.h> | ||
37 | #include <asm/kvm_ppc.h> | ||
38 | #include <asm/kvm_book3s.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/lppaca.h> | ||
41 | #include <asm/processor.h> | ||
42 | #include <linux/gfp.h> | ||
43 | #include <linux/sched.h> | ||
44 | #include <linux/vmalloc.h> | ||
45 | #include <linux/highmem.h> | ||
46 | |||
47 | /* #define EXIT_DEBUG */ | ||
48 | /* #define EXIT_DEBUG_SIMPLE */ | ||
49 | /* #define EXIT_DEBUG_INT */ | ||
50 | |||
51 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
52 | { | ||
53 | local_paca->kvm_hstate.kvm_vcpu = vcpu; | ||
54 | } | ||
55 | |||
56 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | ||
57 | { | ||
58 | } | ||
59 | |||
60 | void kvmppc_vcpu_block(struct kvm_vcpu *vcpu) | ||
61 | { | ||
62 | u64 now; | ||
63 | unsigned long dec_nsec; | ||
64 | |||
65 | now = get_tb(); | ||
66 | if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu)) | ||
67 | kvmppc_core_queue_dec(vcpu); | ||
68 | if (vcpu->arch.pending_exceptions) | ||
69 | return; | ||
70 | if (vcpu->arch.dec_expires != ~(u64)0) { | ||
71 | dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC / | ||
72 | tb_ticks_per_sec; | ||
73 | hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), | ||
74 | HRTIMER_MODE_REL); | ||
75 | } | ||
76 | |||
77 | kvm_vcpu_block(vcpu); | ||
78 | vcpu->stat.halt_wakeup++; | ||
79 | |||
80 | if (vcpu->arch.dec_expires != ~(u64)0) | ||
81 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | ||
82 | } | ||
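
The hrtimer arming above converts the remaining timebase ticks to nanoseconds. A worked example, assuming a 512MHz timebase purely for illustration (the real value comes from tb_ticks_per_sec, read from the device tree):

	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ull

	int main(void)
	{
		unsigned long long tb_ticks_per_sec = 512000000ull;	/* assumed */
		unsigned long long dec_ticks = 512000;	/* 1ms worth of timebase */

		/* same arithmetic as the dec_nsec computation above */
		unsigned long long dec_nsec = dec_ticks * NSEC_PER_SEC /
					      tb_ticks_per_sec;
		printf("%llu ticks -> %llu ns\n", dec_ticks, dec_nsec);	/* 1000000 */
		return 0;
	}
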
83 | |||
84 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | ||
85 | { | ||
86 | vcpu->arch.shregs.msr = msr; | ||
87 | } | ||
88 | |||
89 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | ||
90 | { | ||
91 | vcpu->arch.pvr = pvr; | ||
92 | } | ||
93 | |||
94 | void kvmppc_dump_regs(struct kvm_vcpu *vcpu) | ||
95 | { | ||
96 | int r; | ||
97 | |||
98 | pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); | ||
99 | pr_err("pc = %.16lx msr = %.16llx trap = %x\n", | ||
100 | vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); | ||
101 | for (r = 0; r < 16; ++r) | ||
102 | pr_err("r%2d = %.16lx r%d = %.16lx\n", | ||
103 | r, kvmppc_get_gpr(vcpu, r), | ||
104 | r+16, kvmppc_get_gpr(vcpu, r+16)); | ||
105 | pr_err("ctr = %.16lx lr = %.16lx\n", | ||
106 | vcpu->arch.ctr, vcpu->arch.lr); | ||
107 | pr_err("srr0 = %.16llx srr1 = %.16llx\n", | ||
108 | vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); | ||
109 | pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", | ||
110 | vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); | ||
111 | pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", | ||
112 | vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); | ||
113 | pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", | ||
114 | vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); | ||
115 | pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); | ||
116 | pr_err("fault dar = %.16lx dsisr = %.8x\n", | ||
117 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); | ||
118 | pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); | ||
119 | for (r = 0; r < vcpu->arch.slb_max; ++r) | ||
120 | pr_err(" ESID = %.16llx VSID = %.16llx\n", | ||
121 | vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); | ||
122 | pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", | ||
123 | vcpu->arch.lpcr, vcpu->kvm->arch.sdr1, | ||
124 | vcpu->arch.last_inst); | ||
125 | } | ||
126 | |||
127 | static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
128 | struct task_struct *tsk) | ||
129 | { | ||
130 | int r = RESUME_HOST; | ||
131 | |||
132 | vcpu->stat.sum_exits++; | ||
133 | |||
134 | run->exit_reason = KVM_EXIT_UNKNOWN; | ||
135 | run->ready_for_interrupt_injection = 1; | ||
136 | switch (vcpu->arch.trap) { | ||
137 | /* We're good on these - the host merely wanted to get our attention */ | ||
138 | case BOOK3S_INTERRUPT_HV_DECREMENTER: | ||
139 | vcpu->stat.dec_exits++; | ||
140 | r = RESUME_GUEST; | ||
141 | break; | ||
142 | case BOOK3S_INTERRUPT_EXTERNAL: | ||
143 | vcpu->stat.ext_intr_exits++; | ||
144 | r = RESUME_GUEST; | ||
145 | break; | ||
146 | case BOOK3S_INTERRUPT_PERFMON: | ||
147 | r = RESUME_GUEST; | ||
148 | break; | ||
149 | case BOOK3S_INTERRUPT_PROGRAM: | ||
150 | { | ||
151 | ulong flags; | ||
152 | /* | ||
153 | * Normally program interrupts are delivered directly | ||
154 | * to the guest by the hardware, but we can get here | ||
155 | * as a result of a hypervisor emulation interrupt | ||
156 | * (e40) getting turned into a 700 by BML RTAS. | ||
157 | */ | ||
158 | flags = vcpu->arch.shregs.msr & 0x1f0000ull; | ||
159 | kvmppc_core_queue_program(vcpu, flags); | ||
160 | r = RESUME_GUEST; | ||
161 | break; | ||
162 | } | ||
163 | case BOOK3S_INTERRUPT_SYSCALL: | ||
164 | { | ||
165 | /* hcall - punt to userspace */ | ||
166 | int i; | ||
167 | |||
168 | if (vcpu->arch.shregs.msr & MSR_PR) { | ||
169 | /* sc 1 from userspace - reflect to guest syscall */ | ||
170 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL); | ||
171 | r = RESUME_GUEST; | ||
172 | break; | ||
173 | } | ||
174 | run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); | ||
175 | for (i = 0; i < 9; ++i) | ||
176 | run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); | ||
177 | run->exit_reason = KVM_EXIT_PAPR_HCALL; | ||
178 | vcpu->arch.hcall_needed = 1; | ||
179 | r = RESUME_HOST; | ||
180 | break; | ||
181 | } | ||
182 | /* | ||
183 | * We get these next two if the guest does a bad real-mode access, | ||
184 | * as we have enabled VRMA (virtualized real mode area) mode in the | ||
185 | * LPCR. We just generate an appropriate DSI/ISI to the guest. | ||
186 | */ | ||
187 | case BOOK3S_INTERRUPT_H_DATA_STORAGE: | ||
188 | vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr; | ||
189 | vcpu->arch.shregs.dar = vcpu->arch.fault_dar; | ||
190 | kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0); | ||
191 | r = RESUME_GUEST; | ||
192 | break; | ||
193 | case BOOK3S_INTERRUPT_H_INST_STORAGE: | ||
194 | kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, | ||
195 | 0x08000000); | ||
196 | r = RESUME_GUEST; | ||
197 | break; | ||
198 | /* | ||
199 | * This occurs if the guest executes an illegal instruction. | ||
200 | * We just generate a program interrupt to the guest, since | ||
201 | * we don't emulate any guest instructions at this stage. | ||
202 | */ | ||
203 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: | ||
204 | kvmppc_core_queue_program(vcpu, 0x80000); | ||
205 | r = RESUME_GUEST; | ||
206 | break; | ||
207 | default: | ||
208 | kvmppc_dump_regs(vcpu); | ||
209 | printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", | ||
210 | vcpu->arch.trap, kvmppc_get_pc(vcpu), | ||
211 | vcpu->arch.shregs.msr); | ||
212 | r = RESUME_HOST; | ||
213 | BUG(); | ||
214 | break; | ||
215 | } | ||
216 | |||
217 | |||
218 | if (!(r & RESUME_HOST)) { | ||
219 | /* To avoid clobbering exit_reason, only check for signals if | ||
220 | * we aren't already exiting to userspace for some other | ||
221 | * reason. */ | ||
222 | if (signal_pending(tsk)) { | ||
223 | vcpu->stat.signal_exits++; | ||
224 | run->exit_reason = KVM_EXIT_INTR; | ||
225 | r = -EINTR; | ||
226 | } else { | ||
227 | kvmppc_core_deliver_interrupts(vcpu); | ||
228 | } | ||
229 | } | ||
230 | |||
231 | return r; | ||
232 | } | ||
233 | |||
234 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
235 | struct kvm_sregs *sregs) | ||
236 | { | ||
237 | int i; | ||
238 | |||
239 | memset(sregs, 0, sizeof(struct kvm_sregs)); | ||
240 | |||
241 | sregs->pvr = vcpu->arch.pvr; /* after the memset, so it isn't wiped */ | ||
242 | for (i = 0; i < vcpu->arch.slb_max; i++) { | ||
243 | sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; | ||
244 | sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; | ||
245 | } | ||
246 | |||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
251 | struct kvm_sregs *sregs) | ||
252 | { | ||
253 | int i, j; | ||
254 | |||
255 | kvmppc_set_pvr(vcpu, sregs->pvr); | ||
256 | |||
257 | j = 0; | ||
258 | for (i = 0; i < vcpu->arch.slb_nr; i++) { | ||
259 | if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { | ||
260 | vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; | ||
261 | vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; | ||
262 | ++j; | ||
263 | } | ||
264 | } | ||
265 | vcpu->arch.slb_max = j; | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | int kvmppc_core_check_processor_compat(void) | ||
271 | { | ||
272 | if (cpu_has_feature(CPU_FTR_HVMODE_206)) | ||
273 | return 0; | ||
274 | return -EIO; | ||
275 | } | ||
276 | |||
277 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
278 | { | ||
279 | struct kvm_vcpu *vcpu; | ||
280 | int err = -ENOMEM; | ||
281 | unsigned long lpcr; | ||
282 | |||
283 | vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); | ||
284 | if (!vcpu) | ||
285 | goto out; | ||
286 | |||
287 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
288 | if (err) | ||
289 | goto free_vcpu; | ||
290 | |||
291 | vcpu->arch.shared = &vcpu->arch.shregs; | ||
292 | vcpu->arch.last_cpu = -1; | ||
293 | vcpu->arch.mmcr[0] = MMCR0_FC; | ||
294 | vcpu->arch.ctrl = CTRL_RUNLATCH; | ||
295 | /* default to host PVR, since we can't spoof it */ | ||
296 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
297 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); | ||
298 | |||
299 | lpcr = kvm->arch.host_lpcr & (LPCR_PECE | LPCR_LPES); | ||
300 | lpcr |= LPCR_VPM0 | LPCR_VRMA_L | (4UL << LPCR_DPFD_SH) | LPCR_HDICE; | ||
301 | vcpu->arch.lpcr = lpcr; | ||
302 | |||
303 | kvmppc_mmu_book3s_hv_init(vcpu); | ||
304 | |||
305 | return vcpu; | ||
306 | |||
307 | free_vcpu: | ||
308 | kfree(vcpu); | ||
309 | out: | ||
310 | return ERR_PTR(err); | ||
311 | } | ||
312 | |||
313 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | ||
314 | { | ||
315 | kvm_vcpu_uninit(vcpu); | ||
316 | kfree(vcpu); | ||
317 | } | ||
318 | |||
319 | extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | ||
320 | |||
321 | int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
322 | { | ||
323 | u64 now; | ||
324 | |||
325 | if (signal_pending(current)) { | ||
326 | run->exit_reason = KVM_EXIT_INTR; | ||
327 | return -EINTR; | ||
328 | } | ||
329 | |||
330 | flush_fp_to_thread(current); | ||
331 | flush_altivec_to_thread(current); | ||
332 | flush_vsx_to_thread(current); | ||
333 | preempt_disable(); | ||
334 | |||
335 | /* | ||
336 | * Make sure we are running on thread 0, and that | ||
337 | * secondary threads are offline. | ||
338 | * XXX we should also block attempts to bring any | ||
339 | * secondary threads online. | ||
340 | */ | ||
341 | if (threads_per_core > 1) { | ||
342 | int cpu = smp_processor_id(); | ||
343 | int thr = cpu_thread_in_core(cpu); | ||
344 | |||
345 | if (thr) | ||
346 | goto out; | ||
347 | while (++thr < threads_per_core) | ||
348 | if (cpu_online(cpu + thr)) | ||
349 | goto out; | ||
350 | } | ||
351 | |||
352 | kvm_guest_enter(); | ||
353 | |||
354 | __kvmppc_vcore_entry(NULL, vcpu); | ||
355 | |||
356 | kvm_guest_exit(); | ||
357 | |||
358 | preempt_enable(); | ||
359 | kvm_resched(vcpu); | ||
360 | |||
361 | now = get_tb(); | ||
362 | /* cancel pending dec exception if dec is positive */ | ||
363 | if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu)) | ||
364 | kvmppc_core_dequeue_dec(vcpu); | ||
365 | |||
366 | return kvmppc_handle_exit(run, vcpu, current); | ||
367 | |||
368 | out: | ||
369 | preempt_enable(); | ||
370 | return -EBUSY; | ||
371 | } | ||
372 | |||
373 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, | ||
374 | struct kvm_userspace_memory_region *mem) | ||
375 | { | ||
376 | if (mem->guest_phys_addr == 0 && mem->memory_size != 0) | ||
377 | return kvmppc_prepare_vrma(kvm, mem); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | void kvmppc_core_commit_memory_region(struct kvm *kvm, | ||
382 | struct kvm_userspace_memory_region *mem) | ||
383 | { | ||
384 | if (mem->guest_phys_addr == 0 && mem->memory_size != 0) | ||
385 | kvmppc_map_vrma(kvm, mem); | ||
386 | } | ||
387 | |||
388 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
389 | { | ||
390 | long r; | ||
391 | |||
392 | /* Allocate hashed page table */ | ||
393 | r = kvmppc_alloc_hpt(kvm); | ||
394 | |||
395 | return r; | ||
396 | } | ||
397 | |||
398 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
399 | { | ||
400 | kvmppc_free_hpt(kvm); | ||
401 | } | ||
402 | |||
403 | /* These are stubs for now */ | ||
404 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) | ||
405 | { | ||
406 | } | ||
407 | |||
408 | /* We don't need to emulate any privileged instructions or dcbz */ | ||
409 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
410 | unsigned int inst, int *advance) | ||
411 | { | ||
412 | return EMULATE_FAIL; | ||
413 | } | ||
414 | |||
415 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | ||
416 | { | ||
417 | return EMULATE_FAIL; | ||
418 | } | ||
419 | |||
420 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | ||
421 | { | ||
422 | return EMULATE_FAIL; | ||
423 | } | ||
424 | |||
425 | static int kvmppc_book3s_hv_init(void) | ||
426 | { | ||
427 | int r; | ||
428 | |||
429 | r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | ||
430 | |||
431 | if (r) | ||
432 | return r; | ||
433 | |||
434 | r = kvmppc_mmu_hv_init(); | ||
435 | |||
436 | return r; | ||
437 | } | ||
438 | |||
439 | static void kvmppc_book3s_hv_exit(void) | ||
440 | { | ||
441 | kvm_exit(); | ||
442 | } | ||
443 | |||
444 | module_init(kvmppc_book3s_hv_init); | ||
445 | module_exit(kvmppc_book3s_hv_exit); | ||
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S new file mode 100644 index 000000000000..532afaf19841 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
16 | * | ||
17 | * Derived from book3s_interrupts.S, which is: | ||
18 | * Copyright SUSE Linux Products GmbH 2009 | ||
19 | * | ||
20 | * Authors: Alexander Graf <agraf@suse.de> | ||
21 | */ | ||
22 | |||
23 | #include <asm/ppc_asm.h> | ||
24 | #include <asm/kvm_asm.h> | ||
25 | #include <asm/reg.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/asm-offsets.h> | ||
28 | #include <asm/exception-64s.h> | ||
29 | #include <asm/ppc-opcode.h> | ||
30 | |||
31 | /***************************************************************************** | ||
32 | * * | ||
33 | * Guest entry / exit code that is in kernel module memory (vmalloc) * | ||
34 | * * | ||
35 | ****************************************************************************/ | ||
36 | |||
37 | /* Registers: | ||
38 | * r4: vcpu pointer | ||
39 | */ | ||
40 | _GLOBAL(__kvmppc_vcore_entry) | ||
41 | |||
42 | /* Write correct stack frame */ | ||
43 | mflr r0 | ||
44 | std r0,PPC_LR_STKOFF(r1) | ||
45 | |||
46 | /* Save host state to the stack */ | ||
47 | stdu r1, -SWITCH_FRAME_SIZE(r1) | ||
48 | |||
49 | /* Save non-volatile registers (r14 - r31) */ | ||
50 | SAVE_NVGPRS(r1) | ||
51 | |||
52 | /* Save host DSCR */ | ||
53 | mfspr r3, SPRN_DSCR | ||
54 | std r3, HSTATE_DSCR(r13) | ||
55 | |||
56 | /* Save host DABR */ | ||
57 | mfspr r3, SPRN_DABR | ||
58 | std r3, HSTATE_DABR(r13) | ||
59 | |||
60 | /* Hard-disable interrupts */ | ||
61 | mfmsr r10 | ||
62 | std r10, HSTATE_HOST_MSR(r13) | ||
63 | rldicl r10,r10,48,1 | ||
64 | rotldi r10,r10,16 | ||
65 | mtmsrd r10,1 | ||
66 | |||
67 | /* Save host PMU registers and load guest PMU registers */ | ||
68 | /* R4 is live here (vcpu pointer) but not r3 or r5 */ | ||
69 | li r3, 1 | ||
70 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | ||
71 | mfspr r7, SPRN_MMCR0 /* save MMCR0 */ | ||
72 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ | ||
73 | isync | ||
74 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ | ||
75 | lbz r5, LPPACA_PMCINUSE(r3) | ||
76 | cmpwi r5, 0 | ||
77 | beq 31f /* skip if not */ | ||
78 | mfspr r5, SPRN_MMCR1 | ||
79 | mfspr r6, SPRN_MMCRA | ||
80 | std r7, HSTATE_MMCR(r13) | ||
81 | std r5, HSTATE_MMCR + 8(r13) | ||
82 | std r6, HSTATE_MMCR + 16(r13) | ||
83 | mfspr r3, SPRN_PMC1 | ||
84 | mfspr r5, SPRN_PMC2 | ||
85 | mfspr r6, SPRN_PMC3 | ||
86 | mfspr r7, SPRN_PMC4 | ||
87 | mfspr r8, SPRN_PMC5 | ||
88 | mfspr r9, SPRN_PMC6 | ||
89 | stw r3, HSTATE_PMC(r13) | ||
90 | stw r5, HSTATE_PMC + 4(r13) | ||
91 | stw r6, HSTATE_PMC + 8(r13) | ||
92 | stw r7, HSTATE_PMC + 12(r13) | ||
93 | stw r8, HSTATE_PMC + 16(r13) | ||
94 | stw r9, HSTATE_PMC + 20(r13) | ||
95 | 31: | ||
96 | |||
97 | /* | ||
98 | * Put whatever is in the decrementer into the | ||
99 | * hypervisor decrementer. | ||
100 | */ | ||
101 | mfspr r8,SPRN_DEC | ||
102 | mftb r7 | ||
103 | mtspr SPRN_HDEC,r8 | ||
104 | extsw r8,r8 | ||
105 | add r8,r8,r7 | ||
106 | std r8,HSTATE_DECEXP(r13) | ||
107 | |||
108 | /* Jump to partition switch code */ | ||
109 | bl .kvmppc_hv_entry_trampoline | ||
110 | nop | ||
111 | |||
112 | /* | ||
113 | * We return here in virtual mode after the guest exits | ||
114 | * with something that we can't handle in real mode. | ||
115 | * Interrupts are enabled again at this point. | ||
116 | */ | ||
117 | |||
118 | .global kvmppc_handler_highmem | ||
119 | kvmppc_handler_highmem: | ||
120 | |||
121 | /* | ||
122 | * Register usage at this point: | ||
123 | * | ||
124 | * R1 = host R1 | ||
125 | * R2 = host R2 | ||
126 | * R12 = exit handler id | ||
127 | * R13 = PACA | ||
128 | */ | ||
129 | |||
130 | /* Restore non-volatile host registers (r14 - r31) */ | ||
131 | REST_NVGPRS(r1) | ||
132 | |||
133 | addi r1, r1, SWITCH_FRAME_SIZE | ||
134 | ld r0, PPC_LR_STKOFF(r1) | ||
135 | mtlr r0 | ||
136 | blr | ||
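
The DEC handling on entry above (and its mirror image in the rmhandlers exit path) keeps a single absolute expiry in HSTATE_DECEXP. A minimal C sketch of that bookkeeping, with hypothetical names; the SPR accesses are elided since they are privileged:

	/* Sketch only; hstate_sketch and both helpers are hypothetical. */
	struct hstate_sketch {
		long long dec_expires;		/* HSTATE_DECEXP */
	};

	/* entry: move host DEC into HDEC, remember when it would have fired */
	static void host_dec_save(struct hstate_sketch *hs, int dec,
				  unsigned long long tb)
	{
		/* mtspr(SPRN_HDEC, dec); */
		hs->dec_expires = (long long)dec + tb;	/* extsw r8 + add */
	}

	/* exit: reload DEC with whatever host time is still left */
	static int host_dec_restore(struct hstate_sketch *hs,
				    unsigned long long tb)
	{
		return (int)(hs->dec_expires - tb);	/* subf; mtspr(SPRN_DEC) */
	}
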
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S new file mode 100644 index 000000000000..9af264840b98 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -0,0 +1,806 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
12 | * | ||
13 | * Derived from book3s_rmhandlers.S and other files, which are: | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/kvm_asm.h> | ||
22 | #include <asm/reg.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/asm-offsets.h> | ||
25 | #include <asm/exception-64s.h> | ||
26 | |||
27 | /***************************************************************************** | ||
28 | * * | ||
29 | * Real Mode handlers that need to be in the linear mapping * | ||
30 | * * | ||
31 | ****************************************************************************/ | ||
32 | |||
33 | #define SHADOW_VCPU_OFF PACA_KVM_SVCPU | ||
34 | |||
35 | .globl kvmppc_skip_interrupt | ||
36 | kvmppc_skip_interrupt: | ||
37 | mfspr r13,SPRN_SRR0 | ||
38 | addi r13,r13,4 | ||
39 | mtspr SPRN_SRR0,r13 | ||
40 | GET_SCRATCH0(r13) | ||
41 | rfid | ||
42 | b . | ||
43 | |||
44 | .globl kvmppc_skip_Hinterrupt | ||
45 | kvmppc_skip_Hinterrupt: | ||
46 | mfspr r13,SPRN_HSRR0 | ||
47 | addi r13,r13,4 | ||
48 | mtspr SPRN_HSRR0,r13 | ||
49 | GET_SCRATCH0(r13) | ||
50 | hrfid | ||
51 | b . | ||
52 | |||
53 | /* | ||
54 | * Call kvmppc_handler_trampoline_enter in real mode. | ||
55 | * Must be called with interrupts hard-disabled. | ||
56 | * | ||
57 | * Input Registers: | ||
58 | * | ||
59 | * LR = return address to continue at after eventually re-enabling MMU | ||
60 | */ | ||
61 | _GLOBAL(kvmppc_hv_entry_trampoline) | ||
62 | mfmsr r10 | ||
63 | LOAD_REG_ADDR(r5, kvmppc_hv_entry) | ||
64 | li r0,MSR_RI | ||
65 | andc r0,r10,r0 | ||
66 | li r6,MSR_IR | MSR_DR | ||
67 | andc r6,r10,r6 | ||
68 | mtmsrd r0,1 /* clear RI in MSR */ | ||
69 | mtsrr0 r5 | ||
70 | mtsrr1 r6 | ||
71 | RFI | ||
72 | |||
73 | #define ULONG_SIZE 8 | ||
74 | #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) | ||
75 | |||
76 | /****************************************************************************** | ||
77 | * * | ||
78 | * Entry code * | ||
79 | * * | ||
80 | *****************************************************************************/ | ||
81 | |||
82 | .global kvmppc_hv_entry | ||
83 | kvmppc_hv_entry: | ||
84 | |||
85 | /* Required state: | ||
86 | * | ||
87 | * R4 = vcpu pointer | ||
88 | * MSR = ~IR|DR | ||
89 | * R13 = PACA | ||
90 | * R1 = host R1 | ||
91 | * all other volatile GPRS = free | ||
92 | */ | ||
93 | mflr r0 | ||
94 | std r0, HSTATE_VMHANDLER(r13) | ||
95 | |||
96 | ld r14, VCPU_GPR(r14)(r4) | ||
97 | ld r15, VCPU_GPR(r15)(r4) | ||
98 | ld r16, VCPU_GPR(r16)(r4) | ||
99 | ld r17, VCPU_GPR(r17)(r4) | ||
100 | ld r18, VCPU_GPR(r18)(r4) | ||
101 | ld r19, VCPU_GPR(r19)(r4) | ||
102 | ld r20, VCPU_GPR(r20)(r4) | ||
103 | ld r21, VCPU_GPR(r21)(r4) | ||
104 | ld r22, VCPU_GPR(r22)(r4) | ||
105 | ld r23, VCPU_GPR(r23)(r4) | ||
106 | ld r24, VCPU_GPR(r24)(r4) | ||
107 | ld r25, VCPU_GPR(r25)(r4) | ||
108 | ld r26, VCPU_GPR(r26)(r4) | ||
109 | ld r27, VCPU_GPR(r27)(r4) | ||
110 | ld r28, VCPU_GPR(r28)(r4) | ||
111 | ld r29, VCPU_GPR(r29)(r4) | ||
112 | ld r30, VCPU_GPR(r30)(r4) | ||
113 | ld r31, VCPU_GPR(r31)(r4) | ||
114 | |||
115 | /* Load guest PMU registers */ | ||
116 | /* R4 is live here (vcpu pointer) */ | ||
117 | li r3, 1 | ||
118 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | ||
119 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | ||
120 | isync | ||
121 | lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ | ||
122 | lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ | ||
123 | lwz r6, VCPU_PMC + 8(r4) | ||
124 | lwz r7, VCPU_PMC + 12(r4) | ||
125 | lwz r8, VCPU_PMC + 16(r4) | ||
126 | lwz r9, VCPU_PMC + 20(r4) | ||
127 | mtspr SPRN_PMC1, r3 | ||
128 | mtspr SPRN_PMC2, r5 | ||
129 | mtspr SPRN_PMC3, r6 | ||
130 | mtspr SPRN_PMC4, r7 | ||
131 | mtspr SPRN_PMC5, r8 | ||
132 | mtspr SPRN_PMC6, r9 | ||
133 | ld r3, VCPU_MMCR(r4) | ||
134 | ld r5, VCPU_MMCR + 8(r4) | ||
135 | ld r6, VCPU_MMCR + 16(r4) | ||
136 | mtspr SPRN_MMCR1, r5 | ||
137 | mtspr SPRN_MMCRA, r6 | ||
138 | mtspr SPRN_MMCR0, r3 | ||
139 | isync | ||
140 | |||
141 | /* Load up FP, VMX and VSX registers */ | ||
142 | bl kvmppc_load_fp | ||
143 | |||
144 | /* Switch DSCR to guest value */ | ||
145 | ld r5, VCPU_DSCR(r4) | ||
146 | mtspr SPRN_DSCR, r5 | ||
147 | |||
148 | /* | ||
149 | * Set the decrementer to the guest decrementer. | ||
150 | */ | ||
151 | ld r8,VCPU_DEC_EXPIRES(r4) | ||
152 | mftb r7 | ||
153 | subf r3,r7,r8 | ||
154 | mtspr SPRN_DEC,r3 | ||
155 | stw r3,VCPU_DEC(r4) | ||
156 | |||
157 | ld r5, VCPU_SPRG0(r4) | ||
158 | ld r6, VCPU_SPRG1(r4) | ||
159 | ld r7, VCPU_SPRG2(r4) | ||
160 | ld r8, VCPU_SPRG3(r4) | ||
161 | mtspr SPRN_SPRG0, r5 | ||
162 | mtspr SPRN_SPRG1, r6 | ||
163 | mtspr SPRN_SPRG2, r7 | ||
164 | mtspr SPRN_SPRG3, r8 | ||
165 | |||
166 | /* Save R1 in the PACA */ | ||
167 | std r1, HSTATE_HOST_R1(r13) | ||
168 | |||
169 | /* Load up DAR and DSISR */ | ||
170 | ld r5, VCPU_DAR(r4) | ||
171 | lwz r6, VCPU_DSISR(r4) | ||
172 | mtspr SPRN_DAR, r5 | ||
173 | mtspr SPRN_DSISR, r6 | ||
174 | |||
175 | /* Set partition DABR */ | ||
176 | li r5,3 | ||
177 | ld r6,VCPU_DABR(r4) | ||
178 | mtspr SPRN_DABRX,r5 | ||
179 | mtspr SPRN_DABR,r6 | ||
180 | |||
181 | /* Restore AMR and UAMOR, set AMOR to all 1s */ | ||
182 | ld r5,VCPU_AMR(r4) | ||
183 | ld r6,VCPU_UAMOR(r4) | ||
184 | li r7,-1 | ||
185 | mtspr SPRN_AMR,r5 | ||
186 | mtspr SPRN_UAMOR,r6 | ||
187 | mtspr SPRN_AMOR,r7 | ||
188 | |||
189 | /* Clear out SLB */ | ||
190 | li r6,0 | ||
191 | slbmte r6,r6 | ||
192 | slbia | ||
193 | ptesync | ||
194 | |||
195 | /* Switch to guest partition. */ | ||
196 | ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ | ||
197 | ld r6,KVM_SDR1(r9) | ||
198 | lwz r7,KVM_LPID(r9) | ||
199 | li r0,LPID_RSVD /* switch to reserved LPID */ | ||
200 | mtspr SPRN_LPID,r0 | ||
201 | ptesync | ||
202 | mtspr SPRN_SDR1,r6 /* switch to partition page table */ | ||
203 | mtspr SPRN_LPID,r7 | ||
204 | isync | ||
205 | ld r8,VCPU_LPCR(r4) | ||
206 | mtspr SPRN_LPCR,r8 | ||
207 | isync | ||
208 | |||
209 | /* Check if HDEC expires soon */ | ||
210 | mfspr r3,SPRN_HDEC | ||
211 | cmpwi r3,10 | ||
212 | li r12,BOOK3S_INTERRUPT_HV_DECREMENTER | ||
213 | mr r9,r4 | ||
214 | blt hdec_soon | ||
215 | |||
216 | /* | ||
217 | * Invalidate the TLB if we could possibly have stale TLB | ||
218 | * entries for this partition on this core due to the use | ||
219 | * of tlbiel. | ||
220 | */ | ||
221 | ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ | ||
222 | lwz r5,VCPU_VCPUID(r4) | ||
223 | lhz r6,PACAPACAINDEX(r13) | ||
224 | lhz r8,VCPU_LAST_CPU(r4) | ||
225 | sldi r7,r6,1 /* see if this is the same vcpu */ | ||
226 | add r7,r7,r9 /* as last ran on this pcpu */ | ||
227 | lhz r0,KVM_LAST_VCPU(r7) | ||
228 | cmpw r6,r8 /* on the same cpu core as last time? */ | ||
229 | bne 3f | ||
230 | cmpw r0,r5 /* same vcpu as this core last ran? */ | ||
231 | beq 1f | ||
232 | 3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */ | ||
233 | sth r5,KVM_LAST_VCPU(r7) | ||
234 | li r6,128 | ||
235 | mtctr r6 | ||
236 | li r7,0x800 /* IS field = 0b10 */ | ||
237 | ptesync | ||
238 | 2: tlbiel r7 | ||
239 | addi r7,r7,0x1000 | ||
240 | bdnz 2b | ||
241 | ptesync | ||
242 | 1: | ||
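
Read back into C, the invalidation loop above issues one local tlbiel per TLB congruence class; this sketch assumes the 128 classes and 0x1000 stride match this core's TLB geometry, exactly as the asm's constants say:

	static void flush_partition_tlb_sketch(void)
	{
		unsigned long rb = 0x800;	/* IS field = 0b10 */
		int set;

		for (set = 0; set < 128; set++) {
			/* asm volatile("tlbiel %0" : : "r" (rb)); -- privileged */
			rb += 0x1000;		/* next congruence class */
		}
		/* the real code brackets the loop with ptesync */
	}
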
243 | |||
244 | /* Save purr/spurr */ | ||
245 | mfspr r5,SPRN_PURR | ||
246 | mfspr r6,SPRN_SPURR | ||
247 | std r5,HSTATE_PURR(r13) | ||
248 | std r6,HSTATE_SPURR(r13) | ||
249 | ld r7,VCPU_PURR(r4) | ||
250 | ld r8,VCPU_SPURR(r4) | ||
251 | mtspr SPRN_PURR,r7 | ||
252 | mtspr SPRN_SPURR,r8 | ||
253 | |||
254 | /* Load up guest SLB entries */ | ||
255 | lwz r5,VCPU_SLB_MAX(r4) | ||
256 | cmpwi r5,0 | ||
257 | beq 9f | ||
258 | mtctr r5 | ||
259 | addi r6,r4,VCPU_SLB | ||
260 | 1: ld r8,VCPU_SLB_E(r6) | ||
261 | ld r9,VCPU_SLB_V(r6) | ||
262 | slbmte r9,r8 | ||
263 | addi r6,r6,VCPU_SLB_SIZE | ||
264 | bdnz 1b | ||
265 | 9: | ||
266 | |||
267 | /* Restore state of CTRL run bit; assume 1 on entry */ | ||
268 | lwz r5,VCPU_CTRL(r4) | ||
269 | andi. r5,r5,1 | ||
270 | bne 4f | ||
271 | mfspr r6,SPRN_CTRLF | ||
272 | clrrdi r6,r6,1 | ||
273 | mtspr SPRN_CTRLT,r6 | ||
274 | 4: | ||
275 | ld r6, VCPU_CTR(r4) | ||
276 | lwz r7, VCPU_XER(r4) | ||
277 | |||
278 | mtctr r6 | ||
279 | mtxer r7 | ||
280 | |||
281 | /* Move SRR0 and SRR1 into the respective regs */ | ||
282 | ld r6, VCPU_SRR0(r4) | ||
283 | ld r7, VCPU_SRR1(r4) | ||
284 | mtspr SPRN_SRR0, r6 | ||
285 | mtspr SPRN_SRR1, r7 | ||
286 | |||
287 | ld r10, VCPU_PC(r4) | ||
288 | |||
289 | ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */ | ||
290 | rldicl r11, r11, 63 - MSR_HV_LG, 1 | ||
291 | rotldi r11, r11, 1 + MSR_HV_LG | ||
292 | ori r11, r11, MSR_ME | ||
293 | |||
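The rldicl/rotldi pair above clears MSR_HV without materializing a 64-bit mask constant: rotate the HV bit to the top, let rldicl's mask drop it, rotate back. In C it reduces to the following (bit positions taken from asm/reg.h; a sketch, not part of the patch):

	static unsigned long guest_entry_msr(unsigned long msr)
	{
		const unsigned long MSR_HV = 1ul << 60;	/* MSR_HV_LG */
		const unsigned long MSR_ME = 1ul << 12;	/* MSR_ME_LG */

		return (msr & ~MSR_HV) | MSR_ME;	/* guest gets HV=0, ME=1 */
	}
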
294 | fast_guest_return: | ||
295 | mtspr SPRN_HSRR0,r10 | ||
296 | mtspr SPRN_HSRR1,r11 | ||
297 | |||
298 | /* Activate guest mode, so faults get handled by KVM */ | ||
299 | li r9, KVM_GUEST_MODE_GUEST | ||
300 | stb r9, HSTATE_IN_GUEST(r13) | ||
301 | |||
302 | /* Enter guest */ | ||
303 | |||
304 | ld r5, VCPU_LR(r4) | ||
305 | lwz r6, VCPU_CR(r4) | ||
306 | mtlr r5 | ||
307 | mtcr r6 | ||
308 | |||
309 | ld r0, VCPU_GPR(r0)(r4) | ||
310 | ld r1, VCPU_GPR(r1)(r4) | ||
311 | ld r2, VCPU_GPR(r2)(r4) | ||
312 | ld r3, VCPU_GPR(r3)(r4) | ||
313 | ld r5, VCPU_GPR(r5)(r4) | ||
314 | ld r6, VCPU_GPR(r6)(r4) | ||
315 | ld r7, VCPU_GPR(r7)(r4) | ||
316 | ld r8, VCPU_GPR(r8)(r4) | ||
317 | ld r9, VCPU_GPR(r9)(r4) | ||
318 | ld r10, VCPU_GPR(r10)(r4) | ||
319 | ld r11, VCPU_GPR(r11)(r4) | ||
320 | ld r12, VCPU_GPR(r12)(r4) | ||
321 | ld r13, VCPU_GPR(r13)(r4) | ||
322 | |||
323 | ld r4, VCPU_GPR(r4)(r4) | ||
324 | |||
325 | hrfid | ||
326 | b . | ||
327 | |||
328 | /****************************************************************************** | ||
329 | * * | ||
330 | * Exit code * | ||
331 | * * | ||
332 | *****************************************************************************/ | ||
333 | |||
334 | /* | ||
335 | * We come here from the first-level interrupt handlers. | ||
336 | */ | ||
337 | .globl kvmppc_interrupt | ||
338 | kvmppc_interrupt: | ||
339 | /* | ||
340 | * Register contents: | ||
341 | * R12 = interrupt vector | ||
342 | * R13 = PACA | ||
343 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 | ||
344 | * guest R13 saved in SPRN_SCRATCH0 | ||
345 | */ | ||
346 | /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ | ||
347 | std r9, HSTATE_HOST_R2(r13) | ||
348 | ld r9, HSTATE_KVM_VCPU(r13) | ||
349 | |||
350 | /* Save registers */ | ||
351 | |||
352 | std r0, VCPU_GPR(r0)(r9) | ||
353 | std r1, VCPU_GPR(r1)(r9) | ||
354 | std r2, VCPU_GPR(r2)(r9) | ||
355 | std r3, VCPU_GPR(r3)(r9) | ||
356 | std r4, VCPU_GPR(r4)(r9) | ||
357 | std r5, VCPU_GPR(r5)(r9) | ||
358 | std r6, VCPU_GPR(r6)(r9) | ||
359 | std r7, VCPU_GPR(r7)(r9) | ||
360 | std r8, VCPU_GPR(r8)(r9) | ||
361 | ld r0, HSTATE_HOST_R2(r13) | ||
362 | std r0, VCPU_GPR(r9)(r9) | ||
363 | std r10, VCPU_GPR(r10)(r9) | ||
364 | std r11, VCPU_GPR(r11)(r9) | ||
365 | ld r3, HSTATE_SCRATCH0(r13) | ||
366 | lwz r4, HSTATE_SCRATCH1(r13) | ||
367 | std r3, VCPU_GPR(r12)(r9) | ||
368 | stw r4, VCPU_CR(r9) | ||
369 | |||
370 | /* Restore R1/R2 so we can handle faults */ | ||
371 | ld r1, HSTATE_HOST_R1(r13) | ||
372 | ld r2, PACATOC(r13) | ||
373 | |||
374 | mfspr r10, SPRN_SRR0 | ||
375 | mfspr r11, SPRN_SRR1 | ||
376 | std r10, VCPU_SRR0(r9) | ||
377 | std r11, VCPU_SRR1(r9) | ||
378 | andi. r0, r12, 2 /* need to read HSRR0/1? */ | ||
379 | beq 1f | ||
380 | mfspr r10, SPRN_HSRR0 | ||
381 | mfspr r11, SPRN_HSRR1 | ||
382 | clrrdi r12, r12, 2 | ||
383 | 1: std r10, VCPU_PC(r9) | ||
384 | std r11, VCPU_MSR(r9) | ||
385 | |||
386 | GET_SCRATCH0(r3) | ||
387 | mflr r4 | ||
388 | std r3, VCPU_GPR(r13)(r9) | ||
389 | std r4, VCPU_LR(r9) | ||
390 | |||
391 | /* Unset guest mode */ | ||
392 | li r0, KVM_GUEST_MODE_NONE | ||
393 | stb r0, HSTATE_IN_GUEST(r13) | ||
394 | |||
395 | stw r12,VCPU_TRAP(r9) | ||
396 | |||
397 | /* See if this is a leftover HDEC interrupt */ | ||
398 | cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER | ||
399 | bne 2f | ||
400 | mfspr r3,SPRN_HDEC | ||
401 | cmpwi r3,0 | ||
402 | bge ignore_hdec | ||
403 | 2: | ||
404 | |||
405 | /* Check for mediated interrupts (could be done earlier really ...) */ | ||
406 | cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL | ||
407 | bne+ 1f | ||
408 | ld r5,VCPU_LPCR(r9) | ||
409 | andi. r0,r11,MSR_EE | ||
410 | beq 1f | ||
411 | andi. r0,r5,LPCR_MER | ||
412 | bne bounce_ext_interrupt | ||
413 | 1: | ||
414 | |||
415 | /* Save DEC */ | ||
416 | mfspr r5,SPRN_DEC | ||
417 | mftb r6 | ||
418 | extsw r5,r5 | ||
419 | add r5,r5,r6 | ||
420 | std r5,VCPU_DEC_EXPIRES(r9) | ||
421 | |||
422 | /* Save HEIR (HV emulation assist reg) in last_inst | ||
423 | if this is an HEI (HV emulation interrupt, e40) */ | ||
424 | li r3,-1 | ||
425 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST | ||
426 | bne 11f | ||
427 | mfspr r3,SPRN_HEIR | ||
428 | 11: stw r3,VCPU_LAST_INST(r9) | ||
429 | |||
430 | /* Save more register state */ | ||
431 | mfxer r5 | ||
432 | mfdar r6 | ||
433 | mfdsisr r7 | ||
434 | mfctr r8 | ||
435 | |||
436 | stw r5, VCPU_XER(r9) | ||
437 | std r6, VCPU_DAR(r9) | ||
438 | stw r7, VCPU_DSISR(r9) | ||
439 | std r8, VCPU_CTR(r9) | ||
440 | /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */ | ||
441 | cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | ||
442 | beq 6f | ||
443 | 7: std r6, VCPU_FAULT_DAR(r9) | ||
444 | stw r7, VCPU_FAULT_DSISR(r9) | ||
445 | |||
446 | /* Save guest CTRL register, set runlatch to 1 */ | ||
447 | mfspr r6,SPRN_CTRLF | ||
448 | stw r6,VCPU_CTRL(r9) | ||
449 | andi. r0,r6,1 | ||
450 | bne 4f | ||
451 | ori r6,r6,1 | ||
452 | mtspr SPRN_CTRLT,r6 | ||
453 | 4: | ||
454 | /* Read the guest SLB and save it away */ | ||
455 | lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ | ||
456 | mtctr r0 | ||
457 | li r6,0 | ||
458 | addi r7,r9,VCPU_SLB | ||
459 | li r5,0 | ||
460 | 1: slbmfee r8,r6 | ||
461 | andis. r0,r8,SLB_ESID_V@h | ||
462 | beq 2f | ||
463 | add r8,r8,r6 /* put index in */ | ||
464 | slbmfev r3,r6 | ||
465 | std r8,VCPU_SLB_E(r7) | ||
466 | std r3,VCPU_SLB_V(r7) | ||
467 | addi r7,r7,VCPU_SLB_SIZE | ||
468 | addi r5,r5,1 | ||
469 | 2: addi r6,r6,1 | ||
470 | bdnz 1b | ||
471 | stw r5,VCPU_SLB_MAX(r9) | ||
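
For reference, the harvest loop above in C form. This is a sketch: mfslbe()/mfslbv() are hypothetical stand-ins for the slbmfee/slbmfev instructions, and 0x0000000008000000 is the architected SLB_ESID_V valid bit the andis. tests.

	/* Hypothetical stand-ins for slbmfee/slbmfev; sketch only. */
	static unsigned long mfslbe(int i) { return 0; }
	static unsigned long mfslbv(int i) { return 0; }

	struct slb_sketch { unsigned long orige, origv; };

	static int save_guest_slb(struct slb_sketch *slb, int slb_nr)
	{
		int i, max = 0;

		for (i = 0; i < slb_nr; i++) {
			unsigned long e = mfslbe(i);
			unsigned long v = mfslbv(i);

			if (!(e & 0x0000000008000000ul))	/* SLB_ESID_V */
				continue;
			slb[max].orige = e | i;		/* put index in, as above */
			slb[max].origv = v;
			max++;
		}
		return max;				/* becomes VCPU_SLB_MAX */
	}
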
472 | |||
473 | /* | ||
474 | * Save the guest PURR/SPURR | ||
475 | */ | ||
476 | mfspr r5,SPRN_PURR | ||
477 | mfspr r6,SPRN_SPURR | ||
478 | ld r7,VCPU_PURR(r9) | ||
479 | ld r8,VCPU_SPURR(r9) | ||
480 | std r5,VCPU_PURR(r9) | ||
481 | std r6,VCPU_SPURR(r9) | ||
482 | subf r5,r7,r5 | ||
483 | subf r6,r8,r6 | ||
484 | |||
485 | /* | ||
486 | * Restore host PURR/SPURR and add guest times | ||
487 | * so that the time in the guest gets accounted. | ||
488 | */ | ||
489 | ld r3,HSTATE_PURR(r13) | ||
490 | ld r4,HSTATE_SPURR(r13) | ||
491 | add r3,r3,r5 | ||
492 | add r4,r4,r6 | ||
493 | mtspr SPRN_PURR,r3 | ||
494 | mtspr SPRN_SPURR,r4 | ||
495 | |||
496 | /* Clear out SLB */ | ||
497 | li r5,0 | ||
498 | slbmte r5,r5 | ||
499 | slbia | ||
500 | ptesync | ||
501 | |||
502 | hdec_soon: | ||
503 | /* Switch back to host partition */ | ||
504 | ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ | ||
505 | ld r6,KVM_HOST_SDR1(r4) | ||
506 | lwz r7,KVM_HOST_LPID(r4) | ||
507 | li r8,LPID_RSVD /* switch to reserved LPID */ | ||
508 | mtspr SPRN_LPID,r8 | ||
509 | ptesync | ||
510 | mtspr SPRN_SDR1,r6 /* switch to partition page table */ | ||
511 | mtspr SPRN_LPID,r7 | ||
512 | isync | ||
513 | lis r8,0x7fff /* MAX_INT@h */ | ||
514 | mtspr SPRN_HDEC,r8 | ||
515 | |||
516 | ld r8,KVM_HOST_LPCR(r4) | ||
517 | mtspr SPRN_LPCR,r8 | ||
518 | isync | ||
519 | |||
520 | /* load host SLB entries */ | ||
521 | ld r8,PACA_SLBSHADOWPTR(r13) | ||
522 | |||
523 | .rept SLB_NUM_BOLTED | ||
524 | ld r5,SLBSHADOW_SAVEAREA(r8) | ||
525 | ld r6,SLBSHADOW_SAVEAREA+8(r8) | ||
526 | andis. r7,r5,SLB_ESID_V@h | ||
527 | beq 1f | ||
528 | slbmte r6,r5 | ||
529 | 1: addi r8,r8,16 | ||
530 | .endr | ||
531 | |||
532 | /* Save and reset AMR and UAMOR before turning on the MMU */ | ||
533 | mfspr r5,SPRN_AMR | ||
534 | mfspr r6,SPRN_UAMOR | ||
535 | std r5,VCPU_AMR(r9) | ||
536 | std r6,VCPU_UAMOR(r9) | ||
537 | li r6,0 | ||
538 | mtspr SPRN_AMR,r6 | ||
539 | |||
540 | /* Restore host DABR and DABRX */ | ||
541 | ld r5,HSTATE_DABR(r13) | ||
542 | li r6,7 | ||
543 | mtspr SPRN_DABR,r5 | ||
544 | mtspr SPRN_DABRX,r6 | ||
545 | |||
546 | /* Switch DSCR back to host value */ | ||
547 | mfspr r8, SPRN_DSCR | ||
548 | ld r7, HSTATE_DSCR(r13) | ||
549 | std r8, VCPU_DSCR(r9) /* r9 = vcpu; r7 holds the host DSCR value */ | ||
550 | mtspr SPRN_DSCR, r7 | ||
551 | |||
552 | /* Save non-volatile GPRs */ | ||
553 | std r14, VCPU_GPR(r14)(r9) | ||
554 | std r15, VCPU_GPR(r15)(r9) | ||
555 | std r16, VCPU_GPR(r16)(r9) | ||
556 | std r17, VCPU_GPR(r17)(r9) | ||
557 | std r18, VCPU_GPR(r18)(r9) | ||
558 | std r19, VCPU_GPR(r19)(r9) | ||
559 | std r20, VCPU_GPR(r20)(r9) | ||
560 | std r21, VCPU_GPR(r21)(r9) | ||
561 | std r22, VCPU_GPR(r22)(r9) | ||
562 | std r23, VCPU_GPR(r23)(r9) | ||
563 | std r24, VCPU_GPR(r24)(r9) | ||
564 | std r25, VCPU_GPR(r25)(r9) | ||
565 | std r26, VCPU_GPR(r26)(r9) | ||
566 | std r27, VCPU_GPR(r27)(r9) | ||
567 | std r28, VCPU_GPR(r28)(r9) | ||
568 | std r29, VCPU_GPR(r29)(r9) | ||
569 | std r30, VCPU_GPR(r30)(r9) | ||
570 | std r31, VCPU_GPR(r31)(r9) | ||
571 | |||
572 | /* Save SPRGs */ | ||
573 | mfspr r3, SPRN_SPRG0 | ||
574 | mfspr r4, SPRN_SPRG1 | ||
575 | mfspr r5, SPRN_SPRG2 | ||
576 | mfspr r6, SPRN_SPRG3 | ||
577 | std r3, VCPU_SPRG0(r9) | ||
578 | std r4, VCPU_SPRG1(r9) | ||
579 | std r5, VCPU_SPRG2(r9) | ||
580 | std r6, VCPU_SPRG3(r9) | ||
581 | |||
582 | /* Save PMU registers */ | ||
583 | li r3, 1 | ||
584 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | ||
585 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ | ||
586 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | ||
587 | isync | ||
588 | mfspr r5, SPRN_MMCR1 | ||
589 | mfspr r6, SPRN_MMCRA | ||
590 | std r4, VCPU_MMCR(r9) | ||
591 | std r5, VCPU_MMCR + 8(r9) | ||
592 | std r6, VCPU_MMCR + 16(r9) | ||
593 | mfspr r3, SPRN_PMC1 | ||
594 | mfspr r4, SPRN_PMC2 | ||
595 | mfspr r5, SPRN_PMC3 | ||
596 | mfspr r6, SPRN_PMC4 | ||
597 | mfspr r7, SPRN_PMC5 | ||
598 | mfspr r8, SPRN_PMC6 | ||
599 | stw r3, VCPU_PMC(r9) | ||
600 | stw r4, VCPU_PMC + 4(r9) | ||
601 | stw r5, VCPU_PMC + 8(r9) | ||
602 | stw r6, VCPU_PMC + 12(r9) | ||
603 | stw r7, VCPU_PMC + 16(r9) | ||
604 | stw r8, VCPU_PMC + 20(r9) | ||
605 | 22: | ||
606 | /* save FP state */ | ||
607 | mr r3, r9 | ||
608 | bl .kvmppc_save_fp | ||
609 | |||
610 | /* | ||
611 | * Reload DEC. HDEC interrupts were disabled when | ||
612 | * we reloaded the host's LPCR value. | ||
613 | */ | ||
614 | ld r3, HSTATE_DECEXP(r13) | ||
615 | mftb r4 | ||
616 | subf r4, r4, r3 | ||
617 | mtspr SPRN_DEC, r4 | ||
618 | |||
619 | /* Reload the host's PMU registers */ | ||
620 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ | ||
621 | lbz r4, LPPACA_PMCINUSE(r3) | ||
622 | cmpwi r4, 0 | ||
623 | beq 23f /* skip if not */ | ||
624 | lwz r3, HSTATE_PMC(r13) | ||
625 | lwz r4, HSTATE_PMC + 4(r13) | ||
626 | lwz r5, HSTATE_PMC + 8(r13) | ||
627 | lwz r6, HSTATE_PMC + 12(r13) | ||
628 | lwz r8, HSTATE_PMC + 16(r13) | ||
629 | lwz r9, HSTATE_PMC + 20(r13) | ||
630 | mtspr SPRN_PMC1, r3 | ||
631 | mtspr SPRN_PMC2, r4 | ||
632 | mtspr SPRN_PMC3, r5 | ||
633 | mtspr SPRN_PMC4, r6 | ||
634 | mtspr SPRN_PMC5, r8 | ||
635 | mtspr SPRN_PMC6, r9 | ||
636 | ld r3, HSTATE_MMCR(r13) | ||
637 | ld r4, HSTATE_MMCR + 8(r13) | ||
638 | ld r5, HSTATE_MMCR + 16(r13) | ||
639 | mtspr SPRN_MMCR1, r4 | ||
640 | mtspr SPRN_MMCRA, r5 | ||
641 | mtspr SPRN_MMCR0, r3 | ||
642 | isync | ||
643 | 23: | ||
644 | /* | ||
645 | * For external and machine check interrupts, we need | ||
646 | * to call the Linux handler to process the interrupt. | ||
647 | * We do that by jumping to the interrupt vector address | ||
648 | * which we have in r12. The [h]rfid at the end of the | ||
649 | * handler will return to the book3s_hv_interrupts.S code. | ||
650 | * For other interrupts we do the rfid to get back | ||
651 | * to the book3s_interrupts.S code here. | ||
652 | */ | ||
653 | ld r8, HSTATE_VMHANDLER(r13) | ||
654 | ld r7, HSTATE_HOST_MSR(r13) | ||
655 | |||
656 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL | ||
657 | beq 11f | ||
658 | cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK | ||
659 | |||
660 | /* RFI into the highmem handler, or branch to interrupt handler */ | ||
661 | mfmsr r6 | ||
662 | mtctr r12 | ||
663 | li r0, MSR_RI | ||
664 | andc r6, r6, r0 | ||
665 | mtmsrd r6, 1 /* Clear RI in MSR */ | ||
666 | mtsrr0 r8 | ||
667 | mtsrr1 r7 | ||
668 | beqctr | ||
669 | RFI | ||
670 | |||
671 | 11: mtspr SPRN_HSRR0, r8 | ||
672 | mtspr SPRN_HSRR1, r7 | ||
673 | ba 0x500 | ||
674 | |||
675 | 6: mfspr r6,SPRN_HDAR | ||
676 | mfspr r7,SPRN_HDSISR | ||
677 | b 7b | ||
678 | |||
679 | ignore_hdec: | ||
680 | mr r4,r9 | ||
681 | b fast_guest_return | ||
682 | |||
683 | bounce_ext_interrupt: | ||
684 | mr r4,r9 | ||
685 | mtspr SPRN_SRR0,r10 | ||
686 | mtspr SPRN_SRR1,r11 | ||
687 | li r10,BOOK3S_INTERRUPT_EXTERNAL | ||
688 | LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME); | ||
689 | b fast_guest_return | ||
690 | |||
691 | /* | ||
692 | * Save away FP, VMX and VSX registers. | ||
693 | * r3 = vcpu pointer | ||
694 | */ | ||
695 | _GLOBAL(kvmppc_save_fp) | ||
696 | mfmsr r9 | ||
697 | ori r8,r9,MSR_FP | ||
698 | #ifdef CONFIG_ALTIVEC | ||
699 | BEGIN_FTR_SECTION | ||
700 | oris r8,r8,MSR_VEC@h | ||
701 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
702 | #endif | ||
703 | #ifdef CONFIG_VSX | ||
704 | BEGIN_FTR_SECTION | ||
705 | oris r8,r8,MSR_VSX@h | ||
706 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | ||
707 | #endif | ||
708 | mtmsrd r8 | ||
709 | isync | ||
710 | #ifdef CONFIG_VSX | ||
711 | BEGIN_FTR_SECTION | ||
712 | reg = 0 | ||
713 | .rept 32 | ||
714 | li r6,reg*16+VCPU_VSRS | ||
715 | stxvd2x reg,r6,r3 | ||
716 | reg = reg + 1 | ||
717 | .endr | ||
718 | FTR_SECTION_ELSE | ||
719 | #endif | ||
720 | reg = 0 | ||
721 | .rept 32 | ||
722 | stfd reg,reg*8+VCPU_FPRS(r3) | ||
723 | reg = reg + 1 | ||
724 | .endr | ||
725 | #ifdef CONFIG_VSX | ||
726 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) | ||
727 | #endif | ||
728 | mffs fr0 | ||
729 | stfd fr0,VCPU_FPSCR(r3) | ||
730 | |||
731 | #ifdef CONFIG_ALTIVEC | ||
732 | BEGIN_FTR_SECTION | ||
733 | reg = 0 | ||
734 | .rept 32 | ||
735 | li r6,reg*16+VCPU_VRS | ||
736 | stvx reg,r6,r3 | ||
737 | reg = reg + 1 | ||
738 | .endr | ||
739 | mfvscr vr0 | ||
740 | li r6,VCPU_VSCR | ||
741 | stvx vr0,r6,r3 | ||
742 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
743 | #endif | ||
744 | mfspr r6,SPRN_VRSAVE | ||
745 | stw r6,VCPU_VRSAVE(r3) | ||
746 | mtmsrd r9 | ||
747 | isync | ||
748 | blr | ||
749 | |||
750 | /* | ||
751 | * Load up FP, VMX and VSX registers | ||
752 | * r4 = vcpu pointer | ||
753 | */ | ||
754 | .globl kvmppc_load_fp | ||
755 | kvmppc_load_fp: | ||
756 | mfmsr r9 | ||
757 | ori r8,r9,MSR_FP | ||
758 | #ifdef CONFIG_ALTIVEC | ||
759 | BEGIN_FTR_SECTION | ||
760 | oris r8,r8,MSR_VEC@h | ||
761 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
762 | #endif | ||
763 | #ifdef CONFIG_VSX | ||
764 | BEGIN_FTR_SECTION | ||
765 | oris r8,r8,MSR_VSX@h | ||
766 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | ||
767 | #endif | ||
768 | mtmsrd r8 | ||
769 | isync | ||
770 | lfd fr0,VCPU_FPSCR(r4) | ||
771 | MTFSF_L(fr0) | ||
772 | #ifdef CONFIG_VSX | ||
773 | BEGIN_FTR_SECTION | ||
774 | reg = 0 | ||
775 | .rept 32 | ||
776 | li r7,reg*16+VCPU_VSRS | ||
777 | lxvd2x reg,r7,r4 | ||
778 | reg = reg + 1 | ||
779 | .endr | ||
780 | FTR_SECTION_ELSE | ||
781 | #endif | ||
782 | reg = 0 | ||
783 | .rept 32 | ||
784 | lfd reg,reg*8+VCPU_FPRS(r4) | ||
785 | reg = reg + 1 | ||
786 | .endr | ||
787 | #ifdef CONFIG_VSX | ||
788 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) | ||
789 | #endif | ||
790 | |||
791 | #ifdef CONFIG_ALTIVEC | ||
792 | BEGIN_FTR_SECTION | ||
793 | li r7,VCPU_VSCR | ||
794 | lvx vr0,r7,r4 | ||
795 | mtvscr vr0 | ||
796 | reg = 0 | ||
797 | .rept 32 | ||
798 | li r7,reg*16+VCPU_VRS | ||
799 | lvx reg,r7,r4 | ||
800 | reg = reg + 1 | ||
801 | .endr | ||
802 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
803 | #endif | ||
804 | lwz r7,VCPU_VRSAVE(r4) | ||
805 | mtspr SPRN_VRSAVE,r7 | ||
806 | blr | ||
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1cc25e8c0cf1..134501691ad0 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -89,29 +89,29 @@ kvmppc_handler_trampoline_enter:
 
 	/* Enter guest */
 
-	PPC_LL	r4, (SVCPU_CTR)(r3)
-	PPC_LL	r5, (SVCPU_LR)(r3)
-	lwz	r6, (SVCPU_CR)(r3)
-	lwz	r7, (SVCPU_XER)(r3)
+	PPC_LL	r4, SVCPU_CTR(r3)
+	PPC_LL	r5, SVCPU_LR(r3)
+	lwz	r6, SVCPU_CR(r3)
+	lwz	r7, SVCPU_XER(r3)
 
 	mtctr	r4
 	mtlr	r5
 	mtcr	r6
 	mtxer	r7
 
-	PPC_LL	r0, (SVCPU_R0)(r3)
-	PPC_LL	r1, (SVCPU_R1)(r3)
-	PPC_LL	r2, (SVCPU_R2)(r3)
-	PPC_LL	r4, (SVCPU_R4)(r3)
-	PPC_LL	r5, (SVCPU_R5)(r3)
-	PPC_LL	r6, (SVCPU_R6)(r3)
-	PPC_LL	r7, (SVCPU_R7)(r3)
-	PPC_LL	r8, (SVCPU_R8)(r3)
-	PPC_LL	r9, (SVCPU_R9)(r3)
-	PPC_LL	r10, (SVCPU_R10)(r3)
-	PPC_LL	r11, (SVCPU_R11)(r3)
-	PPC_LL	r12, (SVCPU_R12)(r3)
-	PPC_LL	r13, (SVCPU_R13)(r3)
+	PPC_LL	r0, SVCPU_R0(r3)
+	PPC_LL	r1, SVCPU_R1(r3)
+	PPC_LL	r2, SVCPU_R2(r3)
+	PPC_LL	r4, SVCPU_R4(r3)
+	PPC_LL	r5, SVCPU_R5(r3)
+	PPC_LL	r6, SVCPU_R6(r3)
+	PPC_LL	r7, SVCPU_R7(r3)
+	PPC_LL	r8, SVCPU_R8(r3)
+	PPC_LL	r9, SVCPU_R9(r3)
+	PPC_LL	r10, SVCPU_R10(r3)
+	PPC_LL	r11, SVCPU_R11(r3)
+	PPC_LL	r12, SVCPU_R12(r3)
+	PPC_LL	r13, SVCPU_R13(r3)
 
 	PPC_LL	r3, (SVCPU_R3)(r3)
 
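The change in this hunk is purely cosmetic: the parentheses around the asm-offsets constants are redundant, since SVCPU_CTR(r3) already parses as displacement(base). PPC_LL itself is the pointer-width-agnostic load macro; if memory serves, arch/powerpc/include/asm/asm-compat.h defines it roughly along these lines (shown here from recollection, not from this patch):

	#ifdef __powerpc64__
	#define PPC_LL	stringify_in_c(ld)
	#else
	#define PPC_LL	stringify_in_c(lwz)
	#endif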
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 026036efcde0..3a4f379ee70f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -38,8 +38,12 @@
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
+#ifndef CONFIG_KVM_BOOK3S_64_HV
 	return !(v->arch.shared->msr & MSR_WE) ||
 	       !!(v->arch.pending_exceptions);
+#else
+	return 1;
+#endif
 }
 
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -184,10 +188,13 @@ int kvm_dev_ioctl_check_extension(long ext)
 #else
 	case KVM_CAP_PPC_SEGSTATE:
 #endif
-	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_UNSET_IRQ:
 	case KVM_CAP_PPC_IRQ_LEVEL:
 	case KVM_CAP_ENABLE_CAP:
+		r = 1;
+		break;
+#ifndef CONFIG_KVM_BOOK3S_64_HV
+	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_OSI:
 	case KVM_CAP_PPC_GET_PVINFO:
 		r = 1;
@@ -195,6 +202,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+#endif
 	default:
 		r = 0;
 		break;
@@ -291,6 +299,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
+	vcpu->arch.dec_expires = ~(u64)0;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	mutex_init(&vcpu->arch.exit_timing_lock);
@@ -317,6 +326,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 #endif
 	kvmppc_core_vcpu_load(vcpu, cpu);
+	vcpu->cpu = smp_processor_id();
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -325,6 +335,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_BOOKE
 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 #endif
+	vcpu->cpu = -1;
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -496,6 +507,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		for (i = 0; i < 32; i++)
 			kvmppc_set_gpr(vcpu, i, gprs[i]);
 		vcpu->arch.osi_needed = 0;
+	} else if (vcpu->arch.hcall_needed) {
+		int i;
+
+		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
+		for (i = 0; i < 9; ++i)
+			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
+		vcpu->arch.hcall_needed = 0;
 	}
 
 	kvmppc_core_deliver_interrupts(vcpu);
@@ -518,6 +536,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
 		vcpu->stat.halt_wakeup++;
+	} else if (vcpu->cpu != -1) {
+		smp_send_reschedule(vcpu->cpu);
 	}
 
 	return 0;
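The new vcpu->cpu bookkeeping in vcpu_load/vcpu_put is what the kick in kvm_vcpu_ioctl_interrupt() relies on: a vcpu with a newly pending interrupt is either sleeping on its waitqueue or actually running guest code on another physical CPU, and in the latter case a reschedule IPI forces it out of the guest so the interrupt gets delivered. Reduced to a sketch of the pattern (locking and statistics elided):

	/* Sketch of the wake-or-kick pattern from the hunk above. */
	static void kick_vcpu(struct kvm_vcpu *vcpu)
	{
		if (waitqueue_active(&vcpu->wq))
			wake_up_interruptible(&vcpu->wq);	/* vcpu is halted */
		else if (vcpu->cpu != -1)
			smp_send_reschedule(vcpu->cpu);		/* vcpu is in guest mode */
	}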
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index d62a14b2cd0f..b135d3d397db 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -103,7 +103,7 @@ TRACE_EVENT(kvm_gtlb_write,
  * Book3S trace points                                                   *
  *************************************************************************/
 
-#ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_KVM_BOOK3S_PR
 
 TRACE_EVENT(kvm_book3s_exit,
 	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 9c9ca7c7b491..a156294fc22a 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -161,6 +161,7 @@ struct kvm_pit_config {
 #define KVM_EXIT_NMI              16
 #define KVM_EXIT_INTERNAL_ERROR   17
 #define KVM_EXIT_OSI              18
+#define KVM_EXIT_PAPR_HCALL       19
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 #define KVM_INTERNAL_ERROR_EMULATION 1
@@ -264,6 +265,11 @@ struct kvm_run {
 		struct {
 			__u64 gprs[32];
 		} osi;
+		struct {
+			__u64 nr;
+			__u64 ret;
+			__u64 args[9];
+		} papr_hcall;
 		/* Fix the size of the union. */
 		char padding[256];
 	};
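Paired with the kvm_arch_vcpu_ioctl_run() hunk above — which copies ret back into guest r3 and args[0..8] back into r4-r12 on re-entry — the new exit gives userspace a simple contract: service the hypercall, fill in the return values, and run the vcpu again. A hedged sketch of the userspace side; do_hcall() is a hypothetical dispatcher, and vcpu_fd plus the mmap()ed struct kvm_run are assumed to be set up already:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Minimal vcpu run-loop sketch; do_hcall() does not exist and
	 * stands in for whatever PAPR hypercall dispatcher userspace has. */
	static int run_vcpu(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
				return -1;

			switch (run->exit_reason) {
			case KVM_EXIT_PAPR_HCALL:
				/* args[] may also carry extra return values */
				run->papr_hcall.ret = do_hcall(run->papr_hcall.nr,
							       run->papr_hcall.args);
				break;		/* loop re-enters KVM_RUN */
			default:
				return 0;	/* hand other exits elsewhere */
			}
		}
	}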