author     Scott Wood <scottwood@freescale.com>       2011-12-20 10:34:43 -0500
committer  Avi Kivity <avi@redhat.com>                2012-04-08 05:51:19 -0400
commit     d30f6e480055e5be12e7a03fd11ea912a451daa5 (patch)
tree       e6c367e6f1da4da67b3a395a1a735a09e52067c0 /arch/powerpc/kvm
parent     cfac57847a67c4903f34a77e971521531bbc7c77 (diff)
KVM: PPC: booke: category E.HV (GS-mode) support
Chips such as e500mc that implement category E.HV in Power ISA 2.06
provide hardware virtualization features, including a new MSR mode for
guest state. The guest OS can perform many operations without trapping
into the hypervisor, including transitions to and from guest userspace.
Since we can use SRR1[GS] to reliably tell whether an exception came from
guest state, instead of messing around with IVPR, we use DO_KVM similarly
to book3s.
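The idea, roughly: each host exception prologue tests the GS bit of the saved
SRR1 and, if the interrupt was taken from guest state, branches to the KVM
exit path instead of the normal host handler. A minimal sketch of that test
follows; it is illustrative only, not the kernel's actual DO_KVM macro, and
the scratch register choice and the kvmppc_handler_N label are placeholders:

	mfspr	r11, SPRN_SRR1		/* MSR as it was when the exception hit */
	andis.	r11, r11, MSR_GS@h	/* GS set => interrupt came from guest state */
	beq	1f			/* GS clear: fall through to the host handler */
	b	kvmppc_handler_N	/* GS set: enter the KVM exit path for this vector */
1:
	/* normal host exception handling continues here */

Critical, machine check, and debug levels use CSRR1/MCSRR1/DSRR1 instead of
SRR1, but the GS test is the same.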
Current issues include:
- Machine checks from guest state are not routed to the host handler.
- The guest can cause a host oops by executing an emulated instruction
in a page that lacks read permission. Existing e500/4xx support has
the same problem.
Includes work by Ashish Kalra <Ashish.Kalra@freescale.com>,
Varun Sethi <Varun.Sethi@freescale.com>, and
Liu Yu <yu.liu@freescale.com>.
Signed-off-by: Scott Wood <scottwood@freescale.com>
[agraf: remove pt_regs usage]
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/Kconfig                  3
-rw-r--r--  arch/powerpc/kvm/booke.c                309
-rw-r--r--  arch/powerpc/kvm/booke.h                 24
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c         23
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S   587
-rw-r--r--  arch/powerpc/kvm/powerpc.c                5
-rw-r--r--  arch/powerpc/kvm/timing.h                 6
7 files changed, 897 insertions(+), 60 deletions(-)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 8f64709ae331..2c33cd336434 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR | |||
90 | depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV | 90 | depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV |
91 | select KVM_BOOK3S_PR | 91 | select KVM_BOOK3S_PR |
92 | 92 | ||
93 | config KVM_BOOKE_HV | ||
94 | bool | ||
95 | |||
93 | config KVM_440 | 96 | config KVM_440 |
94 | bool "KVM support for PowerPC 440 processors" | 97 | bool "KVM support for PowerPC 440 processors" |
95 | depends on EXPERIMENTAL && 44x | 98 | depends on EXPERIMENTAL && 44x |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 2ee9bae38328..75dbaeb2efa3 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -17,6 +17,8 @@ | |||
17 | * | 17 | * |
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | 19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> |
20 | * Scott Wood <scottwood@freescale.com> | ||
21 | * Varun Sethi <varun.sethi@freescale.com> | ||
20 | */ | 22 | */ |
21 | 23 | ||
22 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
@@ -30,9 +32,12 @@ | |||
30 | #include <asm/cputable.h> | 32 | #include <asm/cputable.h> |
31 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
32 | #include <asm/kvm_ppc.h> | 34 | #include <asm/kvm_ppc.h> |
33 | #include "timing.h" | ||
34 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
36 | #include <asm/dbell.h> | ||
37 | #include <asm/hw_irq.h> | ||
38 | #include <asm/irq.h> | ||
35 | 39 | ||
40 | #include "timing.h" | ||
36 | #include "booke.h" | 41 | #include "booke.h" |
37 | 42 | ||
38 | unsigned long kvmppc_booke_handlers; | 43 | unsigned long kvmppc_booke_handlers; |
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
55 | { "dec", VCPU_STAT(dec_exits) }, | 60 | { "dec", VCPU_STAT(dec_exits) }, |
56 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | 61 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
57 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 62 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
63 | { "doorbell", VCPU_STAT(dbell_exits) }, | ||
64 | { "guest doorbell", VCPU_STAT(gdbell_exits) }, | ||
58 | { NULL } | 65 | { NULL } |
59 | }; | 66 | }; |
60 | 67 | ||
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | |||
121 | { | 128 | { |
122 | u32 old_msr = vcpu->arch.shared->msr; | 129 | u32 old_msr = vcpu->arch.shared->msr; |
123 | 130 | ||
131 | #ifdef CONFIG_KVM_BOOKE_HV | ||
132 | new_msr |= MSR_GS; | ||
133 | #endif | ||
134 | |||
124 | vcpu->arch.shared->msr = new_msr; | 135 | vcpu->arch.shared->msr = new_msr; |
125 | 136 | ||
126 | kvmppc_mmu_msr_notify(vcpu, old_msr); | 137 | kvmppc_mmu_msr_notify(vcpu, old_msr); |
@@ -195,6 +206,75 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | |||
195 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); | 206 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); |
196 | } | 207 | } |
197 | 208 | ||
209 | static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
210 | { | ||
211 | #ifdef CONFIG_KVM_BOOKE_HV | ||
212 | mtspr(SPRN_GSRR0, srr0); | ||
213 | mtspr(SPRN_GSRR1, srr1); | ||
214 | #else | ||
215 | vcpu->arch.shared->srr0 = srr0; | ||
216 | vcpu->arch.shared->srr1 = srr1; | ||
217 | #endif | ||
218 | } | ||
219 | |||
220 | static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
221 | { | ||
222 | vcpu->arch.csrr0 = srr0; | ||
223 | vcpu->arch.csrr1 = srr1; | ||
224 | } | ||
225 | |||
226 | static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
227 | { | ||
228 | if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) { | ||
229 | vcpu->arch.dsrr0 = srr0; | ||
230 | vcpu->arch.dsrr1 = srr1; | ||
231 | } else { | ||
232 | set_guest_csrr(vcpu, srr0, srr1); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
237 | { | ||
238 | vcpu->arch.mcsrr0 = srr0; | ||
239 | vcpu->arch.mcsrr1 = srr1; | ||
240 | } | ||
241 | |||
242 | static unsigned long get_guest_dear(struct kvm_vcpu *vcpu) | ||
243 | { | ||
244 | #ifdef CONFIG_KVM_BOOKE_HV | ||
245 | return mfspr(SPRN_GDEAR); | ||
246 | #else | ||
247 | return vcpu->arch.shared->dar; | ||
248 | #endif | ||
249 | } | ||
250 | |||
251 | static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear) | ||
252 | { | ||
253 | #ifdef CONFIG_KVM_BOOKE_HV | ||
254 | mtspr(SPRN_GDEAR, dear); | ||
255 | #else | ||
256 | vcpu->arch.shared->dar = dear; | ||
257 | #endif | ||
258 | } | ||
259 | |||
260 | static unsigned long get_guest_esr(struct kvm_vcpu *vcpu) | ||
261 | { | ||
262 | #ifdef CONFIG_KVM_BOOKE_HV | ||
263 | return mfspr(SPRN_GESR); | ||
264 | #else | ||
265 | return vcpu->arch.shared->esr; | ||
266 | #endif | ||
267 | } | ||
268 | |||
269 | static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) | ||
270 | { | ||
271 | #ifdef CONFIG_KVM_BOOKE_HV | ||
272 | mtspr(SPRN_GESR, esr); | ||
273 | #else | ||
274 | vcpu->arch.shared->esr = esr; | ||
275 | #endif | ||
276 | } | ||
277 | |||
198 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 278 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
199 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | 279 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, |
200 | unsigned int priority) | 280 | unsigned int priority) |
@@ -206,6 +286,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
206 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | 286 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); |
207 | bool crit; | 287 | bool crit; |
208 | bool keep_irq = false; | 288 | bool keep_irq = false; |
289 | enum int_class int_class; | ||
209 | 290 | ||
210 | /* Truncate crit indicators in 32 bit mode */ | 291 | /* Truncate crit indicators in 32 bit mode */ |
211 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | 292 | if (!(vcpu->arch.shared->msr & MSR_SF)) { |
@@ -241,16 +322,20 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
241 | case BOOKE_IRQPRIO_AP_UNAVAIL: | 322 | case BOOKE_IRQPRIO_AP_UNAVAIL: |
242 | case BOOKE_IRQPRIO_ALIGNMENT: | 323 | case BOOKE_IRQPRIO_ALIGNMENT: |
243 | allowed = 1; | 324 | allowed = 1; |
244 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 325 | msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE; |
326 | int_class = INT_CLASS_NONCRIT; | ||
245 | break; | 327 | break; |
246 | case BOOKE_IRQPRIO_CRITICAL: | 328 | case BOOKE_IRQPRIO_CRITICAL: |
247 | case BOOKE_IRQPRIO_WATCHDOG: | ||
248 | allowed = vcpu->arch.shared->msr & MSR_CE; | 329 | allowed = vcpu->arch.shared->msr & MSR_CE; |
249 | msr_mask = MSR_ME; | 330 | allowed = allowed && !crit; |
331 | msr_mask = MSR_GS | MSR_ME; | ||
332 | int_class = INT_CLASS_CRIT; | ||
250 | break; | 333 | break; |
251 | case BOOKE_IRQPRIO_MACHINE_CHECK: | 334 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
252 | allowed = vcpu->arch.shared->msr & MSR_ME; | 335 | allowed = vcpu->arch.shared->msr & MSR_ME; |
253 | msr_mask = 0; | 336 | allowed = allowed && !crit; |
337 | msr_mask = MSR_GS; | ||
338 | int_class = INT_CLASS_MC; | ||
254 | break; | 339 | break; |
255 | case BOOKE_IRQPRIO_DECREMENTER: | 340 | case BOOKE_IRQPRIO_DECREMENTER: |
256 | case BOOKE_IRQPRIO_FIT: | 341 | case BOOKE_IRQPRIO_FIT: |
@@ -259,28 +344,62 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
259 | case BOOKE_IRQPRIO_EXTERNAL: | 344 | case BOOKE_IRQPRIO_EXTERNAL: |
260 | allowed = vcpu->arch.shared->msr & MSR_EE; | 345 | allowed = vcpu->arch.shared->msr & MSR_EE; |
261 | allowed = allowed && !crit; | 346 | allowed = allowed && !crit; |
262 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 347 | msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE; |
348 | int_class = INT_CLASS_NONCRIT; | ||
263 | break; | 349 | break; |
264 | case BOOKE_IRQPRIO_DEBUG: | 350 | case BOOKE_IRQPRIO_DEBUG: |
265 | allowed = vcpu->arch.shared->msr & MSR_DE; | 351 | allowed = vcpu->arch.shared->msr & MSR_DE; |
266 | msr_mask = MSR_ME; | 352 | allowed = allowed && !crit; |
353 | msr_mask = MSR_GS | MSR_ME; | ||
354 | int_class = INT_CLASS_CRIT; | ||
267 | break; | 355 | break; |
268 | } | 356 | } |
269 | 357 | ||
270 | if (allowed) { | 358 | if (allowed) { |
271 | vcpu->arch.shared->srr0 = vcpu->arch.pc; | 359 | switch (int_class) { |
272 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; | 360 | case INT_CLASS_NONCRIT: |
361 | set_guest_srr(vcpu, vcpu->arch.pc, | ||
362 | vcpu->arch.shared->msr); | ||
363 | break; | ||
364 | case INT_CLASS_CRIT: | ||
365 | set_guest_csrr(vcpu, vcpu->arch.pc, | ||
366 | vcpu->arch.shared->msr); | ||
367 | break; | ||
368 | case INT_CLASS_DBG: | ||
369 | set_guest_dsrr(vcpu, vcpu->arch.pc, | ||
370 | vcpu->arch.shared->msr); | ||
371 | break; | ||
372 | case INT_CLASS_MC: | ||
373 | set_guest_mcsrr(vcpu, vcpu->arch.pc, | ||
374 | vcpu->arch.shared->msr); | ||
375 | break; | ||
376 | } | ||
377 | |||
273 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 378 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
274 | if (update_esr == true) | 379 | if (update_esr == true) |
275 | vcpu->arch.shared->esr = vcpu->arch.queued_esr; | 380 | set_guest_esr(vcpu, vcpu->arch.queued_esr); |
276 | if (update_dear == true) | 381 | if (update_dear == true) |
277 | vcpu->arch.shared->dar = vcpu->arch.queued_dear; | 382 | set_guest_dear(vcpu, vcpu->arch.queued_dear); |
278 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); | 383 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); |
279 | 384 | ||
280 | if (!keep_irq) | 385 | if (!keep_irq) |
281 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 386 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
282 | } | 387 | } |
283 | 388 | ||
389 | #ifdef CONFIG_KVM_BOOKE_HV | ||
390 | /* | ||
391 | * If an interrupt is pending but masked, raise a guest doorbell | ||
392 | * so that we are notified when the guest enables the relevant | ||
393 | * MSR bit. | ||
394 | */ | ||
395 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) | ||
396 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT); | ||
397 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE) | ||
398 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT); | ||
399 | if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK) | ||
400 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC); | ||
401 | #endif | ||
402 | |||
284 | return allowed; | 403 | return allowed; |
285 | } | 404 | } |
286 | 405 | ||
@@ -344,6 +463,11 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
344 | return -EINVAL; | 463 | return -EINVAL; |
345 | } | 464 | } |
346 | 465 | ||
466 | if (!current->thread.kvm_vcpu) { | ||
467 | WARN(1, "no vcpu\n"); | ||
468 | return -EPERM; | ||
469 | } | ||
470 | |||
347 | local_irq_disable(); | 471 | local_irq_disable(); |
348 | 472 | ||
349 | kvmppc_core_prepare_to_enter(vcpu); | 473 | kvmppc_core_prepare_to_enter(vcpu); |
@@ -363,6 +487,38 @@ out: | |||
363 | return ret; | 487 | return ret; |
364 | } | 488 | } |
365 | 489 | ||
490 | static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
491 | { | ||
492 | enum emulation_result er; | ||
493 | |||
494 | er = kvmppc_emulate_instruction(run, vcpu); | ||
495 | switch (er) { | ||
496 | case EMULATE_DONE: | ||
497 | /* don't overwrite subtypes, just account kvm_stats */ | ||
498 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
499 | /* Future optimization: only reload non-volatiles if | ||
500 | * they were actually modified by emulation. */ | ||
501 | return RESUME_GUEST_NV; | ||
502 | |||
503 | case EMULATE_DO_DCR: | ||
504 | run->exit_reason = KVM_EXIT_DCR; | ||
505 | return RESUME_HOST; | ||
506 | |||
507 | case EMULATE_FAIL: | ||
508 | /* XXX Deliver Program interrupt to guest. */ | ||
509 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | ||
510 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | ||
511 | /* For debugging, encode the failing instruction and | ||
512 | * report it to userspace. */ | ||
513 | run->hw.hardware_exit_reason = ~0ULL << 32; | ||
514 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | ||
515 | return RESUME_HOST; | ||
516 | |||
517 | default: | ||
518 | BUG(); | ||
519 | } | ||
520 | } | ||
521 | |||
366 | /** | 522 | /** |
367 | * kvmppc_handle_exit | 523 | * kvmppc_handle_exit |
368 | * | 524 | * |
@@ -371,12 +527,30 @@ out: | |||
371 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | 527 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, |
372 | unsigned int exit_nr) | 528 | unsigned int exit_nr) |
373 | { | 529 | { |
374 | enum emulation_result er; | ||
375 | int r = RESUME_HOST; | 530 | int r = RESUME_HOST; |
376 | 531 | ||
377 | /* update before a new last_exit_type is rewritten */ | 532 | /* update before a new last_exit_type is rewritten */ |
378 | kvmppc_update_timing_stats(vcpu); | 533 | kvmppc_update_timing_stats(vcpu); |
379 | 534 | ||
535 | switch (exit_nr) { | ||
536 | case BOOKE_INTERRUPT_EXTERNAL: | ||
537 | do_IRQ(current->thread.regs); | ||
538 | break; | ||
539 | |||
540 | case BOOKE_INTERRUPT_DECREMENTER: | ||
541 | timer_interrupt(current->thread.regs); | ||
542 | break; | ||
543 | |||
544 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64) | ||
545 | case BOOKE_INTERRUPT_DOORBELL: | ||
546 | doorbell_exception(current->thread.regs); | ||
547 | break; | ||
548 | #endif | ||
549 | case BOOKE_INTERRUPT_MACHINE_CHECK: | ||
550 | /* FIXME */ | ||
551 | break; | ||
552 | } | ||
553 | |||
380 | local_irq_enable(); | 554 | local_irq_enable(); |
381 | 555 | ||
382 | run->exit_reason = KVM_EXIT_UNKNOWN; | 556 | run->exit_reason = KVM_EXIT_UNKNOWN; |
@@ -384,30 +558,56 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
384 | 558 | ||
385 | switch (exit_nr) { | 559 | switch (exit_nr) { |
386 | case BOOKE_INTERRUPT_MACHINE_CHECK: | 560 | case BOOKE_INTERRUPT_MACHINE_CHECK: |
387 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); | 561 | kvm_resched(vcpu); |
388 | kvmppc_dump_vcpu(vcpu); | 562 | r = RESUME_GUEST; |
389 | r = RESUME_HOST; | ||
390 | break; | 563 | break; |
391 | 564 | ||
392 | case BOOKE_INTERRUPT_EXTERNAL: | 565 | case BOOKE_INTERRUPT_EXTERNAL: |
393 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); | 566 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); |
394 | if (need_resched()) | 567 | kvm_resched(vcpu); |
395 | cond_resched(); | ||
396 | r = RESUME_GUEST; | 568 | r = RESUME_GUEST; |
397 | break; | 569 | break; |
398 | 570 | ||
399 | case BOOKE_INTERRUPT_DECREMENTER: | 571 | case BOOKE_INTERRUPT_DECREMENTER: |
400 | /* Since we switched IVPR back to the host's value, the host | ||
401 | * handled this interrupt the moment we enabled interrupts. | ||
402 | * Now we just offer it a chance to reschedule the guest. */ | ||
403 | kvmppc_account_exit(vcpu, DEC_EXITS); | 572 | kvmppc_account_exit(vcpu, DEC_EXITS); |
404 | if (need_resched()) | 573 | kvm_resched(vcpu); |
405 | cond_resched(); | ||
406 | r = RESUME_GUEST; | 574 | r = RESUME_GUEST; |
407 | break; | 575 | break; |
408 | 576 | ||
577 | case BOOKE_INTERRUPT_DOORBELL: | ||
578 | kvmppc_account_exit(vcpu, DBELL_EXITS); | ||
579 | kvm_resched(vcpu); | ||
580 | r = RESUME_GUEST; | ||
581 | break; | ||
582 | |||
583 | case BOOKE_INTERRUPT_GUEST_DBELL_CRIT: | ||
584 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | ||
585 | |||
586 | /* | ||
587 | * We are here because there is a pending guest interrupt | ||
588 | * which could not be delivered as MSR_CE or MSR_ME was not | ||
589 | * set. Once we break from here we will retry delivery. | ||
590 | */ | ||
591 | r = RESUME_GUEST; | ||
592 | break; | ||
593 | |||
594 | case BOOKE_INTERRUPT_GUEST_DBELL: | ||
595 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | ||
596 | |||
597 | /* | ||
598 | * We are here because there is a pending guest interrupt | ||
599 | * which could not be delivered as MSR_EE was not set. Once | ||
600 | * we break from here we will retry delivery. | ||
601 | */ | ||
602 | r = RESUME_GUEST; | ||
603 | break; | ||
604 | |||
605 | case BOOKE_INTERRUPT_HV_PRIV: | ||
606 | r = emulation_exit(run, vcpu); | ||
607 | break; | ||
608 | |||
409 | case BOOKE_INTERRUPT_PROGRAM: | 609 | case BOOKE_INTERRUPT_PROGRAM: |
410 | if (vcpu->arch.shared->msr & MSR_PR) { | 610 | if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { |
411 | /* Program traps generated by user-level software must be handled | 611 | /* Program traps generated by user-level software must be handled |
412 | * by the guest kernel. */ | 612 | * by the guest kernel. */ |
413 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); | 613 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
@@ -416,32 +616,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
416 | break; | 616 | break; |
417 | } | 617 | } |
418 | 618 | ||
419 | er = kvmppc_emulate_instruction(run, vcpu); | 619 | r = emulation_exit(run, vcpu); |
420 | switch (er) { | ||
421 | case EMULATE_DONE: | ||
422 | /* don't overwrite subtypes, just account kvm_stats */ | ||
423 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
424 | /* Future optimization: only reload non-volatiles if | ||
425 | * they were actually modified by emulation. */ | ||
426 | r = RESUME_GUEST_NV; | ||
427 | break; | ||
428 | case EMULATE_DO_DCR: | ||
429 | run->exit_reason = KVM_EXIT_DCR; | ||
430 | r = RESUME_HOST; | ||
431 | break; | ||
432 | case EMULATE_FAIL: | ||
433 | /* XXX Deliver Program interrupt to guest. */ | ||
434 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | ||
435 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | ||
436 | /* For debugging, encode the failing instruction and | ||
437 | * report it to userspace. */ | ||
438 | run->hw.hardware_exit_reason = ~0ULL << 32; | ||
439 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | ||
440 | r = RESUME_HOST; | ||
441 | break; | ||
442 | default: | ||
443 | BUG(); | ||
444 | } | ||
445 | break; | 620 | break; |
446 | 621 | ||
447 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 622 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
@@ -506,6 +681,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
506 | r = RESUME_GUEST; | 681 | r = RESUME_GUEST; |
507 | break; | 682 | break; |
508 | 683 | ||
684 | #ifdef CONFIG_KVM_BOOKE_HV | ||
685 | case BOOKE_INTERRUPT_HV_SYSCALL: | ||
686 | if (!(vcpu->arch.shared->msr & MSR_PR)) { | ||
687 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | ||
688 | } else { | ||
689 | /* | ||
690 | * hcall from guest userspace -- send privileged | ||
691 | * instruction program check. | ||
692 | */ | ||
693 | kvmppc_core_queue_program(vcpu, ESR_PPR); | ||
694 | } | ||
695 | |||
696 | r = RESUME_GUEST; | ||
697 | break; | ||
698 | #else | ||
509 | case BOOKE_INTERRUPT_SYSCALL: | 699 | case BOOKE_INTERRUPT_SYSCALL: |
510 | if (!(vcpu->arch.shared->msr & MSR_PR) && | 700 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
511 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | 701 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { |
@@ -519,6 +709,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
519 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); | 709 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
520 | r = RESUME_GUEST; | 710 | r = RESUME_GUEST; |
521 | break; | 711 | break; |
712 | #endif | ||
522 | 713 | ||
523 | case BOOKE_INTERRUPT_DTLB_MISS: { | 714 | case BOOKE_INTERRUPT_DTLB_MISS: { |
524 | unsigned long eaddr = vcpu->arch.fault_dear; | 715 | unsigned long eaddr = vcpu->arch.fault_dear; |
@@ -659,12 +850,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
659 | int r; | 850 | int r; |
660 | 851 | ||
661 | vcpu->arch.pc = 0; | 852 | vcpu->arch.pc = 0; |
662 | vcpu->arch.shared->msr = 0; | ||
663 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | ||
664 | vcpu->arch.shared->pir = vcpu->vcpu_id; | 853 | vcpu->arch.shared->pir = vcpu->vcpu_id; |
665 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 854 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
855 | kvmppc_set_msr(vcpu, 0); | ||
666 | 856 | ||
857 | #ifndef CONFIG_KVM_BOOKE_HV | ||
858 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | ||
667 | vcpu->arch.shadow_pid = 1; | 859 | vcpu->arch.shadow_pid = 1; |
860 | vcpu->arch.shared->msr = 0; | ||
861 | #endif | ||
668 | 862 | ||
669 | /* Eye-catching numbers so we know if the guest takes an interrupt | 863 | /* Eye-catching numbers so we know if the guest takes an interrupt |
670 | * before it's programmed its own IVPR/IVORs. */ | 864 | * before it's programmed its own IVPR/IVORs. */ |
@@ -745,8 +939,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu, | |||
745 | sregs->u.e.csrr0 = vcpu->arch.csrr0; | 939 | sregs->u.e.csrr0 = vcpu->arch.csrr0; |
746 | sregs->u.e.csrr1 = vcpu->arch.csrr1; | 940 | sregs->u.e.csrr1 = vcpu->arch.csrr1; |
747 | sregs->u.e.mcsr = vcpu->arch.mcsr; | 941 | sregs->u.e.mcsr = vcpu->arch.mcsr; |
748 | sregs->u.e.esr = vcpu->arch.shared->esr; | 942 | sregs->u.e.esr = get_guest_esr(vcpu); |
749 | sregs->u.e.dear = vcpu->arch.shared->dar; | 943 | sregs->u.e.dear = get_guest_dear(vcpu); |
750 | sregs->u.e.tsr = vcpu->arch.tsr; | 944 | sregs->u.e.tsr = vcpu->arch.tsr; |
751 | sregs->u.e.tcr = vcpu->arch.tcr; | 945 | sregs->u.e.tcr = vcpu->arch.tcr; |
752 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); | 946 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); |
@@ -763,8 +957,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu, | |||
763 | vcpu->arch.csrr0 = sregs->u.e.csrr0; | 957 | vcpu->arch.csrr0 = sregs->u.e.csrr0; |
764 | vcpu->arch.csrr1 = sregs->u.e.csrr1; | 958 | vcpu->arch.csrr1 = sregs->u.e.csrr1; |
765 | vcpu->arch.mcsr = sregs->u.e.mcsr; | 959 | vcpu->arch.mcsr = sregs->u.e.mcsr; |
766 | vcpu->arch.shared->esr = sregs->u.e.esr; | 960 | set_guest_esr(vcpu, sregs->u.e.esr); |
767 | vcpu->arch.shared->dar = sregs->u.e.dear; | 961 | set_guest_dear(vcpu, sregs->u.e.dear); |
768 | vcpu->arch.vrsave = sregs->u.e.vrsave; | 962 | vcpu->arch.vrsave = sregs->u.e.vrsave; |
769 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); | 963 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); |
770 | 964 | ||
@@ -961,14 +1155,17 @@ void kvmppc_decrementer_func(unsigned long data) | |||
961 | 1155 | ||
962 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1156 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
963 | { | 1157 | { |
1158 | current->thread.kvm_vcpu = vcpu; | ||
964 | } | 1159 | } |
965 | 1160 | ||
966 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) | 1161 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) |
967 | { | 1162 | { |
1163 | current->thread.kvm_vcpu = NULL; | ||
968 | } | 1164 | } |
969 | 1165 | ||
970 | int __init kvmppc_booke_init(void) | 1166 | int __init kvmppc_booke_init(void) |
971 | { | 1167 | { |
1168 | #ifndef CONFIG_KVM_BOOKE_HV | ||
972 | unsigned long ivor[16]; | 1169 | unsigned long ivor[16]; |
973 | unsigned long max_ivor = 0; | 1170 | unsigned long max_ivor = 0; |
974 | int i; | 1171 | int i; |
@@ -1011,7 +1208,7 @@ int __init kvmppc_booke_init(void) | |||
1011 | } | 1208 | } |
1012 | flush_icache_range(kvmppc_booke_handlers, | 1209 | flush_icache_range(kvmppc_booke_handlers, |
1013 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); | 1210 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); |
1014 | 1211 | #endif /* !BOOKE_HV */ | |
1015 | return 0; | 1212 | return 0; |
1016 | } | 1213 | } |
1017 | 1214 | ||
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 05d1d99428ce..d53bcf2558f5 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -48,7 +48,20 @@ | |||
48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 | 48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 |
49 | /* Internal pseudo-irqprio for level triggered externals */ | 49 | /* Internal pseudo-irqprio for level triggered externals */ |
50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 | 50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 |
51 | #define BOOKE_IRQPRIO_MAX 20 | 51 | #define BOOKE_IRQPRIO_DBELL 21 |
52 | #define BOOKE_IRQPRIO_DBELL_CRIT 22 | ||
53 | #define BOOKE_IRQPRIO_MAX 23 | ||
54 | |||
55 | #define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \ | ||
56 | (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \ | ||
57 | (1 << BOOKE_IRQPRIO_DBELL) | \ | ||
58 | (1 << BOOKE_IRQPRIO_DECREMENTER) | \ | ||
59 | (1 << BOOKE_IRQPRIO_FIT) | \ | ||
60 | (1 << BOOKE_IRQPRIO_EXTERNAL)) | ||
61 | |||
62 | #define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \ | ||
63 | (1 << BOOKE_IRQPRIO_WATCHDOG) | \ | ||
64 | (1 << BOOKE_IRQPRIO_CRITICAL)) | ||
52 | 65 | ||
53 | extern unsigned long kvmppc_booke_handlers; | 66 | extern unsigned long kvmppc_booke_handlers; |
54 | 67 | ||
@@ -74,4 +87,13 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); | |||
74 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | 87 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu); |
75 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu); | 88 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu); |
76 | 89 | ||
90 | enum int_class { | ||
91 | INT_CLASS_NONCRIT, | ||
92 | INT_CLASS_CRIT, | ||
93 | INT_CLASS_MC, | ||
94 | INT_CLASS_DBG, | ||
95 | }; | ||
96 | |||
97 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); | ||
98 | |||
77 | #endif /* __KVM_BOOKE_H__ */ | 99 | #endif /* __KVM_BOOKE_H__ */ |
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 3e652da36534..904412bbea40 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -99,6 +99,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
99 | return emulated; | 99 | return emulated; |
100 | } | 100 | } |
101 | 101 | ||
102 | /* | ||
103 | * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode). | ||
104 | * Their backing store is in real registers, and these functions | ||
105 | * will return the wrong result if called for them in another context | ||
106 | * (such as debugging). | ||
107 | */ | ||
102 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 108 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) |
103 | { | 109 | { |
104 | int emulated = EMULATE_DONE; | 110 | int emulated = EMULATE_DONE; |
@@ -122,9 +128,11 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
122 | kvmppc_set_tcr(vcpu, spr_val); | 128 | kvmppc_set_tcr(vcpu, spr_val); |
123 | break; | 129 | break; |
124 | 130 | ||
125 | /* Note: SPRG4-7 are user-readable. These values are | 131 | /* |
126 | * loaded into the real SPRGs when resuming the | 132 | * Note: SPRG4-7 are user-readable. |
127 | * guest. */ | 133 | * These values are loaded into the real SPRGs when resuming the |
134 | * guest (PR-mode only). | ||
135 | */ | ||
128 | case SPRN_SPRG4: | 136 | case SPRN_SPRG4: |
129 | vcpu->arch.shared->sprg4 = spr_val; break; | 137 | vcpu->arch.shared->sprg4 = spr_val; break; |
130 | case SPRN_SPRG5: | 138 | case SPRN_SPRG5: |
@@ -136,6 +144,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
136 | 144 | ||
137 | case SPRN_IVPR: | 145 | case SPRN_IVPR: |
138 | vcpu->arch.ivpr = spr_val; | 146 | vcpu->arch.ivpr = spr_val; |
147 | #ifdef CONFIG_KVM_BOOKE_HV | ||
148 | mtspr(SPRN_GIVPR, spr_val); | ||
149 | #endif | ||
139 | break; | 150 | break; |
140 | case SPRN_IVOR0: | 151 | case SPRN_IVOR0: |
141 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; | 152 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; |
@@ -145,6 +156,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
145 | break; | 156 | break; |
146 | case SPRN_IVOR2: | 157 | case SPRN_IVOR2: |
147 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; | 158 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; |
159 | #ifdef CONFIG_KVM_BOOKE_HV | ||
160 | mtspr(SPRN_GIVOR2, spr_val); | ||
161 | #endif | ||
148 | break; | 162 | break; |
149 | case SPRN_IVOR3: | 163 | case SPRN_IVOR3: |
150 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; | 164 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; |
@@ -163,6 +177,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
163 | break; | 177 | break; |
164 | case SPRN_IVOR8: | 178 | case SPRN_IVOR8: |
165 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; | 179 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; |
180 | #ifdef CONFIG_KVM_BOOKE_HV | ||
181 | mtspr(SPRN_GIVOR8, spr_val); | ||
182 | #endif | ||
166 | break; | 183 | break; |
167 | case SPRN_IVOR9: | 184 | case SPRN_IVOR9: |
168 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; | 185 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; |
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
new file mode 100644
index 000000000000..9eaeebd86e44
--- /dev/null
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -0,0 +1,587 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. | ||
16 | * | ||
17 | * Author: Varun Sethi <varun.sethi@freescale.com> | ||
18 | * Author: Scott Wood <scottwood@freescale.com> | ||
19 | * | ||
20 | * This file is derived from arch/powerpc/kvm/booke_interrupts.S | ||
21 | */ | ||
22 | |||
23 | #include <asm/ppc_asm.h> | ||
24 | #include <asm/kvm_asm.h> | ||
25 | #include <asm/reg.h> | ||
26 | #include <asm/mmu-44x.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/asm-compat.h> | ||
29 | #include <asm/asm-offsets.h> | ||
30 | #include <asm/bitsperlong.h> | ||
31 | |||
32 | #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ | ||
33 | |||
34 | #define GET_VCPU(vcpu, thread) \ | ||
35 | PPC_LL vcpu, THREAD_KVM_VCPU(thread) | ||
36 | |||
37 | #define SET_VCPU(vcpu) \ | ||
38 | PPC_STL vcpu, (THREAD + THREAD_KVM_VCPU)(r2) | ||
39 | |||
40 | #define LONGBYTES (BITS_PER_LONG / 8) | ||
41 | |||
42 | #define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES)) | ||
43 | #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) | ||
44 | |||
45 | /* The host stack layout: */ | ||
46 | #define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */ | ||
47 | #define HOST_CALLEE_LR (1 * LONGBYTES) | ||
48 | #define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */ | ||
49 | /* | ||
50 | * r2 is special: it holds 'current', and it is made nonvolatile in the | ||
51 | * kernel with the -ffixed-r2 gcc option. | ||
52 | */ | ||
53 | #define HOST_R2 (3 * LONGBYTES) | ||
54 | #define HOST_NV_GPRS (4 * LONGBYTES) | ||
55 | #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) | ||
56 | #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES) | ||
57 | #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */ | ||
58 | #define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */ | ||
59 | |||
60 | #define NEED_EMU 0x00000001 /* emulation -- save nv regs */ | ||
61 | #define NEED_DEAR 0x00000002 /* save faulting DEAR */ | ||
62 | #define NEED_ESR 0x00000004 /* save faulting ESR */ | ||
63 | |||
64 | /* | ||
65 | * On entry: | ||
66 | * r4 = vcpu, r5 = srr0, r6 = srr1 | ||
67 | * saved in vcpu: cr, ctr, r3-r13 | ||
68 | */ | ||
69 | .macro kvm_handler_common intno, srr0, flags | ||
70 | mfspr r10, SPRN_PID | ||
71 | lwz r8, VCPU_HOST_PID(r4) | ||
72 | PPC_LL r11, VCPU_SHARED(r4) | ||
73 | PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ | ||
74 | li r14, \intno | ||
75 | |||
76 | stw r10, VCPU_GUEST_PID(r4) | ||
77 | mtspr SPRN_PID, r8 | ||
78 | |||
79 | .if \flags & NEED_EMU | ||
80 | lwz r9, VCPU_KVM(r4) | ||
81 | .endif | ||
82 | |||
83 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
84 | /* save exit time */ | ||
85 | 1: mfspr r7, SPRN_TBRU | ||
86 | mfspr r8, SPRN_TBRL | ||
87 | mfspr r9, SPRN_TBRU | ||
88 | cmpw r9, r7 | ||
89 | PPC_STL r8, VCPU_TIMING_EXIT_TBL(r4) | ||
90 | bne- 1b | ||
91 | PPC_STL r9, VCPU_TIMING_EXIT_TBU(r4) | ||
92 | #endif | ||
93 | |||
94 | oris r8, r6, MSR_CE@h | ||
95 | #ifndef CONFIG_64BIT | ||
96 | stw r6, (VCPU_SHARED_MSR + 4)(r11) | ||
97 | #else | ||
98 | std r6, (VCPU_SHARED_MSR)(r11) | ||
99 | #endif | ||
100 | ori r8, r8, MSR_ME | MSR_RI | ||
101 | PPC_STL r5, VCPU_PC(r4) | ||
102 | |||
103 | /* | ||
104 | * Make sure CE/ME/RI are set (if appropriate for exception type) | ||
105 | * whether or not the guest had it set. Since mfmsr/mtmsr are | ||
106 | * somewhat expensive, skip in the common case where the guest | ||
107 | * had all these bits set (and thus they're still set if | ||
108 | * appropriate for the exception type). | ||
109 | */ | ||
110 | cmpw r6, r8 | ||
111 | .if \flags & NEED_EMU | ||
112 | lwz r9, KVM_LPID(r9) | ||
113 | .endif | ||
114 | beq 1f | ||
115 | mfmsr r7 | ||
116 | .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0 | ||
117 | oris r7, r7, MSR_CE@h | ||
118 | .endif | ||
119 | .if \srr0 != SPRN_MCSRR0 | ||
120 | ori r7, r7, MSR_ME | MSR_RI | ||
121 | .endif | ||
122 | mtmsr r7 | ||
123 | 1: | ||
124 | |||
125 | .if \flags & NEED_EMU | ||
126 | /* | ||
127 | * This assumes you have external PID support. | ||
128 | * To support a bookehv CPU without external PID, you'll | ||
129 | * need to look up the TLB entry and create a temporary mapping. | ||
130 | * | ||
131 | * FIXME: we don't currently handle if the lwepx faults. PR-mode | ||
132 | * booke doesn't handle it either. Since Linux doesn't use | ||
133 | * broadcast tlbivax anymore, the only way this should happen is | ||
134 | * if the guest maps its memory execute-but-not-read, or if we | ||
135 | * somehow take a TLB miss in the middle of this entry code and | ||
136 | * evict the relevant entry. On e500mc, all kernel lowmem is | ||
137 | * bolted into TLB1 large page mappings, and we don't use | ||
138 | * broadcast invalidates, so we should not take a TLB miss here. | ||
139 | * | ||
140 | * Later we'll need to deal with faults here. Disallowing guest | ||
141 | * mappings that are execute-but-not-read could be an option on | ||
142 | * e500mc, but not on chips with an LRAT if it is used. | ||
143 | */ | ||
144 | |||
145 | mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ | ||
146 | PPC_STL r15, VCPU_GPR(r15)(r4) | ||
147 | PPC_STL r16, VCPU_GPR(r16)(r4) | ||
148 | PPC_STL r17, VCPU_GPR(r17)(r4) | ||
149 | PPC_STL r18, VCPU_GPR(r18)(r4) | ||
150 | PPC_STL r19, VCPU_GPR(r19)(r4) | ||
151 | mr r8, r3 | ||
152 | PPC_STL r20, VCPU_GPR(r20)(r4) | ||
153 | rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS | ||
154 | PPC_STL r21, VCPU_GPR(r21)(r4) | ||
155 | rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR | ||
156 | PPC_STL r22, VCPU_GPR(r22)(r4) | ||
157 | rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID | ||
158 | PPC_STL r23, VCPU_GPR(r23)(r4) | ||
159 | PPC_STL r24, VCPU_GPR(r24)(r4) | ||
160 | PPC_STL r25, VCPU_GPR(r25)(r4) | ||
161 | PPC_STL r26, VCPU_GPR(r26)(r4) | ||
162 | PPC_STL r27, VCPU_GPR(r27)(r4) | ||
163 | PPC_STL r28, VCPU_GPR(r28)(r4) | ||
164 | PPC_STL r29, VCPU_GPR(r29)(r4) | ||
165 | PPC_STL r30, VCPU_GPR(r30)(r4) | ||
166 | PPC_STL r31, VCPU_GPR(r31)(r4) | ||
167 | mtspr SPRN_EPLC, r8 | ||
168 | isync | ||
169 | lwepx r9, 0, r5 | ||
170 | mtspr SPRN_EPLC, r3 | ||
171 | stw r9, VCPU_LAST_INST(r4) | ||
172 | .endif | ||
173 | |||
174 | .if \flags & NEED_ESR | ||
175 | mfspr r8, SPRN_ESR | ||
176 | PPC_STL r8, VCPU_FAULT_ESR(r4) | ||
177 | .endif | ||
178 | |||
179 | .if \flags & NEED_DEAR | ||
180 | mfspr r9, SPRN_DEAR | ||
181 | PPC_STL r9, VCPU_FAULT_DEAR(r4) | ||
182 | .endif | ||
183 | |||
184 | b kvmppc_resume_host | ||
185 | .endm | ||
186 | |||
187 | /* | ||
188 | * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h | ||
189 | */ | ||
190 | .macro kvm_handler intno srr0, srr1, flags | ||
191 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | ||
192 | GET_VCPU(r11, r10) | ||
193 | PPC_STL r3, VCPU_GPR(r3)(r11) | ||
194 | mfspr r3, SPRN_SPRG_RSCRATCH0 | ||
195 | PPC_STL r4, VCPU_GPR(r4)(r11) | ||
196 | PPC_LL r4, THREAD_NORMSAVE(0)(r10) | ||
197 | PPC_STL r5, VCPU_GPR(r5)(r11) | ||
198 | PPC_STL r13, VCPU_CR(r11) | ||
199 | mfspr r5, \srr0 | ||
200 | PPC_STL r3, VCPU_GPR(r10)(r11) | ||
201 | PPC_LL r3, THREAD_NORMSAVE(2)(r10) | ||
202 | PPC_STL r6, VCPU_GPR(r6)(r11) | ||
203 | PPC_STL r4, VCPU_GPR(r11)(r11) | ||
204 | mfspr r6, \srr1 | ||
205 | PPC_STL r7, VCPU_GPR(r7)(r11) | ||
206 | PPC_STL r8, VCPU_GPR(r8)(r11) | ||
207 | PPC_STL r9, VCPU_GPR(r9)(r11) | ||
208 | PPC_STL r3, VCPU_GPR(r13)(r11) | ||
209 | mfctr r7 | ||
210 | PPC_STL r12, VCPU_GPR(r12)(r11) | ||
211 | PPC_STL r7, VCPU_CTR(r11) | ||
212 | mr r4, r11 | ||
213 | kvm_handler_common \intno, \srr0, \flags | ||
214 | .endm | ||
215 | |||
216 | .macro kvm_lvl_handler intno scratch srr0, srr1, flags | ||
217 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | ||
218 | mfspr r10, SPRN_SPRG_THREAD | ||
219 | GET_VCPU(r11, r10) | ||
220 | PPC_STL r3, VCPU_GPR(r3)(r11) | ||
221 | mfspr r3, \scratch | ||
222 | PPC_STL r4, VCPU_GPR(r4)(r11) | ||
223 | PPC_LL r4, GPR9(r8) | ||
224 | PPC_STL r5, VCPU_GPR(r5)(r11) | ||
225 | PPC_STL r9, VCPU_CR(r11) | ||
226 | mfspr r5, \srr0 | ||
227 | PPC_STL r3, VCPU_GPR(r8)(r11) | ||
228 | PPC_LL r3, GPR10(r8) | ||
229 | PPC_STL r6, VCPU_GPR(r6)(r11) | ||
230 | PPC_STL r4, VCPU_GPR(r9)(r11) | ||
231 | mfspr r6, \srr1 | ||
232 | PPC_LL r4, GPR11(r8) | ||
233 | PPC_STL r7, VCPU_GPR(r7)(r11) | ||
234 | PPC_STL r8, VCPU_GPR(r8)(r11) | ||
235 | PPC_STL r3, VCPU_GPR(r10)(r11) | ||
236 | mfctr r7 | ||
237 | PPC_STL r12, VCPU_GPR(r12)(r11) | ||
238 | PPC_STL r4, VCPU_GPR(r11)(r11) | ||
239 | PPC_STL r7, VCPU_CTR(r11) | ||
240 | mr r4, r11 | ||
241 | kvm_handler_common \intno, \srr0, \flags | ||
242 | .endm | ||
243 | |||
244 | kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \ | ||
245 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
246 | kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \ | ||
247 | SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0 | ||
248 | kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \ | ||
249 | SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR) | ||
250 | kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR | ||
251 | kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 | ||
252 | kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ | ||
253 | SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR) | ||
254 | kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR | ||
255 | kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
256 | kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 | ||
257 | kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
258 | kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0 | ||
259 | kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0 | ||
260 | kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \ | ||
261 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
262 | kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \ | ||
263 | SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) | ||
264 | kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0 | ||
265 | kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
266 | kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0 | ||
267 | kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0 | ||
268 | kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0 | ||
269 | kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0 | ||
270 | kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \ | ||
271 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
272 | kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU | ||
273 | kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 | ||
274 | kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0 | ||
275 | kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \ | ||
276 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
277 | kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ | ||
278 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
279 | kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ | ||
280 | SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0 | ||
281 | |||
282 | |||
283 | /* Registers: | ||
284 | * SPRG_SCRATCH0: guest r10 | ||
285 | * r4: vcpu pointer | ||
286 | * r11: vcpu->arch.shared | ||
287 | * r14: KVM exit number | ||
288 | */ | ||
289 | _GLOBAL(kvmppc_resume_host) | ||
290 | /* Save remaining volatile guest register state to vcpu. */ | ||
291 | mfspr r3, SPRN_VRSAVE | ||
292 | PPC_STL r0, VCPU_GPR(r0)(r4) | ||
293 | PPC_STL r1, VCPU_GPR(r1)(r4) | ||
294 | mflr r5 | ||
295 | mfspr r6, SPRN_SPRG4 | ||
296 | PPC_STL r2, VCPU_GPR(r2)(r4) | ||
297 | PPC_STL r5, VCPU_LR(r4) | ||
298 | mfspr r7, SPRN_SPRG5 | ||
299 | PPC_STL r3, VCPU_VRSAVE(r4) | ||
300 | PPC_STL r6, VCPU_SHARED_SPRG4(r11) | ||
301 | mfspr r8, SPRN_SPRG6 | ||
302 | PPC_STL r7, VCPU_SHARED_SPRG5(r11) | ||
303 | mfspr r9, SPRN_SPRG7 | ||
304 | PPC_STL r8, VCPU_SHARED_SPRG6(r11) | ||
305 | mfxer r3 | ||
306 | PPC_STL r9, VCPU_SHARED_SPRG7(r11) | ||
307 | |||
308 | /* save guest MAS registers and restore host mas4 & mas6 */ | ||
309 | mfspr r5, SPRN_MAS0 | ||
310 | PPC_STL r3, VCPU_XER(r4) | ||
311 | mfspr r6, SPRN_MAS1 | ||
312 | stw r5, VCPU_SHARED_MAS0(r11) | ||
313 | mfspr r7, SPRN_MAS2 | ||
314 | stw r6, VCPU_SHARED_MAS1(r11) | ||
315 | #ifndef CONFIG_64BIT | ||
316 | stw r7, (VCPU_SHARED_MAS2 + 4)(r11) | ||
317 | #else | ||
318 | std r7, (VCPU_SHARED_MAS2)(r11) | ||
319 | #endif | ||
320 | mfspr r5, SPRN_MAS3 | ||
321 | mfspr r6, SPRN_MAS4 | ||
322 | stw r5, VCPU_SHARED_MAS7_3+4(r11) | ||
323 | mfspr r7, SPRN_MAS6 | ||
324 | stw r6, VCPU_SHARED_MAS4(r11) | ||
325 | mfspr r5, SPRN_MAS7 | ||
326 | lwz r6, VCPU_HOST_MAS4(r4) | ||
327 | stw r7, VCPU_SHARED_MAS6(r11) | ||
328 | lwz r8, VCPU_HOST_MAS6(r4) | ||
329 | mtspr SPRN_MAS4, r6 | ||
330 | stw r5, VCPU_SHARED_MAS7_3+0(r11) | ||
331 | mtspr SPRN_MAS6, r8 | ||
332 | mfspr r3, SPRN_EPCR | ||
333 | rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH | ||
334 | mtspr SPRN_EPCR, r3 | ||
335 | isync | ||
336 | |||
337 | /* Restore host stack pointer */ | ||
338 | PPC_LL r1, VCPU_HOST_STACK(r4) | ||
339 | PPC_LL r2, HOST_R2(r1) | ||
340 | |||
341 | /* Switch to kernel stack and jump to handler. */ | ||
342 | PPC_LL r3, HOST_RUN(r1) | ||
343 | mr r5, r14 /* intno */ | ||
344 | mr r14, r4 /* Save vcpu pointer. */ | ||
345 | bl kvmppc_handle_exit | ||
346 | |||
347 | /* Restore vcpu pointer and the nonvolatiles we used. */ | ||
348 | mr r4, r14 | ||
349 | PPC_LL r14, VCPU_GPR(r14)(r4) | ||
350 | |||
351 | andi. r5, r3, RESUME_FLAG_NV | ||
352 | beq skip_nv_load | ||
353 | PPC_LL r15, VCPU_GPR(r15)(r4) | ||
354 | PPC_LL r16, VCPU_GPR(r16)(r4) | ||
355 | PPC_LL r17, VCPU_GPR(r17)(r4) | ||
356 | PPC_LL r18, VCPU_GPR(r18)(r4) | ||
357 | PPC_LL r19, VCPU_GPR(r19)(r4) | ||
358 | PPC_LL r20, VCPU_GPR(r20)(r4) | ||
359 | PPC_LL r21, VCPU_GPR(r21)(r4) | ||
360 | PPC_LL r22, VCPU_GPR(r22)(r4) | ||
361 | PPC_LL r23, VCPU_GPR(r23)(r4) | ||
362 | PPC_LL r24, VCPU_GPR(r24)(r4) | ||
363 | PPC_LL r25, VCPU_GPR(r25)(r4) | ||
364 | PPC_LL r26, VCPU_GPR(r26)(r4) | ||
365 | PPC_LL r27, VCPU_GPR(r27)(r4) | ||
366 | PPC_LL r28, VCPU_GPR(r28)(r4) | ||
367 | PPC_LL r29, VCPU_GPR(r29)(r4) | ||
368 | PPC_LL r30, VCPU_GPR(r30)(r4) | ||
369 | PPC_LL r31, VCPU_GPR(r31)(r4) | ||
370 | skip_nv_load: | ||
371 | /* Should we return to the guest? */ | ||
372 | andi. r5, r3, RESUME_FLAG_HOST | ||
373 | beq lightweight_exit | ||
374 | |||
375 | srawi r3, r3, 2 /* Shift -ERR back down. */ | ||
376 | |||
377 | heavyweight_exit: | ||
378 | /* Not returning to guest. */ | ||
379 | PPC_LL r5, HOST_STACK_LR(r1) | ||
380 | |||
381 | /* | ||
382 | * We already saved guest volatile register state; now save the | ||
383 | * non-volatiles. | ||
384 | */ | ||
385 | |||
386 | PPC_STL r15, VCPU_GPR(r15)(r4) | ||
387 | PPC_STL r16, VCPU_GPR(r16)(r4) | ||
388 | PPC_STL r17, VCPU_GPR(r17)(r4) | ||
389 | PPC_STL r18, VCPU_GPR(r18)(r4) | ||
390 | PPC_STL r19, VCPU_GPR(r19)(r4) | ||
391 | PPC_STL r20, VCPU_GPR(r20)(r4) | ||
392 | PPC_STL r21, VCPU_GPR(r21)(r4) | ||
393 | PPC_STL r22, VCPU_GPR(r22)(r4) | ||
394 | PPC_STL r23, VCPU_GPR(r23)(r4) | ||
395 | PPC_STL r24, VCPU_GPR(r24)(r4) | ||
396 | PPC_STL r25, VCPU_GPR(r25)(r4) | ||
397 | PPC_STL r26, VCPU_GPR(r26)(r4) | ||
398 | PPC_STL r27, VCPU_GPR(r27)(r4) | ||
399 | PPC_STL r28, VCPU_GPR(r28)(r4) | ||
400 | PPC_STL r29, VCPU_GPR(r29)(r4) | ||
401 | PPC_STL r30, VCPU_GPR(r30)(r4) | ||
402 | PPC_STL r31, VCPU_GPR(r31)(r4) | ||
403 | |||
404 | /* Load host non-volatile register state from host stack. */ | ||
405 | PPC_LL r14, HOST_NV_GPR(r14)(r1) | ||
406 | PPC_LL r15, HOST_NV_GPR(r15)(r1) | ||
407 | PPC_LL r16, HOST_NV_GPR(r16)(r1) | ||
408 | PPC_LL r17, HOST_NV_GPR(r17)(r1) | ||
409 | PPC_LL r18, HOST_NV_GPR(r18)(r1) | ||
410 | PPC_LL r19, HOST_NV_GPR(r19)(r1) | ||
411 | PPC_LL r20, HOST_NV_GPR(r20)(r1) | ||
412 | PPC_LL r21, HOST_NV_GPR(r21)(r1) | ||
413 | PPC_LL r22, HOST_NV_GPR(r22)(r1) | ||
414 | PPC_LL r23, HOST_NV_GPR(r23)(r1) | ||
415 | PPC_LL r24, HOST_NV_GPR(r24)(r1) | ||
416 | PPC_LL r25, HOST_NV_GPR(r25)(r1) | ||
417 | PPC_LL r26, HOST_NV_GPR(r26)(r1) | ||
418 | PPC_LL r27, HOST_NV_GPR(r27)(r1) | ||
419 | PPC_LL r28, HOST_NV_GPR(r28)(r1) | ||
420 | PPC_LL r29, HOST_NV_GPR(r29)(r1) | ||
421 | PPC_LL r30, HOST_NV_GPR(r30)(r1) | ||
422 | PPC_LL r31, HOST_NV_GPR(r31)(r1) | ||
423 | |||
424 | /* Return to kvm_vcpu_run(). */ | ||
425 | mtlr r5 | ||
426 | addi r1, r1, HOST_STACK_SIZE | ||
427 | /* r3 still contains the return code from kvmppc_handle_exit(). */ | ||
428 | blr | ||
429 | |||
430 | /* Registers: | ||
431 | * r3: kvm_run pointer | ||
432 | * r4: vcpu pointer | ||
433 | */ | ||
434 | _GLOBAL(__kvmppc_vcpu_run) | ||
435 | stwu r1, -HOST_STACK_SIZE(r1) | ||
436 | PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */ | ||
437 | |||
438 | /* Save host state to stack. */ | ||
439 | PPC_STL r3, HOST_RUN(r1) | ||
440 | mflr r3 | ||
441 | PPC_STL r3, HOST_STACK_LR(r1) | ||
442 | |||
443 | /* Save host non-volatile register state to stack. */ | ||
444 | PPC_STL r14, HOST_NV_GPR(r14)(r1) | ||
445 | PPC_STL r15, HOST_NV_GPR(r15)(r1) | ||
446 | PPC_STL r16, HOST_NV_GPR(r16)(r1) | ||
447 | PPC_STL r17, HOST_NV_GPR(r17)(r1) | ||
448 | PPC_STL r18, HOST_NV_GPR(r18)(r1) | ||
449 | PPC_STL r19, HOST_NV_GPR(r19)(r1) | ||
450 | PPC_STL r20, HOST_NV_GPR(r20)(r1) | ||
451 | PPC_STL r21, HOST_NV_GPR(r21)(r1) | ||
452 | PPC_STL r22, HOST_NV_GPR(r22)(r1) | ||
453 | PPC_STL r23, HOST_NV_GPR(r23)(r1) | ||
454 | PPC_STL r24, HOST_NV_GPR(r24)(r1) | ||
455 | PPC_STL r25, HOST_NV_GPR(r25)(r1) | ||
456 | PPC_STL r26, HOST_NV_GPR(r26)(r1) | ||
457 | PPC_STL r27, HOST_NV_GPR(r27)(r1) | ||
458 | PPC_STL r28, HOST_NV_GPR(r28)(r1) | ||
459 | PPC_STL r29, HOST_NV_GPR(r29)(r1) | ||
460 | PPC_STL r30, HOST_NV_GPR(r30)(r1) | ||
461 | PPC_STL r31, HOST_NV_GPR(r31)(r1) | ||
462 | |||
463 | /* Load guest non-volatiles. */ | ||
464 | PPC_LL r14, VCPU_GPR(r14)(r4) | ||
465 | PPC_LL r15, VCPU_GPR(r15)(r4) | ||
466 | PPC_LL r16, VCPU_GPR(r16)(r4) | ||
467 | PPC_LL r17, VCPU_GPR(r17)(r4) | ||
468 | PPC_LL r18, VCPU_GPR(r18)(r4) | ||
469 | PPC_LL r19, VCPU_GPR(r19)(r4) | ||
470 | PPC_LL r20, VCPU_GPR(r20)(r4) | ||
471 | PPC_LL r21, VCPU_GPR(r21)(r4) | ||
472 | PPC_LL r22, VCPU_GPR(r22)(r4) | ||
473 | PPC_LL r23, VCPU_GPR(r23)(r4) | ||
474 | PPC_LL r24, VCPU_GPR(r24)(r4) | ||
475 | PPC_LL r25, VCPU_GPR(r25)(r4) | ||
476 | PPC_LL r26, VCPU_GPR(r26)(r4) | ||
477 | PPC_LL r27, VCPU_GPR(r27)(r4) | ||
478 | PPC_LL r28, VCPU_GPR(r28)(r4) | ||
479 | PPC_LL r29, VCPU_GPR(r29)(r4) | ||
480 | PPC_LL r30, VCPU_GPR(r30)(r4) | ||
481 | PPC_LL r31, VCPU_GPR(r31)(r4) | ||
482 | |||
483 | |||
484 | lightweight_exit: | ||
485 | PPC_STL r2, HOST_R2(r1) | ||
486 | |||
487 | mfspr r3, SPRN_PID | ||
488 | stw r3, VCPU_HOST_PID(r4) | ||
489 | lwz r3, VCPU_GUEST_PID(r4) | ||
490 | mtspr SPRN_PID, r3 | ||
491 | |||
492 | /* Saving the vcpu pointer for the exception handlers | ||
493 | * must be done before loading guest r2. | ||
494 | */ | ||
495 | // SET_VCPU(r4) | ||
496 | |||
497 | PPC_LL r11, VCPU_SHARED(r4) | ||
498 | /* Save host mas4 and mas6 and load guest MAS registers */ | ||
499 | mfspr r3, SPRN_MAS4 | ||
500 | stw r3, VCPU_HOST_MAS4(r4) | ||
501 | mfspr r3, SPRN_MAS6 | ||
502 | stw r3, VCPU_HOST_MAS6(r4) | ||
503 | lwz r3, VCPU_SHARED_MAS0(r11) | ||
504 | lwz r5, VCPU_SHARED_MAS1(r11) | ||
505 | #ifndef CONFIG_64BIT | ||
506 | lwz r6, (VCPU_SHARED_MAS2 + 4)(r11) | ||
507 | #else | ||
508 | ld r6, (VCPU_SHARED_MAS2)(r11) | ||
509 | #endif | ||
510 | lwz r7, VCPU_SHARED_MAS7_3+4(r11) | ||
511 | lwz r8, VCPU_SHARED_MAS4(r11) | ||
512 | mtspr SPRN_MAS0, r3 | ||
513 | mtspr SPRN_MAS1, r5 | ||
514 | mtspr SPRN_MAS2, r6 | ||
515 | mtspr SPRN_MAS3, r7 | ||
516 | mtspr SPRN_MAS4, r8 | ||
517 | lwz r3, VCPU_SHARED_MAS6(r11) | ||
518 | lwz r5, VCPU_SHARED_MAS7_3+0(r11) | ||
519 | mtspr SPRN_MAS6, r3 | ||
520 | mtspr SPRN_MAS7, r5 | ||
521 | /* Disable MAS register updates via exception */ | ||
522 | mfspr r3, SPRN_EPCR | ||
523 | oris r3, r3, SPRN_EPCR_DMIUH@h | ||
524 | mtspr SPRN_EPCR, r3 | ||
525 | |||
526 | /* | ||
527 | * Host interrupt handlers may have clobbered these guest-readable | ||
528 | * SPRGs, so we need to reload them here with the guest's values. | ||
529 | */ | ||
530 | lwz r3, VCPU_VRSAVE(r4) | ||
531 | lwz r5, VCPU_SHARED_SPRG4(r11) | ||
532 | mtspr SPRN_VRSAVE, r3 | ||
533 | lwz r6, VCPU_SHARED_SPRG5(r11) | ||
534 | mtspr SPRN_SPRG4W, r5 | ||
535 | lwz r7, VCPU_SHARED_SPRG6(r11) | ||
536 | mtspr SPRN_SPRG5W, r6 | ||
537 | lwz r8, VCPU_SHARED_SPRG7(r11) | ||
538 | mtspr SPRN_SPRG6W, r7 | ||
539 | mtspr SPRN_SPRG7W, r8 | ||
540 | |||
541 | /* Load some guest volatiles. */ | ||
542 | PPC_LL r3, VCPU_LR(r4) | ||
543 | PPC_LL r5, VCPU_XER(r4) | ||
544 | PPC_LL r6, VCPU_CTR(r4) | ||
545 | PPC_LL r7, VCPU_CR(r4) | ||
546 | PPC_LL r8, VCPU_PC(r4) | ||
547 | #ifndef CONFIG_64BIT | ||
548 | lwz r9, (VCPU_SHARED_MSR + 4)(r11) | ||
549 | #else | ||
550 | ld r9, (VCPU_SHARED_MSR)(r11) | ||
551 | #endif | ||
552 | PPC_LL r0, VCPU_GPR(r0)(r4) | ||
553 | PPC_LL r1, VCPU_GPR(r1)(r4) | ||
554 | PPC_LL r2, VCPU_GPR(r2)(r4) | ||
555 | PPC_LL r10, VCPU_GPR(r10)(r4) | ||
556 | PPC_LL r11, VCPU_GPR(r11)(r4) | ||
557 | PPC_LL r12, VCPU_GPR(r12)(r4) | ||
558 | PPC_LL r13, VCPU_GPR(r13)(r4) | ||
559 | mtlr r3 | ||
560 | mtxer r5 | ||
561 | mtctr r6 | ||
562 | mtcr r7 | ||
563 | mtsrr0 r8 | ||
564 | mtsrr1 r9 | ||
565 | |||
566 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
567 | /* save enter time */ | ||
568 | 1: | ||
569 | mfspr r6, SPRN_TBRU | ||
570 | mfspr r7, SPRN_TBRL | ||
571 | mfspr r8, SPRN_TBRU | ||
572 | cmpw r8, r6 | ||
573 | PPC_STL r7, VCPU_TIMING_LAST_ENTER_TBL(r4) | ||
574 | bne 1b | ||
575 | PPC_STL r8, VCPU_TIMING_LAST_ENTER_TBU(r4) | ||
576 | #endif | ||
577 | |||
578 | /* Finish loading guest volatiles and jump to guest. */ | ||
579 | PPC_LL r5, VCPU_GPR(r5)(r4) | ||
580 | PPC_LL r6, VCPU_GPR(r6)(r4) | ||
581 | PPC_LL r7, VCPU_GPR(r7)(r4) | ||
582 | PPC_LL r8, VCPU_GPR(r8)(r4) | ||
583 | PPC_LL r9, VCPU_GPR(r9)(r4) | ||
584 | |||
585 | PPC_LL r3, VCPU_GPR(r3)(r4) | ||
586 | PPC_LL r4, VCPU_GPR(r4)(r4) | ||
587 | rfi | ||
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index cd53e08403b3..6a530e4b3e7c 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -114,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu) | |||
114 | goto out; | 114 | goto out; |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | #ifdef CONFIG_KVM_BOOKE_HV | ||
118 | if (!cpu_has_feature(CPU_FTR_EMB_HV)) | ||
119 | goto out; | ||
120 | #endif | ||
121 | |||
117 | r = true; | 122 | r = true; |
118 | 123 | ||
119 | out: | 124 | out: |
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index 8167d42a776f..bf191e72b2d8 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) | |||
93 | case SIGNAL_EXITS: | 93 | case SIGNAL_EXITS: |
94 | vcpu->stat.signal_exits++; | 94 | vcpu->stat.signal_exits++; |
95 | break; | 95 | break; |
96 | case DBELL_EXITS: | ||
97 | vcpu->stat.dbell_exits++; | ||
98 | break; | ||
99 | case GDBELL_EXITS: | ||
100 | vcpu->stat.gdbell_exits++; | ||
101 | break; | ||
96 | } | 102 | } |
97 | } | 103 | } |
98 | 104 | ||