author		James Hogan <james.hogan@imgtec.com>	2016-11-18 08:14:37 -0500
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:20:46 -0500
commit		a2c046e40ff16ef6c20d534b0d77d526bc02a684 (patch)
tree		a016109fa9b7cbcdf636924f30d4972371f5621f
parent		c550d53934d821dbdd867ca314d417f2e918c72c (diff)
KVM: MIPS: Add vcpu_run() & vcpu_reenter() callbacks
Add implementation callbacks for entering the guest (vcpu_run()) and
reentering the guest (vcpu_reenter()), allowing implementation-specific
operations to be performed before entering the guest or after returning
to the host without cluttering kvm_arch_vcpu_ioctl_run().

This allows the T&E-specific lazy user GVA flush to be moved into
trap_emul.c, along with disabling of the HTW. We also move
kvm_mips_deliver_interrupts(), as VZ will need to restore the guest timer
state prior to delivering interrupts.
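For illustration only (not part of this patch), below is a minimal,
self-contained user-space sketch of the callback pattern the patch
introduces: a generic run loop that dispatches to per-implementation
vcpu_run()/vcpu_reenter() hooks. All toy_* names are made up for the
sketch and are not kernel symbols.

#include <stdio.h>

struct toy_vcpu;

/*
 * Toy stand-in for struct kvm_mips_callbacks, reduced to the two hooks
 * added by this patch.  Everything in this sketch is illustrative only.
 */
struct toy_callbacks {
	int (*vcpu_run)(struct toy_vcpu *vcpu);
	void (*vcpu_reenter)(struct toy_vcpu *vcpu);
};

struct toy_vcpu {
	const struct toy_callbacks *cbs;
	int remaining_exits;	/* number of guest exits to simulate */
};

/* What a backend (e.g. trap & emulate) would supply; here it just prints. */
static void toy_vcpu_reenter(struct toy_vcpu *vcpu)
{
	printf("reenter: implementation-specific work before guest entry\n");
}

static int toy_vcpu_run(struct toy_vcpu *vcpu)
{
	printf("run: deliver interrupts, stop HTW, enter guest\n");
	return --vcpu->remaining_exits > 0;	/* nonzero ~ RESUME_GUEST */
}

static const struct toy_callbacks toy_callbacks = {
	.vcpu_run = toy_vcpu_run,
	.vcpu_reenter = toy_vcpu_reenter,
};

/* Generic loop, analogous in shape to kvm_arch_vcpu_ioctl_run(): it only
 * knows the callback interface, not the implementation behind it. */
static void toy_ioctl_run(struct toy_vcpu *vcpu)
{
	while (vcpu->cbs->vcpu_run(vcpu))
		vcpu->cbs->vcpu_reenter(vcpu);
}

int main(void)
{
	struct toy_vcpu vcpu = { .cbs = &toy_callbacks, .remaining_exits = 3 };

	toy_ioctl_run(&vcpu);
	return 0;
}

The patch below establishes the same division of labour in the kernel:
kvm_arch_vcpu_ioctl_run() only invokes kvm_mips_callbacks->vcpu_run(),
while interrupt delivery, HTW handling and the lazy GVA flush move
behind the trap & emulate callbacks.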
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
-rw-r--r--	arch/mips/include/asm/kvm_host.h |  2
-rw-r--r--	arch/mips/kvm/mips.c             | 43
-rw-r--r--	arch/mips/kvm/trap_emul.c        | 48
3 files changed, 52 insertions(+), 41 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 923f81dc6115..9f319375835a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -539,6 +539,8 @@ struct kvm_mips_callbacks {
 			    const struct kvm_one_reg *reg, s64 v);
 	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
 	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 85bc54f35695..1733877d8a53 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -410,32 +410,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }
 
-/* Must be called with preemption disabled, just before entering guest */
-static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
-{
-	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	int i, cpu = smp_processor_id();
-	unsigned int gasid;
-
-	/*
-	 * Lazy host ASID regeneration for guest user mode.
-	 * If the guest ASID has changed since the last guest usermode
-	 * execution, regenerate the host ASID so as to invalidate stale TLB
-	 * entries.
-	 */
-	if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
-		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
-		if (gasid != vcpu->arch.last_user_gasid) {
-			kvm_get_new_mmu_context(user_mm, cpu, vcpu);
-			for_each_possible_cpu(i)
-				if (i != cpu)
-					cpu_context(i, user_mm) = 0;
-			vcpu->arch.last_user_gasid = gasid;
-		}
-	}
-}
-
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r = 0;
@@ -453,25 +427,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	lose_fpu(1);
 
 	local_irq_disable();
-	/* Check if we have any exceptions/interrupts pending */
-	kvm_mips_deliver_interrupts(vcpu,
-				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
-
 	guest_enter_irqoff();
-
-	/* Disable hardware page table walking while in guest */
-	htw_stop();
-
 	trace_kvm_enter(vcpu);
 
-	kvm_mips_check_asids(vcpu);
+	r = kvm_mips_callbacks->vcpu_run(run, vcpu);
 
-	r = vcpu->arch.vcpu_run(run, vcpu);
 	trace_kvm_out(vcpu);
-
-	/* Re-enable HTW before enabling interrupts */
-	htw_start();
-
 	guest_exit_irqoff();
 	local_irq_enable();
 
@@ -1570,7 +1531,7 @@ skip_emul:
 	if (ret == RESUME_GUEST) {
 		trace_kvm_reenter(vcpu);
 
-		kvm_mips_check_asids(vcpu);
+		kvm_mips_callbacks->vcpu_reenter(run, vcpu);
 
 		/*
 		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index c7854d32fd64..92734d095c94 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -692,6 +692,52 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 	return 0;
 }
 
+static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
+				       struct kvm_vcpu *vcpu)
+{
+	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int i, cpu = smp_processor_id();
+	unsigned int gasid;
+
+	/*
+	 * Lazy host ASID regeneration for guest user mode.
+	 * If the guest ASID has changed since the last guest usermode
+	 * execution, regenerate the host ASID so as to invalidate stale TLB
+	 * entries.
+	 */
+	if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
+		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+		if (gasid != vcpu->arch.last_user_gasid) {
+			kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+			for_each_possible_cpu(i)
+				if (i != cpu)
+					cpu_context(i, user_mm) = 0;
+			vcpu->arch.last_user_gasid = gasid;
+		}
+	}
+}
+
+static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	int r;
+
+	/* Check if we have any exceptions/interrupts pending */
+	kvm_mips_deliver_interrupts(vcpu,
+				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+	kvm_trap_emul_vcpu_reenter(run, vcpu);
+
+	/* Disable hardware page table walking while in guest */
+	htw_stop();
+
+	r = vcpu->arch.vcpu_run(run, vcpu);
+
+	htw_start();
+
+	return r;
+}
+
 static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	/* exit handlers */
 	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
@@ -724,6 +770,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.set_one_reg = kvm_trap_emul_set_one_reg,
 	.vcpu_load = kvm_trap_emul_vcpu_load,
 	.vcpu_put = kvm_trap_emul_vcpu_put,
+	.vcpu_run = kvm_trap_emul_vcpu_run,
+	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
 };
 
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)