diff options
author | Avi Kivity <avi@redhat.com> | 2009-08-24 04:10:17 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-12-03 02:32:06 -0500 |
commit | 851ba6922ac575b749f63dee0ae072808163ba6a (patch) | |
tree | 665111285e65fea316ce6614f1208261a255fb70 /arch/x86/kvm/vmx.c | |
parent | d8769fedd4e8323d8afea9a1b2bdebff4f1d2d37 (diff) |
KVM: Don't pass kvm_run arguments
They're just copies of vcpu->run, which is readily accessible.
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 113 |
1 file changed, 54 insertions(+), 59 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ed53b42caba1..4635298d000a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2659,7 +2659,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |||
2659 | * Cause the #SS fault with 0 error code in VM86 mode. | 2659 | * Cause the #SS fault with 0 error code in VM86 mode. |
2660 | */ | 2660 | */ |
2661 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) | 2661 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) |
2662 | if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) | 2662 | if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE) |
2663 | return 1; | 2663 | return 1; |
2664 | /* | 2664 | /* |
2665 | * Forward all other exceptions that are valid in real mode. | 2665 | * Forward all other exceptions that are valid in real mode. |
@@ -2710,15 +2710,16 @@ static void kvm_machine_check(void) | |||
2710 | #endif | 2710 | #endif |
2711 | } | 2711 | } |
2712 | 2712 | ||
2713 | static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2713 | static int handle_machine_check(struct kvm_vcpu *vcpu) |
2714 | { | 2714 | { |
2715 | /* already handled by vcpu_run */ | 2715 | /* already handled by vcpu_run */ |
2716 | return 1; | 2716 | return 1; |
2717 | } | 2717 | } |
2718 | 2718 | ||
2719 | static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2719 | static int handle_exception(struct kvm_vcpu *vcpu) |
2720 | { | 2720 | { |
2721 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2721 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2722 | struct kvm_run *kvm_run = vcpu->run; | ||
2722 | u32 intr_info, ex_no, error_code; | 2723 | u32 intr_info, ex_no, error_code; |
2723 | unsigned long cr2, rip, dr6; | 2724 | unsigned long cr2, rip, dr6; |
2724 | u32 vect_info; | 2725 | u32 vect_info; |
@@ -2728,7 +2729,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2728 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | 2729 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
2729 | 2730 | ||
2730 | if (is_machine_check(intr_info)) | 2731 | if (is_machine_check(intr_info)) |
2731 | return handle_machine_check(vcpu, kvm_run); | 2732 | return handle_machine_check(vcpu); |
2732 | 2733 | ||
2733 | if ((vect_info & VECTORING_INFO_VALID_MASK) && | 2734 | if ((vect_info & VECTORING_INFO_VALID_MASK) && |
2734 | !is_page_fault(intr_info)) | 2735 | !is_page_fault(intr_info)) |
@@ -2744,7 +2745,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2744 | } | 2745 | } |
2745 | 2746 | ||
2746 | if (is_invalid_opcode(intr_info)) { | 2747 | if (is_invalid_opcode(intr_info)) { |
2747 | er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD); | 2748 | er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD); |
2748 | if (er != EMULATE_DONE) | 2749 | if (er != EMULATE_DONE) |
2749 | kvm_queue_exception(vcpu, UD_VECTOR); | 2750 | kvm_queue_exception(vcpu, UD_VECTOR); |
2750 | return 1; | 2751 | return 1; |
@@ -2803,20 +2804,19 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2803 | return 0; | 2804 | return 0; |
2804 | } | 2805 | } |
2805 | 2806 | ||
2806 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | 2807 | static int handle_external_interrupt(struct kvm_vcpu *vcpu) |
2807 | struct kvm_run *kvm_run) | ||
2808 | { | 2808 | { |
2809 | ++vcpu->stat.irq_exits; | 2809 | ++vcpu->stat.irq_exits; |
2810 | return 1; | 2810 | return 1; |
2811 | } | 2811 | } |
2812 | 2812 | ||
2813 | static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2813 | static int handle_triple_fault(struct kvm_vcpu *vcpu) |
2814 | { | 2814 | { |
2815 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | 2815 | vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; |
2816 | return 0; | 2816 | return 0; |
2817 | } | 2817 | } |
2818 | 2818 | ||
2819 | static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2819 | static int handle_io(struct kvm_vcpu *vcpu) |
2820 | { | 2820 | { |
2821 | unsigned long exit_qualification; | 2821 | unsigned long exit_qualification; |
2822 | int size, in, string; | 2822 | int size, in, string; |
@@ -2827,8 +2827,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2827 | string = (exit_qualification & 16) != 0; | 2827 | string = (exit_qualification & 16) != 0; |
2828 | 2828 | ||
2829 | if (string) { | 2829 | if (string) { |
2830 | if (emulate_instruction(vcpu, | 2830 | if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO) |
2831 | kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) | ||
2832 | return 0; | 2831 | return 0; |
2833 | return 1; | 2832 | return 1; |
2834 | } | 2833 | } |
@@ -2838,7 +2837,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2838 | port = exit_qualification >> 16; | 2837 | port = exit_qualification >> 16; |
2839 | 2838 | ||
2840 | skip_emulated_instruction(vcpu); | 2839 | skip_emulated_instruction(vcpu); |
2841 | return kvm_emulate_pio(vcpu, kvm_run, in, size, port); | 2840 | return kvm_emulate_pio(vcpu, in, size, port); |
2842 | } | 2841 | } |
2843 | 2842 | ||
2844 | static void | 2843 | static void |
@@ -2852,7 +2851,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | |||
2852 | hypercall[2] = 0xc1; | 2851 | hypercall[2] = 0xc1; |
2853 | } | 2852 | } |
2854 | 2853 | ||
2855 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2854 | static int handle_cr(struct kvm_vcpu *vcpu) |
2856 | { | 2855 | { |
2857 | unsigned long exit_qualification, val; | 2856 | unsigned long exit_qualification, val; |
2858 | int cr; | 2857 | int cr; |
@@ -2887,7 +2886,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2887 | return 1; | 2886 | return 1; |
2888 | if (cr8_prev <= cr8) | 2887 | if (cr8_prev <= cr8) |
2889 | return 1; | 2888 | return 1; |
2890 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | 2889 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
2891 | return 0; | 2890 | return 0; |
2892 | } | 2891 | } |
2893 | }; | 2892 | }; |
@@ -2922,13 +2921,13 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2922 | default: | 2921 | default: |
2923 | break; | 2922 | break; |
2924 | } | 2923 | } |
2925 | kvm_run->exit_reason = 0; | 2924 | vcpu->run->exit_reason = 0; |
2926 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", | 2925 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", |
2927 | (int)(exit_qualification >> 4) & 3, cr); | 2926 | (int)(exit_qualification >> 4) & 3, cr); |
2928 | return 0; | 2927 | return 0; |
2929 | } | 2928 | } |
2930 | 2929 | ||
2931 | static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2930 | static int handle_dr(struct kvm_vcpu *vcpu) |
2932 | { | 2931 | { |
2933 | unsigned long exit_qualification; | 2932 | unsigned long exit_qualification; |
2934 | unsigned long val; | 2933 | unsigned long val; |
@@ -2944,13 +2943,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2944 | * guest debugging itself. | 2943 | * guest debugging itself. |
2945 | */ | 2944 | */ |
2946 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { | 2945 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { |
2947 | kvm_run->debug.arch.dr6 = vcpu->arch.dr6; | 2946 | vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; |
2948 | kvm_run->debug.arch.dr7 = dr; | 2947 | vcpu->run->debug.arch.dr7 = dr; |
2949 | kvm_run->debug.arch.pc = | 2948 | vcpu->run->debug.arch.pc = |
2950 | vmcs_readl(GUEST_CS_BASE) + | 2949 | vmcs_readl(GUEST_CS_BASE) + |
2951 | vmcs_readl(GUEST_RIP); | 2950 | vmcs_readl(GUEST_RIP); |
2952 | kvm_run->debug.arch.exception = DB_VECTOR; | 2951 | vcpu->run->debug.arch.exception = DB_VECTOR; |
2953 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 2952 | vcpu->run->exit_reason = KVM_EXIT_DEBUG; |
2954 | return 0; | 2953 | return 0; |
2955 | } else { | 2954 | } else { |
2956 | vcpu->arch.dr7 &= ~DR7_GD; | 2955 | vcpu->arch.dr7 &= ~DR7_GD; |
@@ -3016,13 +3015,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3016 | return 1; | 3015 | return 1; |
3017 | } | 3016 | } |
3018 | 3017 | ||
3019 | static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3018 | static int handle_cpuid(struct kvm_vcpu *vcpu) |
3020 | { | 3019 | { |
3021 | kvm_emulate_cpuid(vcpu); | 3020 | kvm_emulate_cpuid(vcpu); |
3022 | return 1; | 3021 | return 1; |
3023 | } | 3022 | } |
3024 | 3023 | ||
3025 | static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3024 | static int handle_rdmsr(struct kvm_vcpu *vcpu) |
3026 | { | 3025 | { |
3027 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | 3026 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; |
3028 | u64 data; | 3027 | u64 data; |
@@ -3041,7 +3040,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3041 | return 1; | 3040 | return 1; |
3042 | } | 3041 | } |
3043 | 3042 | ||
3044 | static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3043 | static int handle_wrmsr(struct kvm_vcpu *vcpu) |
3045 | { | 3044 | { |
3046 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | 3045 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; |
3047 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | 3046 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) |
@@ -3058,14 +3057,12 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3058 | return 1; | 3057 | return 1; |
3059 | } | 3058 | } |
3060 | 3059 | ||
3061 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu, | 3060 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) |
3062 | struct kvm_run *kvm_run) | ||
3063 | { | 3061 | { |
3064 | return 1; | 3062 | return 1; |
3065 | } | 3063 | } |
3066 | 3064 | ||
3067 | static int handle_interrupt_window(struct kvm_vcpu *vcpu, | 3065 | static int handle_interrupt_window(struct kvm_vcpu *vcpu) |
3068 | struct kvm_run *kvm_run) | ||
3069 | { | 3066 | { |
3070 | u32 cpu_based_vm_exec_control; | 3067 | u32 cpu_based_vm_exec_control; |
3071 | 3068 | ||
@@ -3081,34 +3078,34 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu, | |||
3081 | * possible | 3078 | * possible |
3082 | */ | 3079 | */ |
3083 | if (!irqchip_in_kernel(vcpu->kvm) && | 3080 | if (!irqchip_in_kernel(vcpu->kvm) && |
3084 | kvm_run->request_interrupt_window && | 3081 | vcpu->run->request_interrupt_window && |
3085 | !kvm_cpu_has_interrupt(vcpu)) { | 3082 | !kvm_cpu_has_interrupt(vcpu)) { |
3086 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | 3083 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
3087 | return 0; | 3084 | return 0; |
3088 | } | 3085 | } |
3089 | return 1; | 3086 | return 1; |
3090 | } | 3087 | } |
3091 | 3088 | ||
3092 | static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3089 | static int handle_halt(struct kvm_vcpu *vcpu) |
3093 | { | 3090 | { |
3094 | skip_emulated_instruction(vcpu); | 3091 | skip_emulated_instruction(vcpu); |
3095 | return kvm_emulate_halt(vcpu); | 3092 | return kvm_emulate_halt(vcpu); |
3096 | } | 3093 | } |
3097 | 3094 | ||
3098 | static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3095 | static int handle_vmcall(struct kvm_vcpu *vcpu) |
3099 | { | 3096 | { |
3100 | skip_emulated_instruction(vcpu); | 3097 | skip_emulated_instruction(vcpu); |
3101 | kvm_emulate_hypercall(vcpu); | 3098 | kvm_emulate_hypercall(vcpu); |
3102 | return 1; | 3099 | return 1; |
3103 | } | 3100 | } |
3104 | 3101 | ||
3105 | static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3102 | static int handle_vmx_insn(struct kvm_vcpu *vcpu) |
3106 | { | 3103 | { |
3107 | kvm_queue_exception(vcpu, UD_VECTOR); | 3104 | kvm_queue_exception(vcpu, UD_VECTOR); |
3108 | return 1; | 3105 | return 1; |
3109 | } | 3106 | } |
3110 | 3107 | ||
3111 | static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3108 | static int handle_invlpg(struct kvm_vcpu *vcpu) |
3112 | { | 3109 | { |
3113 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 3110 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
3114 | 3111 | ||
@@ -3117,14 +3114,14 @@ static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3117 | return 1; | 3114 | return 1; |
3118 | } | 3115 | } |
3119 | 3116 | ||
3120 | static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3117 | static int handle_wbinvd(struct kvm_vcpu *vcpu) |
3121 | { | 3118 | { |
3122 | skip_emulated_instruction(vcpu); | 3119 | skip_emulated_instruction(vcpu); |
3123 | /* TODO: Add support for VT-d/pass-through device */ | 3120 | /* TODO: Add support for VT-d/pass-through device */ |
3124 | return 1; | 3121 | return 1; |
3125 | } | 3122 | } |
3126 | 3123 | ||
3127 | static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3124 | static int handle_apic_access(struct kvm_vcpu *vcpu) |
3128 | { | 3125 | { |
3129 | unsigned long exit_qualification; | 3126 | unsigned long exit_qualification; |
3130 | enum emulation_result er; | 3127 | enum emulation_result er; |
@@ -3133,7 +3130,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3133 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 3130 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
3134 | offset = exit_qualification & 0xffful; | 3131 | offset = exit_qualification & 0xffful; |
3135 | 3132 | ||
3136 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 3133 | er = emulate_instruction(vcpu, 0, 0, 0); |
3137 | 3134 | ||
3138 | if (er != EMULATE_DONE) { | 3135 | if (er != EMULATE_DONE) { |
3139 | printk(KERN_ERR | 3136 | printk(KERN_ERR |
@@ -3144,7 +3141,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3144 | return 1; | 3141 | return 1; |
3145 | } | 3142 | } |
3146 | 3143 | ||
3147 | static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3144 | static int handle_task_switch(struct kvm_vcpu *vcpu) |
3148 | { | 3145 | { |
3149 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3146 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3150 | unsigned long exit_qualification; | 3147 | unsigned long exit_qualification; |
@@ -3198,7 +3195,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3198 | return 1; | 3195 | return 1; |
3199 | } | 3196 | } |
3200 | 3197 | ||
3201 | static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3198 | static int handle_ept_violation(struct kvm_vcpu *vcpu) |
3202 | { | 3199 | { |
3203 | unsigned long exit_qualification; | 3200 | unsigned long exit_qualification; |
3204 | gpa_t gpa; | 3201 | gpa_t gpa; |
@@ -3219,8 +3216,8 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3219 | vmcs_readl(GUEST_LINEAR_ADDRESS)); | 3216 | vmcs_readl(GUEST_LINEAR_ADDRESS)); |
3220 | printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", | 3217 | printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", |
3221 | (long unsigned int)exit_qualification); | 3218 | (long unsigned int)exit_qualification); |
3222 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 3219 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
3223 | kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; | 3220 | vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; |
3224 | return 0; | 3221 | return 0; |
3225 | } | 3222 | } |
3226 | 3223 | ||
@@ -3290,7 +3287,7 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, | |||
3290 | } | 3287 | } |
3291 | } | 3288 | } |
3292 | 3289 | ||
3293 | static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3290 | static int handle_ept_misconfig(struct kvm_vcpu *vcpu) |
3294 | { | 3291 | { |
3295 | u64 sptes[4]; | 3292 | u64 sptes[4]; |
3296 | int nr_sptes, i; | 3293 | int nr_sptes, i; |
@@ -3306,13 +3303,13 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3306 | for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) | 3303 | for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) |
3307 | ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); | 3304 | ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); |
3308 | 3305 | ||
3309 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 3306 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
3310 | kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; | 3307 | vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; |
3311 | 3308 | ||
3312 | return 0; | 3309 | return 0; |
3313 | } | 3310 | } |
3314 | 3311 | ||
3315 | static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3312 | static int handle_nmi_window(struct kvm_vcpu *vcpu) |
3316 | { | 3313 | { |
3317 | u32 cpu_based_vm_exec_control; | 3314 | u32 cpu_based_vm_exec_control; |
3318 | 3315 | ||
@@ -3325,8 +3322,7 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3325 | return 1; | 3322 | return 1; |
3326 | } | 3323 | } |
3327 | 3324 | ||
3328 | static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | 3325 | static void handle_invalid_guest_state(struct kvm_vcpu *vcpu) |
3329 | struct kvm_run *kvm_run) | ||
3330 | { | 3326 | { |
3331 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3327 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3332 | enum emulation_result err = EMULATE_DONE; | 3328 | enum emulation_result err = EMULATE_DONE; |
@@ -3335,7 +3331,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
3335 | preempt_enable(); | 3331 | preempt_enable(); |
3336 | 3332 | ||
3337 | while (!guest_state_valid(vcpu)) { | 3333 | while (!guest_state_valid(vcpu)) { |
3338 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 3334 | err = emulate_instruction(vcpu, 0, 0, 0); |
3339 | 3335 | ||
3340 | if (err == EMULATE_DO_MMIO) | 3336 | if (err == EMULATE_DO_MMIO) |
3341 | break; | 3337 | break; |
@@ -3362,8 +3358,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
3362 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs | 3358 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs |
3363 | * to be done to userspace and return 0. | 3359 | * to be done to userspace and return 0. |
3364 | */ | 3360 | */ |
3365 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | 3361 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { |
3366 | struct kvm_run *kvm_run) = { | ||
3367 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, | 3362 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, |
3368 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | 3363 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
3369 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, | 3364 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, |
@@ -3403,7 +3398,7 @@ static const int kvm_vmx_max_exit_handlers = | |||
3403 | * The guest has exited. See if we can fix it or if we need userspace | 3398 | * The guest has exited. See if we can fix it or if we need userspace |
3404 | * assistance. | 3399 | * assistance. |
3405 | */ | 3400 | */ |
3406 | static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 3401 | static int vmx_handle_exit(struct kvm_vcpu *vcpu) |
3407 | { | 3402 | { |
3408 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3403 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3409 | u32 exit_reason = vmx->exit_reason; | 3404 | u32 exit_reason = vmx->exit_reason; |
@@ -3425,8 +3420,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
3425 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); | 3420 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); |
3426 | 3421 | ||
3427 | if (unlikely(vmx->fail)) { | 3422 | if (unlikely(vmx->fail)) { |
3428 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | 3423 | vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
3429 | kvm_run->fail_entry.hardware_entry_failure_reason | 3424 | vcpu->run->fail_entry.hardware_entry_failure_reason |
3430 | = vmcs_read32(VM_INSTRUCTION_ERROR); | 3425 | = vmcs_read32(VM_INSTRUCTION_ERROR); |
3431 | return 0; | 3426 | return 0; |
3432 | } | 3427 | } |
@@ -3459,10 +3454,10 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
3459 | 3454 | ||
3460 | if (exit_reason < kvm_vmx_max_exit_handlers | 3455 | if (exit_reason < kvm_vmx_max_exit_handlers |
3461 | && kvm_vmx_exit_handlers[exit_reason]) | 3456 | && kvm_vmx_exit_handlers[exit_reason]) |
3462 | return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); | 3457 | return kvm_vmx_exit_handlers[exit_reason](vcpu); |
3463 | else { | 3458 | else { |
3464 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 3459 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
3465 | kvm_run->hw.hardware_exit_reason = exit_reason; | 3460 | vcpu->run->hw.hardware_exit_reason = exit_reason; |
3466 | } | 3461 | } |
3467 | return 0; | 3462 | return 0; |
3468 | } | 3463 | } |
@@ -3600,7 +3595,7 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx) | |||
3600 | #define Q "l" | 3595 | #define Q "l" |
3601 | #endif | 3596 | #endif |
3602 | 3597 | ||
3603 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3598 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu) |
3604 | { | 3599 | { |
3605 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3600 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3606 | 3601 | ||
@@ -3614,7 +3609,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3614 | 3609 | ||
3615 | /* Handle invalid guest state instead of entering VMX */ | 3610 | /* Handle invalid guest state instead of entering VMX */ |
3616 | if (vmx->emulation_required && emulate_invalid_guest_state) { | 3611 | if (vmx->emulation_required && emulate_invalid_guest_state) { |
3617 | handle_invalid_guest_state(vcpu, kvm_run); | 3612 | handle_invalid_guest_state(vcpu); |
3618 | return; | 3613 | return; |
3619 | } | 3614 | } |
3620 | 3615 | ||