author     Avi Kivity <avi@redhat.com>             2012-08-21 10:07:00 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>   2012-08-27 19:02:19 -0400
commit     f5f7b2fe3bf849b58c8144729aba78b8e29e1e4c
tree       fed4708d3776377030604f8f5983578b91e4aeae
parent     72fbefec26841699fee9ad0b050624aeb43d5bae
KVM: VMX: Use kvm_segment to save protected-mode segments when entering realmode
Instead of using struct kvm_save_segment, use struct kvm_segment, which is what
the other APIs use. This leads to some simplification.
We replace save_rmode_seg() with calls to vmx_get_segment(). Since this depends
on rmode.vm86_active, we move the calls to before setting the flag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--   arch/x86/kvm/vmx.c   85
1 file changed, 24 insertions(+), 61 deletions(-)
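To make the data-structure change easier to see before reading the hunks, here is a condensed, stand-alone sketch. The typedefs and the abbreviated struct kvm_segment layout are illustrative stand-ins rather than the kernel's actual headers; only the two rmode layouts are taken directly from the diff below.

/*
 * Condensed sketch of the change: the VMX-private save format in
 * vmx->rmode is replaced by an array of the generic struct kvm_segment.
 * Stand-in typedefs and an abbreviated kvm_segment; not the real headers.
 */

typedef unsigned char      u8;
typedef unsigned short     u16;
typedef unsigned int       u32;
typedef unsigned long long u64;

enum { VCPU_SREG_ES, VCPU_SREG_CS, VCPU_SREG_SS, VCPU_SREG_DS,
       VCPU_SREG_FS, VCPU_SREG_GS, VCPU_SREG_TR, VCPU_SREG_LDTR };

/* Abbreviated form of the generic segment type used by the other KVM APIs. */
struct kvm_segment {
        u64 base;
        u32 limit;
        u16 selector;
        u8  type, present, dpl, db, s, l, g, avl, unusable;
};

/* Before: a VMX-private save format, one named member per saved segment. */
struct rmode_before {
        int vm86_active;
        unsigned long save_rflags;
        struct kvm_save_segment {
                u16 selector;
                unsigned long base;
                u32 limit;
                u32 ar;
        } tr, es, ds, fs, gs;
};

/*
 * After: the generic type, indexed by VCPU_SREG_*, so the saved copies can
 * be read and written with vmx_get_segment()/vmx_set_segment() directly.
 */
struct rmode_after {
        int vm86_active;
        unsigned long save_rflags;
        struct kvm_segment segs[8];
};

This is also why the vmx_get_segment() calls in enter_rmode() are placed before rmode.vm86_active is set: once the flag is on, vmx_get_segment() serves these segments from the saved rmode copy rather than the live VMCS fields.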
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d93079432b..7e95ff68b9d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -405,16 +405,16 @@ struct vcpu_vmx {
         struct {
                 int vm86_active;
                 ulong save_rflags;
+                struct kvm_segment segs[8];
+        } rmode;
+        struct {
+                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                 struct kvm_save_segment {
                         u16 selector;
                         unsigned long base;
                         u32 limit;
                         u32 ar;
-                } tr, es, ds, fs, gs;
-        } rmode;
-        struct {
-                u32 bitmask; /* 4 bits per segment (1 bit per field) */
-                struct kvm_save_segment seg[8];
+                } seg[8];
         } segment_cache;
         int vpid;
         bool emulation_required;
@@ -2693,15 +2693,12 @@ static __exit void hardware_unsetup(void)
         free_kvm_area();
 }
 
-static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
+static void fix_pmode_dataseg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save)
 {
         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 
-        if (vmcs_readl(sf->base) == save->base && (save->ar & AR_S_MASK)) {
-                vmcs_write16(sf->selector, save->selector);
-                vmcs_writel(sf->base, save->base);
-                vmcs_write32(sf->limit, save->limit);
-                vmcs_write32(sf->ar_bytes, save->ar);
+        if (vmcs_readl(sf->base) == save->base && save->s) {
+                vmx_set_segment(vcpu, save, seg);
         } else {
                 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
                         << AR_DPL_SHIFT;
@@ -2719,10 +2716,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 
         vmx_segment_cache_clear(vmx);
 
-        vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
-        vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
-        vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
-        vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
+        vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
 
         flags = vmcs_readl(GUEST_RFLAGS);
         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
@@ -2737,10 +2731,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
         if (emulate_invalid_guest_state)
                 return;
 
-        fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
-        fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
-        fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
-        fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
+        fix_pmode_dataseg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
+        fix_pmode_dataseg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
+        fix_pmode_dataseg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
+        fix_pmode_dataseg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
 
         vmx_segment_cache_clear(vmx);
 
@@ -2768,17 +2762,7 @@ static gva_t rmode_tss_base(struct kvm *kvm)
         return kvm->arch.tss_addr;
 }
 
-static void save_rmode_seg(int seg, struct kvm_save_segment *save)
-{
-        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
-
-        save->selector = vmcs_read16(sf->selector);
-        save->base = vmcs_readl(sf->base);
-        save->limit = vmcs_read32(sf->limit);
-        save->ar = vmcs_read32(sf->ar_bytes);
-}
-
-static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
+static void fix_rmode_seg(int seg, struct kvm_segment *save)
 {
         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 
@@ -2801,14 +2785,15 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
         if (enable_unrestricted_guest)
                 return;
 
+        vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
+        vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
+        vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
+        vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
+        vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
+
         vmx->emulation_required = 1;
         vmx->rmode.vm86_active = 1;
 
-        save_rmode_seg(VCPU_SREG_TR, &vmx->rmode.tr);
-        save_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
-        save_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
-        save_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
-        save_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
 
         /*
          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
@@ -3118,7 +3103,6 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
                             struct kvm_segment *var, int seg)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
-        struct kvm_save_segment *save;
         u32 ar;
 
         if (vmx->rmode.vm86_active
@@ -3126,27 +3110,15 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
                 || seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
                 || seg == VCPU_SREG_GS)
             && !emulate_invalid_guest_state) {
-                switch (seg) {
-                case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
-                case VCPU_SREG_ES: save = &vmx->rmode.es; break;
-                case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
-                case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
-                case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
-                default: BUG();
-                }
-                var->selector = save->selector;
-                var->base = save->base;
-                var->limit = save->limit;
-                ar = save->ar;
+                *var = vmx->rmode.segs[seg];
                 if (seg == VCPU_SREG_TR
                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
-                        goto use_saved_rmode_seg;
+                        return;
         }
         var->base = vmx_read_guest_seg_base(vmx, seg);
         var->limit = vmx_read_guest_seg_limit(vmx, seg);
         var->selector = vmx_read_guest_seg_selector(vmx, seg);
         ar = vmx_read_guest_seg_ar(vmx, seg);
-use_saved_rmode_seg:
         if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
                 ar = 0;
         var->type = ar & 15;
@@ -3235,10 +3207,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 
         if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
                 vmcs_write16(sf->selector, var->selector);
-                vmx->rmode.tr.selector = var->selector;
-                vmx->rmode.tr.base = var->base;
-                vmx->rmode.tr.limit = var->limit;
-                vmx->rmode.tr.ar = vmx_segment_access_rights(var);
+                vmx->rmode.segs[VCPU_SREG_TR] = *var;
                 return;
         }
         vmcs_writel(sf->base, var->base);
@@ -3289,16 +3258,10 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
                              vmcs_readl(GUEST_CS_BASE) >> 4);
                 break;
         case VCPU_SREG_ES:
-                fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
-                break;
         case VCPU_SREG_DS:
-                fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
-                break;
         case VCPU_SREG_GS:
-                fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
-                break;
         case VCPU_SREG_FS:
-                fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+                fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
                 break;
         case VCPU_SREG_SS:
                 vmcs_write16(GUEST_SS_SELECTOR,