author		Sheng Yang <sheng@linux.intel.com>	2008-10-09 04:01:54 -0400
committer	Avi Kivity <avi@redhat.com>	2008-12-31 09:51:44 -0500
commit		0bed3b568b68e5835ef5da888a372b9beabf7544 (patch)
tree		ec8d816662d845831055a411496c97523bdd5de1 /arch
parent		932d27a7913fc6b3c64c6e6082628b0a1561dec9 (diff)
KVM: Improve MTRR structure
Also reset the MMU context when the MTRRs are set.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
 arch/x86/include/asm/kvm_host.h |  5 ++++-
 arch/x86/kvm/x86.c              | 61 ++++++++++++++++++++++++++++++++++++++--
 2 files changed, 63 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a40fa8478920..8082e87f628d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
+#include <asm/mtrr.h>
 
 #define KVM_MAX_VCPUS 16
 #define KVM_MEMORY_SLOTS 32
@@ -86,6 +87,7 @@
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
 extern spinlock_t kvm_lock;
@@ -329,7 +331,8 @@ struct kvm_vcpu_arch {
 	bool nmi_injected;
 	bool nmi_window_open;
 
-	u64 mtrr[0x100];
+	struct mtrr_state_type mtrr_state;
+	u32 pat;
 };
 
 struct kvm_mem_alias {
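For orientation, the state that replaces the flat u64 array comes from <asm/mtrr.h>. Paraphrased below is a sketch of the fields this patch relies on, not the verbatim kernel header (the real struct sizes var_ranges with a kernel-wide constant rather than KVM's 8):

	typedef unsigned char mtrr_type;	/* one-byte memory type (WB, UC, ...) */

	struct mtrr_var_range {			/* one variable range: base/mask MSR pair */
		unsigned int base_lo, base_hi;
		unsigned int mask_lo, mask_hi;
	};

	struct mtrr_state_type {		/* fields used by this patch, paraphrased */
		struct mtrr_var_range var_ranges[8];	/* KVM_NR_VAR_MTRR */
		mtrr_type fixed_ranges[88];		/* KVM_NR_FIXED_MTRR_REGION */
		unsigned char enabled;		/* IA32_MTRR_DEF_TYPE bits 11:10 */
		unsigned char have_fixed;
		mtrr_type def_type;		/* default memory type */
	};

The 88 fixed regions come from the 11 fixed-range MTRR MSRs (one 64K, two 16K, eight 4K), each of which packs eight one-byte memory types: 11 * 8 = 88.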
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2c4b5594555..f5b2334c6bda 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -39,6 +39,7 @@
 #include <asm/uaccess.h>
 #include <asm/msr.h>
 #include <asm/desc.h>
+#include <asm/mtrr.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS \
@@ -650,10 +651,38 @@ static bool msr_mtrr_valid(unsigned msr)
 
 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	vcpu->arch.mtrr[msr - 0x200] = data;
+	if (msr == MSR_MTRRdefType) {
+		vcpu->arch.mtrr_state.def_type = data;
+		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
+	} else if (msr == MSR_MTRRfix64K_00000)
+		p[0] = data;
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		p[1 + msr - MSR_MTRRfix16K_80000] = data;
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
+	else if (msr == MSR_IA32_CR_PAT)
+		vcpu->arch.pat = data;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pt = data;
+	}
+
+	kvm_mmu_reset_context(vcpu);
 	return 0;
 }
 
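The final else branch decodes the variable-range MSRs: IA32_MTRRphysBase0 is MSR 0x200, IA32_MTRRphysMask0 is 0x201, and each further range adds another base/mask pair. A minimal standalone sketch of the same index arithmetic (decode_var_mtrr is a hypothetical helper for illustration, not code from the patch):

	#include <stdio.h>

	/* IA32_MTRRphysBase0 = 0x200, IA32_MTRRphysMask0 = 0x201, then
	 * base1/mask1 at 0x202/0x203, and so on.  Recover the range index
	 * and whether the MSR is the base or the mask of the pair. */
	static void decode_var_mtrr(unsigned msr)
	{
		int idx = (msr - 0x200) / 2;		/* which variable range */
		int is_mask = msr - 0x200 - 2 * idx;	/* 0 = base, 1 = mask */

		printf("msr 0x%x -> var_ranges[%d].%s\n",
		       msr, idx, is_mask ? "mask" : "base");
	}

	int main(void)
	{
		decode_var_mtrr(0x200);	/* var_ranges[0].base */
		decode_var_mtrr(0x203);	/* var_ranges[1].mask */
		return 0;
	}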
@@ -749,10 +778,37 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 
 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	*pdata = vcpu->arch.mtrr[msr - 0x200];
+	if (msr == MSR_MTRRdefType)
+		*pdata = vcpu->arch.mtrr_state.def_type +
+			 (vcpu->arch.mtrr_state.enabled << 10);
+	else if (msr == MSR_MTRRfix64K_00000)
+		*pdata = p[0];
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
+	else if (msr == MSR_IA32_CR_PAT)
+		*pdata = vcpu->arch.pat;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pdata = *pt;
+	}
+
 	return 0;
 }
 
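On the read side, IA32_MTRR_DEF_TYPE is reassembled from the two stored pieces: the default memory type in the low byte plus the enable bits shifted back into bits 11:10 (bit 11 is E, the MTRR enable; bit 10 is FE, the fixed-range enable). A small round-trip sketch of this split, assuming the architecturally reserved bits 9:8 are zero:

	#include <assert.h>

	int main(void)
	{
		unsigned long long data = 0xc06;	/* E=1, FE=1, type=WB(6) */
		unsigned char def_type = data;			/* low byte */
		unsigned char enabled = (data & 0xc00) >> 10;	/* bits 11:10 */

		/* get_msr_mtrr() recovers the original MSR value */
		assert(def_type + (enabled << 10) == data);
		return 0;
	}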
@@ -3942,6 +3998,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	/* We do fxsave: this must be aligned. */
 	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
 
+	vcpu->arch.mtrr_state.have_fixed = 1;
 	vcpu_load(vcpu);
 	r = kvm_arch_vcpu_reset(vcpu);
 	if (r == 0)
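Setting have_fixed at vcpu setup advertises fixed-range MTRR support to the guest. Those 88 fixed entries tile the first megabyte of guest physical address space: eight 64K entries, sixteen 16K entries, then sixty-four 4K entries. A hedged sketch of how an address below 1 MiB would map to a fixed_ranges[] index (fixed_mtrr_index is a hypothetical illustration, not a helper added by this patch):

	/* 0x00000-0x7FFFF: eight 64K entries  (indices  0-7)
	 * 0x80000-0xBFFFF: sixteen 16K entries (indices  8-23)
	 * 0xC0000-0xFFFFF: sixty-four 4K entries (indices 24-87) */
	static int fixed_mtrr_index(unsigned int addr)
	{
		if (addr < 0x80000)
			return addr / 0x10000;			/* 64K granule */
		if (addr < 0xc0000)
			return 8 + (addr - 0x80000) / 0x4000;	/* 16K granule */
		if (addr < 0x100000)
			return 24 + (addr - 0xc0000) / 0x1000;	/* 4K granule */
		return -1;	/* above 1 MiB: variable MTRRs apply */
	}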