author     Sheng Yang <sheng@linux.intel.com>    2008-10-09 04:01:57 -0400
committer  Avi Kivity <avi@redhat.com>           2008-12-31 09:51:45 -0500
commit     64d4d521757117aa5c1cfe79d3baa6cf57703f81
tree       c12f1615e794408edd2930bd220722b6d5323938 /arch/x86/kvm
parent     74be52e3e6285fc6e872a2a7baea544106f399ea
KVM: Enable MTRR for EPT
Under EPT, the effective memory type is derived from both MSR_IA32_CR_PAT and
the memory type field of the EPT entry.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
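
For context, here is a minimal sketch (not part of the patch) of how a per-gfn
memory type ends up in a hardware PTE after this change. get_memory_type() and
get_mt_mask_shift() are the hooks used in the mmu.c hunk below; the numeric
constants are illustrative assumptions, not taken from the diff.

#include <stdint.h>

/* Illustrative assumptions: the EPT PTE memory-type field occupies bits 5:3,
 * and 6 is the write-back (WB) encoding. */
#define EPT_MT_SHIFT    3
#define EPT_MT_WB       6ULL

/* Mirrors the set_spte() logic added below: shift the memory type reported
 * for the gfn into the PTE's memory-type field. */
static uint64_t apply_mt_field(uint64_t spte, uint64_t memory_type, int mt_shift)
{
        return spte | (memory_type << mt_shift);
}

/* Usage: a write-back mapping sets bits 5:3 to 6, i.e. ORs in 0x30. */
static uint64_t example_wb_spte(uint64_t spte)
{
        return apply_mt_field(spte, EPT_MT_WB, EPT_MT_SHIFT);
}
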
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/mmu.c   11
-rw-r--r--   arch/x86/kvm/svm.c    6
-rw-r--r--   arch/x86/kvm/vmx.c   10
-rw-r--r--   arch/x86/kvm/x86.c    2
4 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac2304fd173e..09d05f57bf66 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mt_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -183,13 +184,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-                u64 dirty_mask, u64 nx_mask, u64 x_mask)
+                u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
 {
         shadow_user_mask = user_mask;
         shadow_accessed_mask = accessed_mask;
         shadow_dirty_mask = dirty_mask;
         shadow_nx_mask = nx_mask;
         shadow_x_mask = x_mask;
+        shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1546,6 +1548,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
         u64 spte;
         int ret = 0;
+        u64 mt_mask = shadow_mt_mask;
+
         /*
          * We don't set the accessed bit, since we sometimes want to see
          * whether the guest actually used the pte (in order to detect
@@ -1564,6 +1568,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                 spte |= shadow_user_mask;
         if (largepage)
                 spte |= PT_PAGE_SIZE_MASK;
+        if (mt_mask) {
+                mt_mask = get_memory_type(vcpu, gfn) <<
+                          kvm_x86_ops->get_mt_mask_shift();
+                spte |= mt_mask;
+        }
 
         spte |= (u64)pfn << PAGE_SHIFT;
 
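Note that set_spte() above calls kvm_x86_ops->get_mt_mask_shift(), but this
diffstat is limited to arch/x86/kvm, so the hook's declaration in the
kvm_x86_ops structure is not shown here. A minimal sketch of the assumed
declaration, for orientation only:

/* Sketch (assumption) of the vendor hook used by set_spte() above; the real
 * member is added to kvm_x86_ops outside this diffstat.  It reports where the
 * memory-type field sits in a hardware PTE: the VMX implementation below
 * returns VMX_EPT_MT_EPTE_SHIFT, while the SVM stub returns 0 because only
 * EPT gets MTRR-based typing in this patch. */
struct kvm_mt_hook_sketch {
        int (*get_mt_mask_shift)(void);
};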
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9c4ce657d963..05efc4ef75a6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1912,6 +1912,11 @@ static int get_npt_level(void)
 #endif
 }
 
+static int svm_get_mt_mask_shift(void)
+{
+        return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
         .cpu_has_kvm_support = has_svm,
         .disabled_by_bios = is_disabled,
@@ -1967,6 +1972,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
         .set_tss_addr = svm_set_tss_addr,
         .get_tdp_level = get_npt_level,
+        .get_mt_mask_shift = svm_get_mt_mask_shift,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b4c95a501cca..dae134fa09e7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3574,6 +3574,11 @@ static int get_ept_level(void)
         return VMX_EPT_DEFAULT_GAW + 1;
 }
 
+static int vmx_get_mt_mask_shift(void)
+{
+        return VMX_EPT_MT_EPTE_SHIFT;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
         .cpu_has_kvm_support = cpu_has_kvm_support,
         .disabled_by_bios = vmx_disabled_by_bios,
@@ -3629,6 +3634,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
         .set_tss_addr = vmx_set_tss_addr,
         .get_tdp_level = get_ept_level,
+        .get_mt_mask_shift = vmx_get_mt_mask_shift,
 };
 
 static int __init vmx_init(void)
@@ -3685,10 +3691,10 @@ static int __init vmx_init(void)
                 bypass_guest_pf = 0;
                 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
                         VMX_EPT_WRITABLE_MASK |
-                        VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
                         VMX_EPT_IGMT_BIT);
                 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-                        VMX_EPT_EXECUTABLE_MASK);
+                        VMX_EPT_EXECUTABLE_MASK,
+                        VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
                 kvm_enable_tdp();
         } else
                 kvm_disable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0edf75339f3a..f175b796c2a6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2615,7 +2615,7 @@ int kvm_arch_init(void *opaque)
         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-                        PT_DIRTY_MASK, PT64_NX_MASK, 0);
+                        PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
         return 0;
 
 out: