author     Sheng Yang <sheng@linux.intel.com>        2008-10-09 04:01:57 -0400
committer  Avi Kivity <avi@redhat.com>               2008-12-31 09:51:45 -0500
commit     64d4d521757117aa5c1cfe79d3baa6cf57703f81 (patch)
tree       c12f1615e794408edd2930bd220722b6d5323938 /arch/x86/kvm/mmu.c
parent     74be52e3e6285fc6e872a2a7baea544106f399ea (diff)
KVM: Enable MTRR for EPT
The effective memory type under EPT is a combination of MSR_IA32_CR_PAT
and the memory-type field of the EPT entry.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
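
For background (not part of the commit message): EPT stores a memory type in
bits 5:3 of each EPT entry, and the hardware combines it with the guest PAT to
derive the effective type. A minimal sketch of how a guest-derived type is
placed into that field, assuming the shift matches VMX_EPT_MT_EPTE_SHIFT (3)
from asm/vmx.h and the usual IA32 MTRR type encodings:

#include <stdint.h>

/*
 * Sketch only.  EPT PTE bits 5:3 hold the memory type; the encodings
 * follow the IA32 MTRR types (0 = UC, 6 = WB).
 */
#define EPT_MT_SHIFT		3	/* assumed equal to VMX_EPT_MT_EPTE_SHIFT */
#define MTRR_TYPE_UNCACHABLE	0
#define MTRR_TYPE_WRBACK	6

static uint64_t ept_mt_bits(uint8_t guest_type)
{
	return (uint64_t)guest_type << EPT_MT_SHIFT;
}

/* e.g. mark a mapping write-back: spte |= ept_mt_bits(MTRR_TYPE_WRBACK); */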
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac2304fd173e..09d05f57bf66 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mt_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -183,13 +184,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
+	shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1546,6 +1548,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	u64 spte;
 	int ret = 0;
+	u64 mt_mask = shadow_mt_mask;
+
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect
@@ -1564,6 +1568,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
+	if (mt_mask) {
+		mt_mask = get_memory_type(vcpu, gfn) <<
+			  kvm_x86_ops->get_mt_mask_shift();
+		spte |= mt_mask;
+	}
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
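
The diffstat above is limited to mmu.c, so the vendor-side half of the commit
is not shown here. A sketch of what that wiring amounts to on the VMX side,
assuming the VMX_EPT_* constants from asm/vmx.h (vmx_setup_shadow_pte_masks is
a hypothetical wrapper name, not a function from the actual patch):

/* Sketch: the VMX backend reports where the EPT memory-type field sits... */
static int vmx_get_mt_mask_shift(void)
{
	return VMX_EPT_MT_EPTE_SHIFT;	/* EPT PTE bits 5:3 */
}

/*
 * ...and passes a non-zero mt_mask (the default write-back type shifted
 * into place) so that set_spte() takes the new branch above.
 */
static void vmx_setup_shadow_pte_masks(void)	/* hypothetical wrapper */
{
	kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
			      VMX_EPT_EXECUTABLE_MASK,
			      VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
}

Keying the new set_spte() branch on a non-zero shadow_mt_mask keeps the
shadow-paging path unchanged: backends whose PTEs carry no memory-type field
simply pass 0 and the branch is skipped.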