author    Sheng Yang <sheng.yang@intel.com>    2008-04-25 09:13:50 -0400
committer Avi Kivity <avi@qumranet.com>        2008-05-04 07:44:38 -0400
commit    7b52345e2c4c7333bf7eba8034ffc4683fa63c91 (patch)
tree      3b7bc1cb9c067ae18fcfcee33d57ab3a6d46f9d1
parent    67253af52e9133fb4cfbf7a2448a2d3524d1fa6c (diff)
KVM: MMU: Add EPT support
Enable kvm_set_spte() to generate EPT entries.

Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  arch/x86/kvm/mmu.c          43
-rw-r--r--  arch/x86/kvm/x86.c           3
-rw-r--r--  include/asm-x86/kvm_host.h   3
3 files changed, 39 insertions(+), 10 deletions(-)
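
What the patch does is replace the hard-coded x86 page-table bits in mmu.c (PT_PRESENT_MASK, PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK) with runtime-configurable masks, so the same MMU code can emit either classic shadow PTEs or EPT entries, whose bit layout differs. A hardware backend registers its layout once at init time. As a hedged sketch (the VMX_EPT_* names are illustrative of the EPT entry format and belong to follow-up VMX patches, not to this one), an EPT-capable vmx.c would be expected to program the new hooks along these lines:

        /* Illustrative only: EPT puts read/write/execute permissions in
         * bits 0-2 and the memory type in bits 3-5.  EPT has no user bit
         * and no NX bit, so user_mask and nx_mask are 0 and execute
         * permission is granted positively through x_mask. */
        kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | VMX_EPT_WRITABLE_MASK |
                              VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
        kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
                              VMX_EPT_FAKE_DIRTY_MASK, 0ull,
                              VMX_EPT_EXECUTABLE_MASK);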
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 20fb3c852db7..c28a36b4cbba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -152,6 +152,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -160,6 +166,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+        shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+                u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+        shadow_user_mask = user_mask;
+        shadow_accessed_mask = accessed_mask;
+        shadow_dirty_mask = dirty_mask;
+        shadow_nx_mask = nx_mask;
+        shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.cr0 & X86_CR0_WP;
@@ -198,7 +221,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-        return pte & PT_DIRTY_MASK;
+        return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -513,7 +536,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
                 return;
         sp = page_header(__pa(spte));
         pfn = spte_to_pfn(*spte);
-        if (*spte & PT_ACCESSED_MASK)
+        if (*spte & shadow_accessed_mask)
                 kvm_set_pfn_accessed(pfn);
         if (is_writeble_pte(*spte))
                 kvm_release_pfn_dirty(pfn);
@@ -1039,17 +1062,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
          * whether the guest actually used the pte (in order to detect
          * demand paging).
          */
-        spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+        spte = shadow_base_present_pte | shadow_dirty_mask;
         if (!speculative)
                 pte_access |= PT_ACCESSED_MASK;
         if (!dirty)
                 pte_access &= ~ACC_WRITE_MASK;
-        if (!(pte_access & ACC_EXEC_MASK))
-                spte |= PT64_NX_MASK;
-
-        spte |= PT_PRESENT_MASK;
+        if (pte_access & ACC_EXEC_MASK)
+                spte |= shadow_x_mask;
+        else
+                spte |= shadow_nx_mask;
         if (pte_access & ACC_USER_MASK)
-                spte |= PT_USER_MASK;
+                spte |= shadow_user_mask;
         if (largepage)
                 spte |= PT_PAGE_SIZE_MASK;
 
@@ -1155,7 +1178,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                 }
 
                 table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-                        | PT_WRITABLE_MASK | PT_USER_MASK;
+                        | PT_WRITABLE_MASK | shadow_user_mask;
                 }
                 table_addr = table[index] & PT64_BASE_ADDR_MASK;
         }
@@ -1599,7 +1622,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
         u64 *spte = vcpu->arch.last_pte_updated;
 
-        return !!(spte && (*spte & PT_ACCESSED_MASK));
+        return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
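
The behavioral pivot in this file is the execute-permission logic in mmu_set_spte(): the old code set presence unconditionally and added PT64_NX_MASK to deny execution, while the new code adds shadow_x_mask when execution is granted or shadow_nx_mask when it is denied. That is why the two masks are declared mutually exclusive: x86 PTEs deny execution with a bit (NX, bit 63) and EPT grants it with a bit (X, bit 2), so exactly one of the masks is nonzero in any configuration. A minimal standalone sketch (hypothetical bit values, not kernel code) of that branch under both layouts:

        #include <stdio.h>
        #include <stdint.h>

        /* Demo of the mmu_set_spte() exec branch under the two mask
         * configurations.  Bit positions mirror the architectural
         * layouts, but this program is illustrative only. */
        static uint64_t exec_bits(uint64_t x_mask, uint64_t nx_mask, int exec)
        {
                return exec ? x_mask : nx_mask;
        }

        int main(void)
        {
                /* shadow paging: nx_mask = bit 63 (NX), x_mask = 0 */
                printf("shadow, exec denied:  %#018llx\n",
                       (unsigned long long)exec_bits(0, 1ULL << 63, 0));
                /* EPT: x_mask = bit 2 (X), nx_mask = 0 */
                printf("ept,    exec granted: %#018llx\n",
                       (unsigned long long)exec_bits(1ULL << 2, 0, 1));
                return 0;
        }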
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0ce556372a4d..0735efbfa712 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2417,6 +2417,9 @@ int kvm_arch_init(void *opaque)
 
         kvm_x86_ops = ops;
         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+        kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
+        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+                        PT_DIRTY_MASK, PT64_NX_MASK, 0);
         return 0;
 
 out:
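
These kvm_arch_init() defaults simply re-register the values the old code had hard-coded, so shadow paging behavior is unchanged by this patch. Substituting them back into the rewritten mmu_set_spte() shows the equivalence:

        /* With the defaults above:
         *
         *   spte = shadow_base_present_pte | shadow_dirty_mask
         *        = PT_PRESENT_MASK | PT_DIRTY_MASK;
         *
         * and, since shadow_x_mask == 0, the new branch
         *
         *   if (pte_access & ACC_EXEC_MASK)
         *           spte |= shadow_x_mask;        (a no-op)
         *   else
         *           spte |= shadow_nx_mask;       (PT64_NX_MASK)
         *
         * sets NX in exactly the cases the old test
         * "if (!(pte_access & ACC_EXEC_MASK))" did.
         */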
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 897a1be24cf7..d1dedda958ff 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -434,6 +434,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+void kvm_mmu_set_base_ptes(u64 base_pte);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+                u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);