author:    Dong, Eddie <eddie.dong@intel.com>  2009-03-30 04:21:08 -0400
committer: Avi Kivity <avi@redhat.com>  2009-06-10 04:48:35 -0400
commit:    82725b20e22fb85377f61a16f6d0d5cfc28b45d3
tree:      16049e38be3262efa60f0d39a85cdf97006550cf /arch/x86/kvm/mmu.c
parent:    362c1055e58ecd25a9393c520ab263c80b147497
KVM: MMU: Emulate #PF error code of reserved bits violation
Detect, indicate, and propagate page faults where reserved bits are set. Take care to handle the different paging modes, each of which has different sets of reserved bits.

[avi: fix pte reserved bits for efer.nxe=0]

Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
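For orientation, the whole mechanism rests on one small helper, rsvd_bits(), plus a per-mode table of masks. The sketch below copies that helper from the hunk further down and prints the masks it produces for a hypothetical MAXPHYADDR of 36; the chosen value and the printed examples are illustrative only, not taken from the patch.

/*
 * Standalone illustration of the rsvd_bits() helper introduced by this
 * patch.  The helper is copied from the diff below; the MAXPHYADDR value
 * (36) and the printed masks are illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>

/* Same definition as in the patch: bits s..e inclusive set to 1. */
static inline uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	int maxphyaddr = 36;	/* hypothetical CPUID-reported MAXPHYADDR */

	/* Bits 13..20 must be zero in a PAE 2MB PDE, for example. */
	printf("rsvd_bits(13, 20)         = %#llx\n",
	       (unsigned long long)rsvd_bits(13, 20));

	/* Address bits above MAXPHYADDR up to bit 51 are reserved. */
	printf("rsvd_bits(maxphyaddr, 51) = %#llx\n",
	       (unsigned long long)rsvd_bits(maxphyaddr, 51));

	/* Bit 63 (XD) is reserved when EFER.NXE is clear. */
	printf("rsvd_bits(63, 63)         = %#llx\n",
	       (unsigned long long)rsvd_bits(63, 63));
	return 0;
}

With MAXPHYADDR = 36, rsvd_bits(maxphyaddr, 51) covers physical-address bits 36-51, which is the range a long-mode walker must see as zero in a PTE.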
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  69
1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8aac67cbd92f..b2c8e28021c9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -126,6 +126,7 @@ module_param(oos_shadow, bool, 0644);
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
 #define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
 
 #define PT_DIRECTORY_LEVEL 2
@@ -179,6 +180,11 @@ static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mt_mask;
 
+static inline u64 rsvd_bits(int s, int e)
+{
+	return ((1ULL << (e - s + 1)) - 1) << s;
+}
+
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
 	shadow_trap_nonpresent_pte = trap_pte;
@@ -2151,6 +2157,14 @@ static void paging_free(struct kvm_vcpu *vcpu)
 	nonpaging_free(vcpu);
 }
 
+static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
+{
+	int bit7;
+
+	bit7 = (gpte >> 7) & 1;
+	return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
@@ -2159,6 +2173,55 @@ static void paging_free(struct kvm_vcpu *vcpu)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
+static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
+{
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+	int maxphyaddr = cpuid_maxphyaddr(vcpu);
+	u64 exb_bit_rsvd = 0;
+
+	if (!is_nx(vcpu))
+		exb_bit_rsvd = rsvd_bits(63, 63);
+	switch (level) {
+	case PT32_ROOT_LEVEL:
+		/* no rsvd bits for 2 level 4K page table entries */
+		context->rsvd_bits_mask[0][1] = 0;
+		context->rsvd_bits_mask[0][0] = 0;
+		if (is_cpuid_PSE36())
+			/* 36bits PSE 4MB page */
+			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
+		else
+			/* 32 bits PSE 4MB page */
+			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
+		context->rsvd_bits_mask[1][0] = ~0ull;
+		break;
+	case PT32E_ROOT_LEVEL:
+		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 62);	/* PDE */
+		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 62);	/* PTE */
+		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 62) |
+			rsvd_bits(13, 20);		/* large page */
+		context->rsvd_bits_mask[1][0] = ~0ull;
+		break;
+	case PT64_ROOT_LEVEL:
+		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 51);
+		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
+		context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
+		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 20);
+		context->rsvd_bits_mask[1][0] = ~0ull;
+		break;
+	}
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
 	struct kvm_mmu *context = &vcpu->arch.mmu;
@@ -2179,6 +2242,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 
 static int paging64_init_context(struct kvm_vcpu *vcpu)
 {
+	reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
 	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
 }
 
@@ -2186,6 +2250,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
+	reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
@@ -2201,6 +2266,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32E_init_context(struct kvm_vcpu *vcpu)
 {
+	reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
 	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
@@ -2221,12 +2287,15 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
 		context->root_level = 0;
 	} else if (is_long_mode(vcpu)) {
+		reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT64_ROOT_LEVEL;
 	} else if (is_pae(vcpu)) {
+		reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT32E_ROOT_LEVEL;
 	} else {
+		reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
 		context->gva_to_gpa = paging32_gva_to_gpa;
 		context->root_level = PT32_ROOT_LEVEL;
 	}
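The masks and is_rsvd_bits_set() themselves are added here, but their consumer is the guest page-table walker in paging_tmpl.h, which is outside this diff. The stand-alone model below is a rough sketch of the intended flow under that assumption; the struct, the walk step in main(), and the way the error code is assembled are simplified stand-ins, not the kernel's actual walker code.

/*
 * User-space model of how the masks added by this patch are meant to be
 * consumed.  In the kernel the consumer is the guest page-table walker
 * in paging_tmpl.h (not part of this diff); everything below is a
 * simplified, hypothetical stand-in.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_RSVD_MASK    (1U << 3)

struct mmu_model {
	/* Indexed [bit 7 of the gpte][level - 1], as in the patch. */
	uint64_t rsvd_bits_mask[2][4];
};

static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

/* Mirrors is_rsvd_bits_set() from the diff above. */
static bool is_rsvd_bits_set(struct mmu_model *mmu, uint64_t gpte, int level)
{
	int bit7 = (gpte >> 7) & 1;

	return (gpte & mmu->rsvd_bits_mask[bit7][level - 1]) != 0;
}

int main(void)
{
	struct mmu_model mmu = { { { 0 } } };
	int maxphyaddr = 36;		/* hypothetical MAXPHYADDR */
	uint32_t error_code = 0;

	/* 64-bit PTE mask for a present, non-large leaf (level 1). */
	mmu.rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);

	/* A guest PTE with a physical-address bit above MAXPHYADDR set. */
	uint64_t bad_gpte = (1ULL << 40) | 0x003;	/* present, writable */

	if (is_rsvd_bits_set(&mmu, bad_gpte, 1))
		/* The walker would report P=1 and RSVD=1 in the injected #PF. */
		error_code |= PFERR_PRESENT_MASK | PFERR_RSVD_MASK;

	printf("injected #PF error code: %#x\n", error_code);
	return 0;
}

The key point is that the injected error code carries PFERR_RSVD_MASK (bit 3) alongside the usual bits, matching what hardware reports for a reserved-bit violation.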