about summary refs log tree commit diff stats
path: root/arch/x86
diff options: context, space, mode
authorDong, Eddie <eddie.dong@intel.com>2009-03-30 04:21:08 -0400
committerAvi Kivity <avi@redhat.com>2009-06-10 04:48:35 -0400
commit82725b20e22fb85377f61a16f6d0d5cfc28b45d3 (patch)
tree16049e38be3262efa60f0d39a85cdf97006550cf /arch/x86
parent362c1055e58ecd25a9393c520ab263c80b147497 (diff)
KVM: MMU: Emulate #PF error code of reserved bits violation
Detect, indicate, and propagate page faults where reserved bits are set. Take care to handle the different paging modes, each of which has different sets of reserved bits.

[avi: fix pte reserved bits for efer.nxe=0]

Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/kvm/mmu.c69
-rw-r--r--arch/x86/kvm/paging_tmpl.h7
-rw-r--r--arch/x86/kvm/x86.c10
4 files changed, 88 insertions, 0 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8351c4d00ac0..548b97d284d3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -261,6 +261,7 @@ struct kvm_mmu {
261 union kvm_mmu_page_role base_role; 261 union kvm_mmu_page_role base_role;
262 262
263 u64 *pae_root; 263 u64 *pae_root;
264 u64 rsvd_bits_mask[2][4];
264}; 265};
265 266
266struct kvm_vcpu_arch { 267struct kvm_vcpu_arch {
@@ -791,5 +792,6 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
791#define KVM_ARCH_WANT_MMU_NOTIFIER 792#define KVM_ARCH_WANT_MMU_NOTIFIER
792int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 793int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
793int kvm_age_hva(struct kvm *kvm, unsigned long hva); 794int kvm_age_hva(struct kvm *kvm, unsigned long hva);
795int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
794 796
795#endif /* _ASM_X86_KVM_HOST_H */ 797#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8aac67cbd92f..b2c8e28021c9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -126,6 +126,7 @@ module_param(oos_shadow, bool, 0644);
126#define PFERR_PRESENT_MASK (1U << 0) 126#define PFERR_PRESENT_MASK (1U << 0)
127#define PFERR_WRITE_MASK (1U << 1) 127#define PFERR_WRITE_MASK (1U << 1)
128#define PFERR_USER_MASK (1U << 2) 128#define PFERR_USER_MASK (1U << 2)
129#define PFERR_RSVD_MASK (1U << 3)
129#define PFERR_FETCH_MASK (1U << 4) 130#define PFERR_FETCH_MASK (1U << 4)
130 131
131#define PT_DIRECTORY_LEVEL 2 132#define PT_DIRECTORY_LEVEL 2
@@ -179,6 +180,11 @@ static u64 __read_mostly shadow_accessed_mask;
179static u64 __read_mostly shadow_dirty_mask; 180static u64 __read_mostly shadow_dirty_mask;
180static u64 __read_mostly shadow_mt_mask; 181static u64 __read_mostly shadow_mt_mask;
181 182
183static inline u64 rsvd_bits(int s, int e)
184{
185 return ((1ULL << (e - s + 1)) - 1) << s;
186}
187
182void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) 188void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
183{ 189{
184 shadow_trap_nonpresent_pte = trap_pte; 190 shadow_trap_nonpresent_pte = trap_pte;
@@ -2151,6 +2157,14 @@ static void paging_free(struct kvm_vcpu *vcpu)
2151 nonpaging_free(vcpu); 2157 nonpaging_free(vcpu);
2152} 2158}
2153 2159
2160static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2161{
2162 int bit7;
2163
2164 bit7 = (gpte >> 7) & 1;
2165 return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2166}
2167
2154#define PTTYPE 64 2168#define PTTYPE 64
2155#include "paging_tmpl.h" 2169#include "paging_tmpl.h"
2156#undef PTTYPE 2170#undef PTTYPE
@@ -2159,6 +2173,55 @@ static void paging_free(struct kvm_vcpu *vcpu)
2159#include "paging_tmpl.h" 2173#include "paging_tmpl.h"
2160#undef PTTYPE 2174#undef PTTYPE
2161 2175
2176static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2177{
2178 struct kvm_mmu *context = &vcpu->arch.mmu;
2179 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2180 u64 exb_bit_rsvd = 0;
2181
2182 if (!is_nx(vcpu))
2183 exb_bit_rsvd = rsvd_bits(63, 63);
2184 switch (level) {
2185 case PT32_ROOT_LEVEL:
2186 /* no rsvd bits for 2 level 4K page table entries */
2187 context->rsvd_bits_mask[0][1] = 0;
2188 context->rsvd_bits_mask[0][0] = 0;
2189 if (is_cpuid_PSE36())
2190 /* 36bits PSE 4MB page */
2191 context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2192 else
2193 /* 32 bits PSE 4MB page */
2194 context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2195 context->rsvd_bits_mask[1][0] = ~0ull;
2196 break;
2197 case PT32E_ROOT_LEVEL:
2198 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2199 rsvd_bits(maxphyaddr, 62); /* PDE */
2200 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2201 rsvd_bits(maxphyaddr, 62); /* PTE */
2202 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2203 rsvd_bits(maxphyaddr, 62) |
2204 rsvd_bits(13, 20); /* large page */
2205 context->rsvd_bits_mask[1][0] = ~0ull;
2206 break;
2207 case PT64_ROOT_LEVEL:
2208 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2209 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2210 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2211 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2212 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2213 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2214 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2215 rsvd_bits(maxphyaddr, 51);
2216 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2217 context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
2218 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2219 rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 20);
2220 context->rsvd_bits_mask[1][0] = ~0ull;
2221 break;
2222 }
2223}
2224
2162static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level) 2225static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2163{ 2226{
2164 struct kvm_mmu *context = &vcpu->arch.mmu; 2227 struct kvm_mmu *context = &vcpu->arch.mmu;
@@ -2179,6 +2242,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2179 2242
2180static int paging64_init_context(struct kvm_vcpu *vcpu) 2243static int paging64_init_context(struct kvm_vcpu *vcpu)
2181{ 2244{
2245 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2182 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL); 2246 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2183} 2247}
2184 2248
@@ -2186,6 +2250,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
2186{ 2250{
2187 struct kvm_mmu *context = &vcpu->arch.mmu; 2251 struct kvm_mmu *context = &vcpu->arch.mmu;
2188 2252
2253 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2189 context->new_cr3 = paging_new_cr3; 2254 context->new_cr3 = paging_new_cr3;
2190 context->page_fault = paging32_page_fault; 2255 context->page_fault = paging32_page_fault;
2191 context->gva_to_gpa = paging32_gva_to_gpa; 2256 context->gva_to_gpa = paging32_gva_to_gpa;
@@ -2201,6 +2266,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
2201 2266
2202static int paging32E_init_context(struct kvm_vcpu *vcpu) 2267static int paging32E_init_context(struct kvm_vcpu *vcpu)
2203{ 2268{
2269 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2204 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL); 2270 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2205} 2271}
2206 2272
@@ -2221,12 +2287,15 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2221 context->gva_to_gpa = nonpaging_gva_to_gpa; 2287 context->gva_to_gpa = nonpaging_gva_to_gpa;
2222 context->root_level = 0; 2288 context->root_level = 0;
2223 } else if (is_long_mode(vcpu)) { 2289 } else if (is_long_mode(vcpu)) {
2290 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2224 context->gva_to_gpa = paging64_gva_to_gpa; 2291 context->gva_to_gpa = paging64_gva_to_gpa;
2225 context->root_level = PT64_ROOT_LEVEL; 2292 context->root_level = PT64_ROOT_LEVEL;
2226 } else if (is_pae(vcpu)) { 2293 } else if (is_pae(vcpu)) {
2294 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2227 context->gva_to_gpa = paging64_gva_to_gpa; 2295 context->gva_to_gpa = paging64_gva_to_gpa;
2228 context->root_level = PT32E_ROOT_LEVEL; 2296 context->root_level = PT32E_ROOT_LEVEL;
2229 } else { 2297 } else {
2298 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2230 context->gva_to_gpa = paging32_gva_to_gpa; 2299 context->gva_to_gpa = paging32_gva_to_gpa;
2231 context->root_level = PT32_ROOT_LEVEL; 2300 context->root_level = PT32_ROOT_LEVEL;
2232 } 2301 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index eae949973d09..09782a982785 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -123,6 +123,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
123 gfn_t table_gfn; 123 gfn_t table_gfn;
124 unsigned index, pt_access, pte_access; 124 unsigned index, pt_access, pte_access;
125 gpa_t pte_gpa; 125 gpa_t pte_gpa;
126 int rsvd_fault = 0;
126 127
127 pgprintk("%s: addr %lx\n", __func__, addr); 128 pgprintk("%s: addr %lx\n", __func__, addr);
128walk: 129walk:
@@ -157,6 +158,10 @@ walk:
157 if (!is_present_pte(pte)) 158 if (!is_present_pte(pte))
158 goto not_present; 159 goto not_present;
159 160
161 rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
162 if (rsvd_fault)
163 goto access_error;
164
160 if (write_fault && !is_writeble_pte(pte)) 165 if (write_fault && !is_writeble_pte(pte))
161 if (user_fault || is_write_protection(vcpu)) 166 if (user_fault || is_write_protection(vcpu))
162 goto access_error; 167 goto access_error;
@@ -232,6 +237,8 @@ err:
232 walker->error_code |= PFERR_USER_MASK; 237 walker->error_code |= PFERR_USER_MASK;
233 if (fetch_fault) 238 if (fetch_fault)
234 walker->error_code |= PFERR_FETCH_MASK; 239 walker->error_code |= PFERR_FETCH_MASK;
240 if (rsvd_fault)
241 walker->error_code |= PFERR_RSVD_MASK;
235 return 0; 242 return 0;
236} 243}
237 244
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5bbcad345376..df866684bad1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3017,6 +3017,16 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3017 return best; 3017 return best;
3018} 3018}
3019 3019
3020int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3021{
3022 struct kvm_cpuid_entry2 *best;
3023
3024 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3025 if (best)
3026 return best->eax & 0xff;
3027 return 36;
3028}
3029
3020void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) 3030void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3021{ 3031{
3022 u32 function, index; 3032 u32 function, index;