author    Avi Kivity <avi@qumranet.com>    2007-01-26 03:56:41 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-01-26 16:50:57 -0500
commit    73b1087e6176a34c01eea3db269848f72fad72c1 (patch)
tree      05e74fb28c1980e8327587934f2a0fe2a3c0d683 /drivers/kvm
parent    7993ba43db1c07245ada067791f91dbf018095ac (diff)
[PATCH] KVM: MMU: Report nx faults to the guest
With the recent guest page fault change, we perform access checks on our own
instead of relying on the cpu.  This means we have to perform the nx checks
as well.  Software like the google toolbar on windows appears to rely on
this somehow.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
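The check being added boils down to: an instruction fetch must fault if the guest has enabled no-execute (EFER.NX) and the guest page-table entry has its execute-disable bit (bit 63) set, and the resulting fault must carry the architectural instruction-fetch bit in its error code. Below is a minimal standalone sketch of that condition; the helper names and the plain integer stand-ins for vcpu state are illustrative only, not KVM's API.

#include <stdint.h>

#define EFER_NX          (1ULL << 11)  /* NXE bit in IA32_EFER */
#define PT64_NX_MASK     (1ULL << 63)  /* execute-disable bit of a 64-bit PTE */
#define PFERR_FETCH_MASK (1U << 4)     /* "instruction fetch" bit of the #PF error code */

/* Returns nonzero if an instruction fetch through 'pte' must be refused. */
static int nx_fetch_violation(uint64_t guest_efer, uint64_t pte, int fetch_fault)
{
	return fetch_fault && (guest_efer & EFER_NX) && (pte & PT64_NX_MASK);
}

/* When it is refused, the error code reported to the guest carries the fetch bit. */
static uint32_t add_fetch_bit(uint32_t error_code, int fetch_fault)
{
	return fetch_fault ? (error_code | PFERR_FETCH_MASK) : error_code;
}

In the patch itself the same condition sits in FNAME(walk_addr), guarded by PTTYPE == 64, since only 64-bit page-table entries carry an NX bit.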
Diffstat (limited to 'drivers/kvm')
-rw-r--r--   drivers/kvm/mmu.c            6
-rw-r--r--   drivers/kvm/paging_tmpl.h   15
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index a05d0609d918..22c426cd8cb2 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -143,6 +143,7 @@ static int dbg = 1;
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
 #define PFERR_USER_MASK (1U << 2)
+#define PFERR_FETCH_MASK (1U << 4)
 
 #define PT64_ROOT_LEVEL 4
 #define PT32_ROOT_LEVEL 2
@@ -168,6 +169,11 @@ static int is_cpuid_PSE36(void)
 	return 1;
 }
 
+static int is_nx(struct kvm_vcpu *vcpu)
+{
+	return vcpu->shadow_efer & EFER_NX;
+}
+
 static int is_present_pte(unsigned long pte)
 {
 	return pte & PT_PRESENT_MASK;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index afcd2a8f45bb..149fa45fd9a5 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -71,7 +71,7 @@ struct guest_walker {
  */
 static int FNAME(walk_addr)(struct guest_walker *walker,
 			    struct kvm_vcpu *vcpu, gva_t addr,
-			    int write_fault, int user_fault)
+			    int write_fault, int user_fault, int fetch_fault)
 {
 	hpa_t hpa;
 	struct kvm_memory_slot *slot;
@@ -123,6 +123,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 	if (user_fault && !(*ptep & PT_USER_MASK))
 		goto access_error;
 
+#if PTTYPE == 64
+	if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
+		goto access_error;
+#endif
+
 	if (!(*ptep & PT_ACCESSED_MASK))
 		*ptep |= PT_ACCESSED_MASK;	/* avoid rmw */
 
@@ -169,6 +174,8 @@ err:
 		walker->error_code |= PFERR_WRITE_MASK;
 	if (user_fault)
 		walker->error_code |= PFERR_USER_MASK;
+	if (fetch_fault)
+		walker->error_code |= PFERR_FETCH_MASK;
 	return 0;
 }
 
@@ -372,6 +379,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
+	int fetch_fault = error_code & PFERR_FETCH_MASK;
 	struct guest_walker walker;
 	u64 *shadow_pte;
 	int fixed;
@@ -388,7 +396,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
-	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault);
+	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+			     fetch_fault);
 
 	/*
 	 * The page is not mapped by the guest.  Let the guest handle it.
@@ -437,7 +446,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 	pt_element_t guest_pte;
 	gpa_t gpa;
 
-	FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0);
+	FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
 	guest_pte = *walker.ptep;
 	FNAME(release_walker)(&walker);
 
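For a concrete picture of what the guest now observes: the PFERR_* masks mirror the architectural x86 #PF error-code bits, so after this change an execute of a present, no-execute user page is reported to the guest with the instruction-fetch bit set (error code 0x15) instead of being let through by the software walker. The small user-space decoder below is illustrative only and not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Architectural x86 #PF error-code bits that the PFERR_* masks mirror. */
#define PF_PRESENT (1U << 0)
#define PF_WRITE   (1U << 1)
#define PF_USER    (1U << 2)
#define PF_FETCH   (1U << 4)

static void decode_pf_error(uint32_t ec)
{
	printf("#PF error code 0x%x: %s, %s, %s%s\n", ec,
	       (ec & PF_PRESENT) ? "protection violation" : "not-present",
	       (ec & PF_WRITE) ? "write" : "read",
	       (ec & PF_USER) ? "user mode" : "kernel mode",
	       (ec & PF_FETCH) ? ", instruction fetch" : "");
}

int main(void)
{
	/* An NX violation on a present user page, as the guest would now see it. */
	decode_pf_error(PF_PRESENT | PF_USER | PF_FETCH);
	return 0;
}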