Diffstat (limited to 'drivers/kvm/paging_tmpl.h')
 drivers/kvm/paging_tmpl.h | 79
 1 file changed, 48 insertions(+), 31 deletions(-)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 2dbf4307ed9e..149fa45fd9a5 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -63,13 +63,15 @@ struct guest_walker {
 	pt_element_t *ptep;
 	pt_element_t inherited_ar;
 	gfn_t gfn;
+	u32 error_code;
 };
 
 /*
  * Fetch a guest pte for a guest virtual address
  */
-static void FNAME(walk_addr)(struct guest_walker *walker,
-			     struct kvm_vcpu *vcpu, gva_t addr)
+static int FNAME(walk_addr)(struct guest_walker *walker,
+			    struct kvm_vcpu *vcpu, gva_t addr,
+			    int write_fault, int user_fault, int fetch_fault)
 {
 	hpa_t hpa;
 	struct kvm_memory_slot *slot;
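
Note: walk_addr() now returns an int: 1 when the guest page tables map the
address and permit the access, 0 when the walk fails, with the new
walker->error_code field holding ready-to-inject PFERR_* bits. A minimal
caller sketch, for illustration only (the real call sites appear in the
hunks below):

	struct guest_walker walker;

	if (!FNAME(walk_addr)(&walker, vcpu, addr,
			      write_fault, user_fault, fetch_fault)) {
		/* walk failed: error_code is ready for injection */
		inject_page_fault(vcpu, addr, walker.error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}
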
@@ -86,7 +88,7 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
 		root = *walker->ptep;
 		if (!(root & PT_PRESENT_MASK))
-			return;
+			goto not_present;
 		--walker->level;
 	}
 #endif
@@ -111,11 +113,23 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
 		       ((unsigned long)ptep & PAGE_MASK));
 
-		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
-			*ptep |= PT_ACCESSED_MASK;
-
 		if (!is_present_pte(*ptep))
-			break;
+			goto not_present;
+
+		if (write_fault && !is_writeble_pte(*ptep))
+			if (user_fault || is_write_protection(vcpu))
+				goto access_error;
+
+		if (user_fault && !(*ptep & PT_USER_MASK))
+			goto access_error;
+
+#if PTTYPE == 64
+		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
+			goto access_error;
+#endif
+
+		if (!(*ptep & PT_ACCESSED_MASK))
+			*ptep |= PT_ACCESSED_MASK; /* avoid rmw */
 
 		if (walker->level == PT_PAGE_TABLE_LEVEL) {
 			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
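
Note: the checks above encode the architectural permission rules the walker
now enforces: a write to a read-only pte faults when CR0.WP is set or when
the access came from user mode, a user access faults on a supervisor pte,
and (64-bit only) an instruction fetch faults on an NX pte when EFER.NX is
enabled. The accessed bit is set only after every check passes, so a
refused access no longer marks the pte, and the guard writes the bit only
when it is clear (the "avoid rmw" comment). A standalone restatement as a
predicate, with the helper name invented for illustration:

	static int check_pte_access(struct kvm_vcpu *vcpu, pt_element_t pte,
				    int write_fault, int user_fault,
				    int fetch_fault)
	{
		if (write_fault && !is_writeble_pte(pte) &&
		    (user_fault || is_write_protection(vcpu)))
			return 0;
		if (user_fault && !(pte & PT_USER_MASK))
			return 0;
	#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			return 0;
	#endif
		return 1;	/* access permitted by this pte */
	}
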
@@ -146,6 +160,23 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 	}
 	walker->ptep = ptep;
 	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
+	return 1;
+
+not_present:
+	walker->error_code = 0;
+	goto err;
+
+access_error:
+	walker->error_code = PFERR_PRESENT_MASK;
+
+err:
+	if (write_fault)
+		walker->error_code |= PFERR_WRITE_MASK;
+	if (user_fault)
+		walker->error_code |= PFERR_USER_MASK;
+	if (fetch_fault)
+		walker->error_code |= PFERR_FETCH_MASK;
+	return 0;
 }
 
 static void FNAME(release_walker)(struct guest_walker *walker)
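
Note: the exit labels compose the architectural error code: not_present
starts from 0 (present bit clear), access_error starts from
PFERR_PRESENT_MASK (the pte was there but refused the access), and both
then OR in the access-type bits. As a worked example, a user-mode write
refused by a present, read-only pte leaves:

	walker->error_code = PFERR_PRESENT_MASK	/* pte present, access denied */
			   | PFERR_WRITE_MASK	/* the fault was a write */
			   | PFERR_USER_MASK;	/* it came from user mode */
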
@@ -274,7 +305,7 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
 	struct kvm_mmu_page *page;
 
 	if (is_writeble_pte(*shadow_ent))
-		return 0;
+		return !user || (*shadow_ent & PT_USER_MASK);
 
 	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
 	if (user) {
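
Note on the changed return: with the guest-side checks now performed up
front in walk_addr(), a shadow pte that is already writable means there is
nothing left to fix, provided the faulting mode can actually use it.
Spelled out as an equivalent rewrite, for illustration:

	if (is_writeble_pte(*shadow_ent)) {
		if (!user)
			return 1;	/* supervisor write: nothing to fix */
		/* a user write also needs the shadow pte user-accessible */
		return (*shadow_ent & PT_USER_MASK) != 0;
	}
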
@@ -347,8 +378,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 			       u32 error_code)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
-	int pte_present = error_code & PFERR_PRESENT_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
+	int fetch_fault = error_code & PFERR_FETCH_MASK;
 	struct guest_walker walker;
 	u64 *shadow_pte;
 	int fixed;
@@ -365,19 +396,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
-	FNAME(walk_addr)(&walker, vcpu, addr);
-	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
+	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+			     fetch_fault);
 
 	/*
 	 * The page is not mapped by the guest. Let the guest handle it.
 	 */
-	if (!shadow_pte) {
-		pgprintk("%s: not mapped\n", __FUNCTION__);
-		inject_page_fault(vcpu, addr, error_code);
+	if (!r) {
+		pgprintk("%s: guest page fault\n", __FUNCTION__);
+		inject_page_fault(vcpu, addr, walker.error_code);
 		FNAME(release_walker)(&walker);
 		return 0;
 	}
 
+	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
 	pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
 		 shadow_pte, *shadow_pte);
 
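
Note: two things move here. The walk's return value now decides whether the
fault belongs to the guest, so FNAME(fetch) runs only after a successful
walk and shadow entries are no longer built for accesses the guest itself
would refuse. And the injected error code is the walker's reconstruction
(walker.error_code) rather than the raw hardware error_code. Condensed
sketch of the new ordering:

	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);
	if (r)	/* only a permitted, mapped access reaches the shadow pte */
		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
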
@@ -399,22 +431,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 * mmio: emulate if accessible, otherwise its a guest fault.
 	 */
 	if (is_io_pte(*shadow_pte)) {
-		if (may_access(*shadow_pte, write_fault, user_fault))
-			return 1;
-		pgprintk("%s: io work, no access\n", __FUNCTION__);
-		inject_page_fault(vcpu, addr,
-				  error_code | PFERR_PRESENT_MASK);
-		kvm_mmu_audit(vcpu, "post page fault (io)");
-		return 0;
-	}
-
-	/*
-	 * pte not present, guest page fault.
-	 */
-	if (pte_present && !fixed && !write_pt) {
-		inject_page_fault(vcpu, addr, error_code);
-		kvm_mmu_audit(vcpu, "post page fault (guest)");
-		return 0;
+		return 1;
 	}
 
 	++kvm_stat.pf_fixed;
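
Note: both deleted blocks are subsumed by the walk. An access that reaches
an io pte has already passed the guest permission checks, so may_access()
has nothing left to veto, and the pte_present re-injection can no longer
trigger because a guest-visible fault is reflected before fetch() runs:

	if (is_io_pte(*shadow_pte)) {
		return 1;	/* access already validated in walk_addr() */
	}
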
@@ -429,7 +446,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 	pt_element_t guest_pte;
 	gpa_t gpa;
 
-	FNAME(walk_addr)(&walker, vcpu, vaddr);
+	FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
 	guest_pte = *walker.ptep;
 	FNAME(release_walker)(&walker);
 
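
Note: gva_to_gpa() passes zero for all three fault flags, so the walk
degenerates to pure translation: none of the new access checks can fire
and no error code is consumed. A hypothetical wrapper, illustration only
and not part of this patch, making that convention explicit:

	static int FNAME(gpte_present)(struct kvm_vcpu *vcpu, gva_t vaddr)
	{
		struct guest_walker walker;
		pt_element_t guest_pte;

		/* translation-only walk: no write/user/fetch checks */
		FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
		guest_pte = *walker.ptep;
		FNAME(release_walker)(&walker);

		return is_present_pte(guest_pte);
	}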