diff options
-rw-r--r--  drivers/kvm/mmu.c         | 17
-rw-r--r--  drivers/kvm/paging_tmpl.h | 34
2 files changed, 23 insertions(+), 28 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index a7631502f22b..2079d69f186a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -972,23 +972,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
 }
 
-static inline int fix_read_pf(u64 *shadow_ent)
-{
-	if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
-	    !(*shadow_ent & PT_USER_MASK)) {
-		/*
-		 * If supervisor write protect is disabled, we shadow kernel
-		 * pages as user pages so we can trap the write access.
-		 */
-		*shadow_ent |= PT_USER_MASK;
-		*shadow_ent &= ~PT_WRITABLE_MASK;
-
-		return 1;
-
-	}
-	return 0;
-}
-
 static void paging_free(struct kvm_vcpu *vcpu)
 {
 	nonpaging_free(vcpu);
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 7e998d193849..869582befaf1 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -197,6 +197,7 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 			       gpa_t gaddr,
 			       pt_element_t *gpte,
 			       u64 access_bits,
+			       int write_fault,
 			       gfn_t gfn)
 {
 	hpa_t paddr;
@@ -219,6 +220,17 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 
 	*shadow_pte |= paddr;
 
+	if (!write_fault && (*shadow_pte & PT_SHADOW_USER_MASK) &&
+	    !(*shadow_pte & PT_USER_MASK)) {
+		/*
+		 * If supervisor write protect is disabled, we shadow kernel
+		 * pages as user pages so we can trap the write access.
+		 */
+		*shadow_pte |= PT_USER_MASK;
+		*shadow_pte &= ~PT_WRITABLE_MASK;
+		access_bits &= ~PT_WRITABLE_MASK;
+	}
+
 	if (access_bits & PT_WRITABLE_MASK) {
 		struct kvm_mmu_page *shadow;
 
@@ -242,13 +254,14 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 }
 
 static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
-			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+			   u64 *shadow_pte, u64 access_bits,
+			   int write_fault, gfn_t gfn)
 {
 	ASSERT(*shadow_pte == 0);
 	access_bits &= *gpte;
 	*shadow_pte = (*gpte & PT_PTE_COPY_MASK);
 	FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
-			      gpte, access_bits, gfn);
+			      gpte, access_bits, write_fault, gfn);
 }
 
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
@@ -262,12 +275,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
 		return;
 	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
-	FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK,
+	FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
 		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
 }
 
 static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
-			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+			   u64 *shadow_pte, u64 access_bits, int write_fault,
+			   gfn_t gfn)
 {
 	gpa_t gaddr;
 
@@ -279,14 +293,14 @@ static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
 		(32 - PT32_DIR_PSE36_SHIFT);
 	*shadow_pte = *gpde & PT_PTE_COPY_MASK;
 	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
-			      gpde, access_bits, gfn);
+			      gpde, access_bits, write_fault, gfn);
 }
 
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *walker)
+			 struct guest_walker *walker, int write_fault)
 {
 	hpa_t shadow_addr;
 	int level;
@@ -351,12 +365,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			if (prev_shadow_ent)
 				*prev_shadow_ent |= PT_SHADOW_PS_MARK;
 			FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
-				       walker->inherited_ar, walker->gfn);
+				       walker->inherited_ar, write_fault, walker->gfn);
 		} else {
 			ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
 			FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
 				       walker->inherited_ar,
-				       walker->gfn);
+				       write_fault, walker->gfn);
 		}
 		return shadow_ent;
 	}
@@ -489,7 +503,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		return 0;
 	}
 
-	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
+	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, write_fault);
 	pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
 		 shadow_pte, *shadow_pte);
 
@@ -499,8 +513,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (write_fault)
 		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
 					    user_fault, &write_pt);
-	else
-		fixed = fix_read_pf(shadow_pte);
 
 	pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
 		 shadow_pte, *shadow_pte);