author		Avi Kivity <avi@qumranet.com>	2007-12-09 10:40:31 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:53:21 -0500
commit		1c4f1fd6d5692614e8dc75ee53f7be590f1e878b (patch)
tree		8b3c8f468923537dbd87c39995661e704c4f2b80 /drivers/kvm/paging_tmpl.h
parent		2fbf4cf13f777e1f61ee692fe67d16bddd747700 (diff)
KVM: MMU: Move set_pte() into guest paging mode independent code
As set_pte() no longer references either a gpte or the guest walker, we can
move it out of paging mode dependent code (which compiles twice and is
generally nasty).
Signed-off-by: Avi Kivity <avi@qumranet.com>
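For context, "compiles twice" refers to the include-template trick behind paging_tmpl.h: mmu.c includes the header once per guest page-table format, and the FNAME() macro gives each instantiation its own symbol names. A minimal sketch of the mechanism, paraphrased from the tree's layout rather than quoted verbatim:

/* mmu.c: instantiate the guest-walker code once per paging mode. */
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

/* paging_tmpl.h: FNAME() stamps out distinct names per instantiation,
 * so FNAME(set_pte) expands to paging64_set_pte and paging32_set_pte. */
#if PTTYPE == 64
#define FNAME(name) paging64_##name
#elif PTTYPE == 32
#define FNAME(name) paging32_##name
#endif

Anything defined in the header therefore exists twice in the kernel image, which is why hoisting set_pte() out of it both shrinks and simplifies the code.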
Diffstat (limited to 'drivers/kvm/paging_tmpl.h')
-rw-r--r--	drivers/kvm/paging_tmpl.h	93
1 file changed, 5 insertions, 88 deletions
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 2e5a80af22c9..3ab3fb635e16 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -235,89 +235,6 @@ err:
 	return 0;
 }
 
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 *shadow_pte,
-			   unsigned pt_access, unsigned pte_access,
-			   int user_fault, int write_fault, int dirty,
-			   int *ptwrite, gfn_t gfn)
-{
-	u64 spte;
-	int was_rmapped = is_rmap_pte(*shadow_pte);
-	struct page *page;
-
-	pgprintk("%s: spte %llx gpte %llx access %x write_fault %d"
-		 " user_fault %d gfn %lx\n",
-		 __FUNCTION__, *shadow_pte, (u64)gpte, pt_access,
-		 write_fault, user_fault, gfn);
-
-	/*
-	 * We don't set the accessed bit, since we sometimes want to see
-	 * whether the guest actually used the pte (in order to detect
-	 * demand paging).
-	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
-	if (!dirty)
-		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	page = gfn_to_page(vcpu->kvm, gfn);
-
-	spte |= PT_PRESENT_MASK;
-	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
-
-	if (is_error_page(page)) {
-		set_shadow_pte(shadow_pte,
-			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-		kvm_release_page_clean(page);
-		return;
-	}
-
-	spte |= page_to_phys(page);
-
-	if ((pte_access & ACC_WRITE_MASK)
-	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
-		struct kvm_mmu_page *shadow;
-
-		spte |= PT_WRITABLE_MASK;
-		if (user_fault) {
-			mmu_unshadow(vcpu->kvm, gfn);
-			goto unshadowed;
-		}
-
-		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-		if (shadow) {
-			pgprintk("%s: found shadow page for %lx, marking ro\n",
-				 __FUNCTION__, gfn);
-			pte_access &= ~ACC_WRITE_MASK;
-			if (is_writeble_pte(spte)) {
-				spte &= ~PT_WRITABLE_MASK;
-				kvm_x86_ops->tlb_flush(vcpu);
-			}
-			if (write_fault)
-				*ptwrite = 1;
-		}
-	}
-
-unshadowed:
-
-	if (pte_access & ACC_WRITE_MASK)
-		mark_page_dirty(vcpu->kvm, gfn);
-
-	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
-	set_shadow_pte(shadow_pte, spte);
-	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
-	if (!was_rmapped) {
-		rmap_add(vcpu, shadow_pte, gfn);
-		if (!is_rmap_pte(*shadow_pte))
-			kvm_release_page_clean(page);
-	}
-	else
-		kvm_release_page_clean(page);
-	if (!ptwrite || !*ptwrite)
-		vcpu->last_pte_updated = shadow_pte;
-}
-
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 			      u64 *spte, const void *pte, int bytes,
 			      int offset_in_pte)
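The block comment in the removed body carries the one non-obvious design point: the accessed bit is deliberately left clear when the spte is installed, so the MMU can later check whether the guest ever touched the page (demand-paging detection). A toy illustration of that heuristic, using mmu.c's PT_ACCESSED_MASK and a hypothetical helper name that is not part of this tree:

/* Illustrative only: hardware sets the accessed bit on first use, so a
 * still-clear bit means the guest never actually touched the mapping. */
static inline int spte_used_by_guest(u64 spte)
{
	return (spte & PT_ACCESSED_MASK) != 0;
}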
@@ -335,8 +252,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		return;
 	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
 	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
-	FNAME(set_pte)(vcpu, spte, page->role.access, pte_access, 0, 0,
-		       gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte));
+	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
+		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte));
 }
 
 /*
@@ -399,9 +316,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		*shadow_ent = shadow_pte;
 	}
 
-	FNAME(set_pte)(vcpu, shadow_ent, access, walker->pte_access & access,
-		       user_fault, write_fault, walker->pte & PT_DIRTY_MASK,
-		       ptwrite, walker->gfn);
+	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
+		     user_fault, write_fault, walker->pte & PT_DIRTY_MASK,
+		     ptwrite, walker->gfn);
 
 	return shadow_ent;
 }
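Where the body went: the mmu.c side of this commit (not shown in this file-limited view) adds mmu_set_spte(), keeping the parameter list the two call sites above still pass, so the logic is now compiled once and shared by the paging32 and paging64 instantiations. A sketch of the expected declaration, inferred from those call sites rather than copied from mmu.c:

/* drivers/kvm/mmu.c -- guest-paging-mode independent, compiled once. */
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, gfn_t gfn);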