author		Avi Kivity <avi@qumranet.com>	2007-12-09 10:40:31 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:53:21 -0500
commit		1c4f1fd6d5692614e8dc75ee53f7be590f1e878b (patch)
tree		8b3c8f468923537dbd87c39995661e704c4f2b80
parent		2fbf4cf13f777e1f61ee692fe67d16bddd747700 (diff)
KVM: MMU: Move set_pte() into guest paging mode independent code
As set_pte() no longer references either a gpte or the guest walker, we can
move it out of the paging-mode-dependent code (which is compiled twice, once
per guest paging mode, and is generally nasty).
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--	drivers/kvm/mmu.c		83
-rw-r--r--	drivers/kvm/paging_tmpl.h	93
2 files changed, 88 insertions, 88 deletions
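Background on why the move helps: paging_tmpl.h is a template header that mmu.c includes twice, once for 32-bit and once for 64-bit guest page tables, with the FNAME() macro stamping out a separately named copy of every function it defines. Code that does not actually depend on the guest PTE format is therefore compiled (and maintained) in duplicate. The sketch below illustrates the mechanism; the macro names mirror the real code, but the exact macro set and the function body are simplified placeholders, not the tree's actual contents.

/* mmu.c -- instantiate the template once per guest paging mode */
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

/* paging_tmpl.h -- names and types are parameterized on PTTYPE */
#if PTTYPE == 64
	#define FNAME(name)  paging64_##name
	#define pt_element_t u64
#elif PTTYPE == 32
	#define FNAME(name)  paging32_##name
	#define pt_element_t u32
#else
	#error Invalid PTTYPE value
#endif

/*
 * Every function defined with FNAME() is emitted twice, e.g. as
 * paging64_page_fault() and paging32_page_fault().  A mode-independent
 * helper such as mmu_set_spte() gains nothing from that duplication,
 * which is why this patch moves it into mmu.c proper.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code)
{
	pt_element_t gpte;	/* a guest PTE in the width of this paging mode */
	/* ... walk the guest page tables, then install a shadow PTE ... */
	return 0;
}

#undef FNAME
#undef pt_element_t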
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index cace1e41b683..a91e05b42345 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -879,6 +879,89 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+			 unsigned pt_access, unsigned pte_access,
+			 int user_fault, int write_fault, int dirty,
+			 int *ptwrite, gfn_t gfn)
+{
+	u64 spte;
+	int was_rmapped = is_rmap_pte(*shadow_pte);
+	struct page *page;
+
+	pgprintk("%s: spte %llx gpte %llx access %x write_fault %d"
+		 " user_fault %d gfn %lx\n",
+		 __FUNCTION__, *shadow_pte, (u64)gpte, pt_access,
+		 write_fault, user_fault, gfn);
+
+	/*
+	 * We don't set the accessed bit, since we sometimes want to see
+	 * whether the guest actually used the pte (in order to detect
+	 * demand paging).
+	 */
+	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	if (!dirty)
+		pte_access &= ~ACC_WRITE_MASK;
+	if (!(pte_access & ACC_EXEC_MASK))
+		spte |= PT64_NX_MASK;
+
+	page = gfn_to_page(vcpu->kvm, gfn);
+
+	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_USER_MASK)
+		spte |= PT_USER_MASK;
+
+	if (is_error_page(page)) {
+		set_shadow_pte(shadow_pte,
+			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+		kvm_release_page_clean(page);
+		return;
+	}
+
+	spte |= page_to_phys(page);
+
+	if ((pte_access & ACC_WRITE_MASK)
+	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+		struct kvm_mmu_page *shadow;
+
+		spte |= PT_WRITABLE_MASK;
+		if (user_fault) {
+			mmu_unshadow(vcpu->kvm, gfn);
+			goto unshadowed;
+		}
+
+		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
+		if (shadow) {
+			pgprintk("%s: found shadow page for %lx, marking ro\n",
+				 __FUNCTION__, gfn);
+			pte_access &= ~ACC_WRITE_MASK;
+			if (is_writeble_pte(spte)) {
+				spte &= ~PT_WRITABLE_MASK;
+				kvm_x86_ops->tlb_flush(vcpu);
+			}
+			if (write_fault)
+				*ptwrite = 1;
+		}
+	}
+
+unshadowed:
+
+	if (pte_access & ACC_WRITE_MASK)
+		mark_page_dirty(vcpu->kvm, gfn);
+
+	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+	set_shadow_pte(shadow_pte, spte);
+	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+	if (!was_rmapped) {
+		rmap_add(vcpu, shadow_pte, gfn);
+		if (!is_rmap_pte(*shadow_pte))
+			kvm_release_page_clean(page);
+	}
+	else
+		kvm_release_page_clean(page);
+	if (!ptwrite || !*ptwrite)
+		vcpu->last_pte_updated = shadow_pte;
+}
+
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 2e5a80af22c9..3ab3fb635e16 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -235,89 +235,6 @@ err:
 	return 0;
 }
 
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 *shadow_pte,
-			   unsigned pt_access, unsigned pte_access,
-			   int user_fault, int write_fault, int dirty,
-			   int *ptwrite, gfn_t gfn)
-{
-	u64 spte;
-	int was_rmapped = is_rmap_pte(*shadow_pte);
-	struct page *page;
-
-	pgprintk("%s: spte %llx gpte %llx access %x write_fault %d"
-		 " user_fault %d gfn %lx\n",
-		 __FUNCTION__, *shadow_pte, (u64)gpte, pt_access,
-		 write_fault, user_fault, gfn);
-
-	/*
-	 * We don't set the accessed bit, since we sometimes want to see
-	 * whether the guest actually used the pte (in order to detect
-	 * demand paging).
-	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
-	if (!dirty)
-		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	page = gfn_to_page(vcpu->kvm, gfn);
-
-	spte |= PT_PRESENT_MASK;
-	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
-
-	if (is_error_page(page)) {
-		set_shadow_pte(shadow_pte,
-			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-		kvm_release_page_clean(page);
-		return;
-	}
-
-	spte |= page_to_phys(page);
-
-	if ((pte_access & ACC_WRITE_MASK)
-	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
-		struct kvm_mmu_page *shadow;
-
-		spte |= PT_WRITABLE_MASK;
-		if (user_fault) {
-			mmu_unshadow(vcpu->kvm, gfn);
-			goto unshadowed;
-		}
-
-		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-		if (shadow) {
-			pgprintk("%s: found shadow page for %lx, marking ro\n",
-				 __FUNCTION__, gfn);
-			pte_access &= ~ACC_WRITE_MASK;
-			if (is_writeble_pte(spte)) {
-				spte &= ~PT_WRITABLE_MASK;
-				kvm_x86_ops->tlb_flush(vcpu);
-			}
-			if (write_fault)
-				*ptwrite = 1;
-		}
-	}
-
-unshadowed:
-
-	if (pte_access & ACC_WRITE_MASK)
-		mark_page_dirty(vcpu->kvm, gfn);
-
-	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
-	set_shadow_pte(shadow_pte, spte);
-	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
-	if (!was_rmapped) {
-		rmap_add(vcpu, shadow_pte, gfn);
-		if (!is_rmap_pte(*shadow_pte))
-			kvm_release_page_clean(page);
-	}
-	else
-		kvm_release_page_clean(page);
-	if (!ptwrite || !*ptwrite)
-		vcpu->last_pte_updated = shadow_pte;
-}
-
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 			      u64 *spte, const void *pte, int bytes,
 			      int offset_in_pte)
@@ -335,8 +252,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		return;
 	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
 	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
-	FNAME(set_pte)(vcpu, spte, page->role.access, pte_access, 0, 0,
-		       gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte));
+	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
+		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte));
 }
 
 /*
@@ -399,9 +316,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		*shadow_ent = shadow_pte;
 	}
 
-	FNAME(set_pte)(vcpu, shadow_ent, access, walker->pte_access & access,
-		       user_fault, write_fault, walker->pte & PT_DIRTY_MASK,
-		       ptwrite, walker->gfn);
+	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
+		     user_fault, write_fault, walker->pte & PT_DIRTY_MASK,
+		     ptwrite, walker->gfn);
 
 	return shadow_ent;
 }