author		Avi Kivity <avi@qumranet.com>	2007-12-09 10:40:31 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:53:21 -0500
commit		1c4f1fd6d5692614e8dc75ee53f7be590f1e878b
tree		8b3c8f468923537dbd87c39995661e704c4f2b80 /drivers/kvm/mmu.c
parent		2fbf4cf13f777e1f61ee692fe67d16bddd747700
KVM: MMU: Move set_pte() into guest paging mode independent code
As set_pte() no longer references either a gpte or the guest walker, we can
move it out of paging mode dependent code (which compiles twice and is
generally nasty).
Signed-off-by: Avi Kivity <avi@qumranet.com>
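
[Editorial note] The "compiles twice" remark refers to how mmu.c instantiates the guest page-table walker: paging_tmpl.h is included twice, once per guest paging mode, so every function in that header is compiled in two copies. An abridged excerpt of the real pattern (the surrounding #defines are trimmed; only the mode-mangling core is shown):

    /* In mmu.c: each inclusion compiles one copy of the walker. */
    #define PTTYPE 64
    #include "paging_tmpl.h"	/* emits paging64_page_fault(), ... */
    #undef PTTYPE

    #define PTTYPE 32
    #include "paging_tmpl.h"	/* emits paging32_page_fault(), ... */
    #undef PTTYPE

    /* In paging_tmpl.h: FNAME() mangles identifiers per mode. */
    #if PTTYPE == 64
    #define pt_element_t u64
    #define FNAME(name) paging##64_##name
    #elif PTTYPE == 32
    #define pt_element_t u32
    #define FNAME(name) paging##32_##name
    #endif

Anything left in paging_tmpl.h is built twice; code hoisted into mmu.c, such as the mmu_set_spte() added below, is compiled exactly once.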
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--	drivers/kvm/mmu.c	83
1 files changed, 83 insertions(+), 0 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index cace1e41b683..a91e05b42345 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -879,6 +879,89 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+			 unsigned pt_access, unsigned pte_access,
+			 int user_fault, int write_fault, int dirty,
+			 int *ptwrite, gfn_t gfn)
+{
+	u64 spte;
+	int was_rmapped = is_rmap_pte(*shadow_pte);
+	struct page *page;
+
+	pgprintk("%s: spte %llx access %x write_fault %d"
+		 " user_fault %d gfn %lx\n",
+		 __FUNCTION__, *shadow_pte, pt_access,
+		 write_fault, user_fault, gfn);
+
+	/*
+	 * We don't set the accessed bit, since we sometimes want to see
+	 * whether the guest actually used the pte (in order to detect
+	 * demand paging).
+	 */
+	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	if (!dirty)
+		pte_access &= ~ACC_WRITE_MASK;
+	if (!(pte_access & ACC_EXEC_MASK))
+		spte |= PT64_NX_MASK;
+
+	page = gfn_to_page(vcpu->kvm, gfn);
+
+	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_USER_MASK)
+		spte |= PT_USER_MASK;
+
+	if (is_error_page(page)) {
+		set_shadow_pte(shadow_pte,
+			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+		kvm_release_page_clean(page);
+		return;
+	}
+
+	spte |= page_to_phys(page);
+
+	if ((pte_access & ACC_WRITE_MASK)
+	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+		struct kvm_mmu_page *shadow;
+
+		spte |= PT_WRITABLE_MASK;
+		if (user_fault) {
+			mmu_unshadow(vcpu->kvm, gfn);
+			goto unshadowed;
+		}
+
+		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
+		if (shadow) {
+			pgprintk("%s: found shadow page for %lx, marking ro\n",
+				 __FUNCTION__, gfn);
+			pte_access &= ~ACC_WRITE_MASK;
+			if (is_writeble_pte(spte)) {
+				spte &= ~PT_WRITABLE_MASK;
+				kvm_x86_ops->tlb_flush(vcpu);
+			}
+			if (write_fault)
+				*ptwrite = 1;
+		}
+	}
+
+unshadowed:
+
+	if (pte_access & ACC_WRITE_MASK)
+		mark_page_dirty(vcpu->kvm, gfn);
+
+	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+	set_shadow_pte(shadow_pte, spte);
+	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+	if (!was_rmapped) {
+		rmap_add(vcpu, shadow_pte, gfn);
+		if (!is_rmap_pte(*shadow_pte))
+			kvm_release_page_clean(page);
+	}
+	else
+		kvm_release_page_clean(page);
+	if (!ptwrite || !*ptwrite)
+		vcpu->last_pte_updated = shadow_pte;
+}
+
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
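
[Editorial note] The matching caller-side change lands in paging_tmpl.h and is outside this page's diffstat. As a rough sketch only (an assumed shape, not the verbatim companion hunk; the real wrapper's parameter list may differ), the guest-mode set_pte() reduces to deriving the gpte-dependent bits and forwarding:

    static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			       u64 *shadow_pte, unsigned pt_access,
			       unsigned pte_access, int user_fault,
			       int write_fault, int *ptwrite, gfn_t gfn)
    {
	    /* The gpte is consulted only for the dirty bit; all spte
	     * construction happens in the shared mmu_set_spte() above. */
	    mmu_set_spte(vcpu, shadow_pte, pt_access, pte_access,
			 user_fault, write_fault,
			 !!(gpte & PT_DIRTY_MASK), ptwrite, gfn);
    }

With this split, the twice-compiled template keeps only gpte parsing, while rmap maintenance, write protection of shadowed pages, and page refcounting live in one shared copy.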