diff options
author | Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> | 2012-06-20 03:57:39 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2012-07-11 09:51:16 -0400 |
commit | 8e22f955fb65c5930cc4c5a863cce4e27d0e4a3c (patch) | |
tree | e51d6bf1b501528ffddaa00eebe8beaf5461b52f /arch/x86/kvm/mmu.c | |
parent | d13bc5b5a1f9eafd59331baa1d1d32e1867f57b5 (diff) |
KVM: MMU: cleanup spte_write_protect
Use __drop_large_spte to clean up this function and add a comment to spte_write_protect
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 45 |
1 files changed, 29 insertions, 16 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d04d6305a725..ed9e96806082 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1050,7 +1050,33 @@ static void drop_spte(struct kvm *kvm, u64 *sptep) | |||
1050 | rmap_remove(kvm, sptep); | 1050 | rmap_remove(kvm, sptep); |
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | /* Return true if the spte is dropped. */ | 1053 | |
1054 | static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) | ||
1055 | { | ||
1056 | if (is_large_pte(*sptep)) { | ||
1057 | WARN_ON(page_header(__pa(sptep))->role.level == | ||
1058 | PT_PAGE_TABLE_LEVEL); | ||
1059 | drop_spte(kvm, sptep); | ||
1060 | --kvm->stat.lpages; | ||
1061 | return true; | ||
1062 | } | ||
1063 | |||
1064 | return false; | ||
1065 | } | ||
1066 | |||
1067 | static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) | ||
1068 | { | ||
1069 | if (__drop_large_spte(vcpu->kvm, sptep)) | ||
1070 | kvm_flush_remote_tlbs(vcpu->kvm); | ||
1071 | } | ||
1072 | |||
1073 | /* | ||
1074 | * Write-protect on the specified @sptep due to dirty page logging or | ||
1075 | * protecting shadow page table. @flush indicates whether tlb need be | ||
1076 | * flushed. | ||
1077 | * | ||
1078 | * Return true if the spte is dropped. | ||
1079 | */ | ||
1054 | static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) | 1080 | static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) |
1055 | { | 1081 | { |
1056 | u64 spte = *sptep; | 1082 | u64 spte = *sptep; |
@@ -1061,13 +1087,9 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) | |||
1061 | rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); | 1087 | rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); |
1062 | 1088 | ||
1063 | *flush |= true; | 1089 | *flush |= true; |
1064 | if (is_large_pte(spte)) { | 1090 | |
1065 | WARN_ON(page_header(__pa(sptep))->role.level == | 1091 | if (__drop_large_spte(kvm, sptep)) |
1066 | PT_PAGE_TABLE_LEVEL); | ||
1067 | drop_spte(kvm, sptep); | ||
1068 | --kvm->stat.lpages; | ||
1069 | return true; | 1092 | return true; |
1070 | } | ||
1071 | 1093 | ||
1072 | spte = spte & ~PT_WRITABLE_MASK; | 1094 | spte = spte & ~PT_WRITABLE_MASK; |
1073 | mmu_spte_update(sptep, spte); | 1095 | mmu_spte_update(sptep, spte); |
@@ -1878,15 +1900,6 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) | |||
1878 | mmu_spte_set(sptep, spte); | 1900 | mmu_spte_set(sptep, spte); |
1879 | } | 1901 | } |
1880 | 1902 | ||
1881 | static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) | ||
1882 | { | ||
1883 | if (is_large_pte(*sptep)) { | ||
1884 | drop_spte(vcpu->kvm, sptep); | ||
1885 | --vcpu->kvm->stat.lpages; | ||
1886 | kvm_flush_remote_tlbs(vcpu->kvm); | ||
1887 | } | ||
1888 | } | ||
1889 | |||
1890 | static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, | 1903 | static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
1891 | unsigned direct_access) | 1904 | unsigned direct_access) |
1892 | { | 1905 | { |