author    | Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> | 2012-03-01 05:31:22 -0500
committer | Avi Kivity <avi@redhat.com>                       | 2012-04-08 05:49:56 -0400
commit    | a0ed46073c14f66dbf0707aaa7588b78da83d7c6 (patch)
tree      | 3dd18f222e5700f7e3251dcbdb4bc325b93ae14b /arch/x86/kvm/mmu.c
parent    | 248997095d652576f1213028a95ca5fff85d089f (diff)
KVM: MMU: Split the main body of rmap_write_protect() off from others
We will use this in the following patch to implement another function
which needs to write protect pages using the rmap information.
Note that there is a small change in debug printing for large pages:
we do not differentiate them from others to avoid duplicating code.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
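
For context, a rough sketch of the kind of caller the changelog anticipates: a function that walks a dirty bitmap mask and write protects each touched gfn through its rmap by reusing the split-out helper. The function name, the per-slot rmap array layout, and the exact signature below are assumptions for illustration only; they are not part of this patch.

	/*
	 * Illustrative sketch only -- not part of this patch.  Assumes the
	 * __rmap_write_protect() helper introduced below and a per-slot rmap
	 * array indexed by gfn offset within the slot.
	 */
	static void write_protect_masked(struct kvm *kvm,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn_offset, unsigned long mask)
	{
		unsigned long *rmapp;

		while (mask) {
			/* rmap entry for the lowest set bit in mask */
			rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
			__rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);

			/* clear that bit and move on to the next dirty gfn */
			mask &= mask - 1;
		}
	}

Because the helper takes a bare rmap pointer and a level rather than a gfn and slot, such a caller can write protect exactly the pages it cares about without duplicating the spte walk.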
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 53
1 files changed, 27 insertions, 26 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4cb164268846..c8b5694d1a48 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1010,42 +1010,43 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 		rmap_remove(kvm, sptep);
 }
 
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-			       struct kvm_memory_slot *slot)
+static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
-	unsigned long *rmapp;
-	u64 *spte;
-	int i, write_protected = 0;
+	u64 *spte = NULL;
+	int write_protected = 0;
 
-	rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
-	spte = rmap_next(rmapp, NULL);
-	while (spte) {
+	while ((spte = rmap_next(rmapp, spte))) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		if (is_writable_pte(*spte)) {
+
+		if (!is_writable_pte(*spte))
+			continue;
+
+		if (level == PT_PAGE_TABLE_LEVEL) {
 			mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
-			write_protected = 1;
+		} else {
+			BUG_ON(!is_large_pte(*spte));
+			drop_spte(kvm, spte);
+			--kvm->stat.lpages;
+			spte = NULL;
 		}
-		spte = rmap_next(rmapp, spte);
+
+		write_protected = 1;
 	}
 
-	/* check for huge page mappings */
-	for (i = PT_DIRECTORY_LEVEL;
+	return write_protected;
+}
+
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+			       struct kvm_memory_slot *slot)
+{
+	unsigned long *rmapp;
+	int i, write_protected = 0;
+
+	for (i = PT_PAGE_TABLE_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		rmapp = __gfn_to_rmap(gfn, i, slot);
-		spte = rmap_next(rmapp, NULL);
-		while (spte) {
-			BUG_ON(!(*spte & PT_PRESENT_MASK));
-			BUG_ON(!is_large_pte(*spte));
-			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
-			if (is_writable_pte(*spte)) {
-				drop_spte(kvm, spte);
-				--kvm->stat.lpages;
-				spte = NULL;
-				write_protected = 1;
-			}
-			spte = rmap_next(rmapp, spte);
-		}
+		write_protected |= __rmap_write_protect(kvm, rmapp, i);
 	}
 
 	return write_protected;