author		Paul Mackerras <paulus@samba.org>	2011-12-14 21:03:22 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:52:39 -0500
commit		82ed36164c8a8ee685ea3fb3c4f741214ac070ca (patch)
tree		259f2e7a839d7506d7fbb9a3be6763ad2f5e9fd2 /arch/powerpc
parent		55514893739d28f095f19b012133eea4cb4a9390 (diff)
KVM: PPC: Book3s HV: Implement get_dirty_log using hardware changed bit
This changes the implementation of kvm_vm_ioctl_get_dirty_log() for
Book3s HV guests to use the hardware C (changed) bits in the guest
hashed page table. Since this makes the implementation quite different
from the Book3s PR case, this moves the existing implementation from
book3s.c to book3s_pr.c and creates a new implementation in book3s_hv.c.
That implementation calls kvmppc_hv_get_dirty_log() to do the actual
work, which in turn calls kvm_test_clear_dirty() on each page.
kvm_test_clear_dirty() iterates over the HPTEs that map the page,
clearing the C bit where it is set, and returns 1 if any C bit was
set (including the saved C bit in the rmap entry).
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
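For context, the dirty log produced by this path is consumed from userspace through the KVM_GET_DIRTY_LOG ioctl. Below is a minimal userspace sketch (not part of this patch); the names vm_fd, nr_pages and the use of slot 0 are hypothetical placeholders.

	/*
	 * Minimal sketch: fetch (and implicitly clear) the dirty bitmap for
	 * one memory slot.  vm_fd is an open VM file descriptor and nr_pages
	 * the number of guest pages in slot 0 -- both assumed for this example.
	 */
	#include <linux/kvm.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int fetch_dirty_log(int vm_fd, unsigned long nr_pages)
	{
		struct kvm_dirty_log log;
		/* the kernel copies out ALIGN(npages, 64) / 8 bytes, one bit per page */
		size_t bitmap_bytes = ((nr_pages + 63) / 64) * 8;
		void *bitmap = calloc(1, bitmap_bytes);

		if (!bitmap)
			return -1;

		memset(&log, 0, sizeof(log));
		log.slot = 0;			/* memory slot to query */
		log.dirty_bitmap = bitmap;	/* kernel fills this buffer */

		/* On Book3s HV this now walks the HPT C bits before copying out. */
		if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
			free(bitmap);
			return -1;
		}

		/* ... scan bitmap for set bits, then reuse or free it ... */
		free(bitmap);
		return 0;
	}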
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	|  2
-rw-r--r--	arch/powerpc/kvm/book3s.c		| 39
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_hv.c	| 69
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c		| 37
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c		| 39
5 files changed, 147 insertions, 39 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 33fdc09508a1..3c3edee672aa 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -156,6 +156,8 @@ extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 			long pte_index, unsigned long pteh, unsigned long ptel);
 extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 			long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
+			struct kvm_memory_slot *memslot);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 6bf7e0582c5a..7d54f4ed6d96 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -477,45 +477,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-			       struct kvm_dirty_log *log)
-{
-	struct kvm_memory_slot *memslot;
-	struct kvm_vcpu *vcpu;
-	ulong ga, ga_end;
-	int is_dirty = 0;
-	int r;
-	unsigned long n;
-
-	mutex_lock(&kvm->slots_lock);
-
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
-		goto out;
-
-	/* If nothing is dirty, don't bother messing with page tables. */
-	if (is_dirty) {
-		memslot = id_to_memslot(kvm->memslots, log->slot);
-
-		ga = memslot->base_gfn << PAGE_SHIFT;
-		ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
-		kvm_for_each_vcpu(n, vcpu, kvm)
-			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
-
-		n = kvm_dirty_bitmap_bytes(memslot);
-		memset(memslot->dirty_bitmap, 0, n);
-	}
-
-	r = 0;
-out:
-	mutex_unlock(&kvm->slots_lock);
-	return r;
-}
-
 void kvmppc_decrementer_func(unsigned long data)
 {
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 926e2b92bdab..783cd3510c93 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -870,6 +870,75 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 }
 
+static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+	struct revmap_entry *rev = kvm->arch.revmap;
+	unsigned long head, i, j;
+	unsigned long *hptep;
+	int ret = 0;
+
+ retry:
+	lock_rmap(rmapp);
+	if (*rmapp & KVMPPC_RMAP_CHANGED) {
+		*rmapp &= ~KVMPPC_RMAP_CHANGED;
+		ret = 1;
+	}
+	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
+		unlock_rmap(rmapp);
+		return ret;
+	}
+
+	i = head = *rmapp & KVMPPC_RMAP_INDEX;
+	do {
+		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+		j = rev[i].forw;
+
+		if (!(hptep[1] & HPTE_R_C))
+			continue;
+
+		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+			/* unlock rmap before spinning on the HPTE lock */
+			unlock_rmap(rmapp);
+			while (hptep[0] & HPTE_V_HVLOCK)
+				cpu_relax();
+			goto retry;
+		}
+
+		/* Now check and modify the HPTE */
+		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
+			/* need to make it temporarily absent to clear C */
+			hptep[0] |= HPTE_V_ABSENT;
+			kvmppc_invalidate_hpte(kvm, hptep, i);
+			hptep[1] &= ~HPTE_R_C;
+			eieio();
+			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
+			rev[i].guest_rpte |= HPTE_R_C;
+			ret = 1;
+		}
+		hptep[0] &= ~HPTE_V_HVLOCK;
+	} while ((i = j) != head);
+
+	unlock_rmap(rmapp);
+	return ret;
+}
+
+long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	unsigned long i;
+	unsigned long *rmapp, *map;
+
+	preempt_disable();
+	rmapp = memslot->rmap;
+	map = memslot->dirty_bitmap;
+	for (i = 0; i < memslot->npages; ++i) {
+		if (kvm_test_clear_dirty(kvm, rmapp))
+			__set_bit_le(i, map);
+		++rmapp;
+	}
+	preempt_enable();
+	return 0;
+}
+
 void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 			    unsigned long *nb_ret)
 {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 86c4191cb75b..0f1ddf0ec032 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1072,6 +1072,43 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
 	return fd;
 }
 
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	struct kvm_memory_slot *memslot;
+	int r;
+	unsigned long n;
+
+	mutex_lock(&kvm->slots_lock);
+
+	r = -EINVAL;
+	if (log->slot >= KVM_MEMORY_SLOTS)
+		goto out;
+
+	memslot = id_to_memslot(kvm->memslots, log->slot);
+	r = -ENOENT;
+	if (!memslot->dirty_bitmap)
+		goto out;
+
+	n = kvm_dirty_bitmap_bytes(memslot);
+	memset(memslot->dirty_bitmap, 0, n);
+
+	r = kvmppc_hv_get_dirty_log(kvm, memslot);
+	if (r)
+		goto out;
+
+	r = -EFAULT;
+	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+		goto out;
+
+	r = 0;
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return r;
+}
+
 static unsigned long slb_pgsize_encoding(unsigned long psize)
 {
 	unsigned long senc = 0;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 2da670405727..c193625d5289 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1056,6 +1056,45 @@ out:
 	return ret;
 }
 
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+			       struct kvm_dirty_log *log)
+{
+	struct kvm_memory_slot *memslot;
+	struct kvm_vcpu *vcpu;
+	ulong ga, ga_end;
+	int is_dirty = 0;
+	int r;
+	unsigned long n;
+
+	mutex_lock(&kvm->slots_lock);
+
+	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	if (r)
+		goto out;
+
+	/* If nothing is dirty, don't bother messing with page tables. */
+	if (is_dirty) {
+		memslot = id_to_memslot(kvm->memslots, log->slot);
+
+		ga = memslot->base_gfn << PAGE_SHIFT;
+		ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+		kvm_for_each_vcpu(n, vcpu, kvm)
+			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
+
+		n = kvm_dirty_bitmap_bytes(memslot);
+		memset(memslot->dirty_bitmap, 0, n);
+	}
+
+	r = 0;
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return r;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				      struct kvm_userspace_memory_region *mem)
 {