about summary refs log tree commit diff stats
path: root/virt
diff options
context:
space:
mode:
author	Paul Mackerras <paulus@samba.org>	2012-01-12 15:09:51 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:57:22 -0500
commit	9d4cba7f93c52d4121ab9c6f289e582d368a6979 (patch)
tree	482c7d42dcf6921cf82cd54c51c5d0f07b64b748 /virt
parent	1a18a69b762374c423305772500f36eb8984ca52 (diff)
KVM: Move gfn_to_memslot() to kvm_host.h
This moves __gfn_to_memslot() and search_memslots() from kvm_main.c to
kvm_host.h to reduce the code duplication caused by the need for
non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c to call
gfn_to_memslot() in real mode.

Rather than putting gfn_to_memslot() itself in a header, which would lead
to increased code size, this puts __gfn_to_memslot() in a header.  Then,
the non-modular uses of gfn_to_memslot() are changed to call
__gfn_to_memslot() instead.  This way there is only one place in the
source code that needs to be changed should the gfn_to_memslot()
implementation need to be modified.

On powerpc, the Book3S HV style of KVM has code that is called from real
mode which needs to call gfn_to_memslot() and thus needs this.  (Module
code is allocated in the vmalloc region, which can't be accessed in real
mode.)

With this, we can remove builtin_gfn_to_memslot() from book3s_hv_rm_mmu.c.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	21
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9f32bffd37c0..470e30520fe8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -640,19 +640,6 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 }
 #endif /* !CONFIG_S390 */
 
-static struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
-{
-	struct kvm_memory_slot *memslot;
-
-	kvm_for_each_memslot(memslot, slots)
-		if (gfn >= memslot->base_gfn &&
-		    gfn < memslot->base_gfn + memslot->npages)
-			return memslot;
-
-	return NULL;
-}
-
 static int cmp_memslot(const void *slot1, const void *slot2)
 {
 	struct kvm_memory_slot *s1, *s2;
@@ -1031,12 +1018,6 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
-						gfn_t gfn)
-{
-	return search_memslots(slots, gfn);
-}
-
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
@@ -1459,7 +1440,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 
 	ghc->gpa = gpa;
 	ghc->generation = slots->generation;
-	ghc->memslot = __gfn_to_memslot(slots, gfn);
+	ghc->memslot = gfn_to_memslot(kvm, gfn);
 	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
 	if (!kvm_is_error_hva(ghc->hva))
 		ghc->hva += offset;