| author | Avi Kivity <avi@qumranet.com> | 2007-03-30 07:02:32 -0400 |
|---|---|---|
| committer | Avi Kivity <avi@qumranet.com> | 2007-05-03 03:52:28 -0400 |
| commit | 954bbbc236afe23b368abdf4942f313a5f6e1d50 | |
| tree | 4c990bff08f7a9a13980deb68a85e091065bb1ec | |
| parent | e0fa826f969c262c23908953bf85add487cc2e6c | |
KVM: Simplify gfn_to_page()
Mapping a guest page to a host page is a common operation. Currently,
one first has to find the memory slot the page belongs to (gfn_to_memslot()),
and then look up the page itself (gfn_to_page()).

This is clumsy, and it also won't work well with memory aliases. So simplify
gfn_to_page() so that it no longer requires the memory slot lookup to be done
first, and instead performs it internally.
Signed-off-by: Avi Kivity <avi@qumranet.com>
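
For reference, the change in calling convention can be sketched as follows (an illustrative fragment, not part of the patch; the surrounding function, the error path, and the kvm/gfn variables are placeholders):

/* Before this patch a caller needed two steps:
 *
 *	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
 *	struct page *page = slot ? gfn_to_page(slot, gfn) : NULL;
 *
 * After it, gfn_to_page() takes the struct kvm pointer, does the memslot
 * lookup internally, and returns NULL for a gfn not backed by any slot:
 */
struct page *page = gfn_to_page(kvm, gfn);
if (!page)
	return NULL;	/* caller-specific error handling goes here */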
-rw-r--r-- | drivers/kvm/kvm.h | 12
-rw-r--r-- | drivers/kvm/kvm_main.c | 45
-rw-r--r-- | drivers/kvm/mmu.c | 12
-rw-r--r-- | drivers/kvm/vmx.c | 6
4 files changed, 33 insertions, 42 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 59357bea5b61..d19985a5508a 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -443,11 +443,7 @@ void kvm_emulator_want_group7_invlpg(void);
 
 extern hpa_t bad_page_address;
 
-static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return slot->phys_mem[gfn - slot->base_gfn];
-}
-
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
@@ -523,12 +519,6 @@ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	return vcpu->mmu.page_fault(vcpu, gva, error_code);
 }
 
-static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
-{
-	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
-	return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
-}
-
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 33eade7e237c..a0ec5dda3305 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -420,12 +420,12 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	u64 pdpte;
 	u64 *pdpt;
 	int ret;
-	struct kvm_memory_slot *memslot;
+	struct page *page;
 
 	spin_lock(&vcpu->kvm->lock);
-	memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
-	/* FIXME: !memslot - emulate? 0xff? */
-	pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
+	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
+	/* FIXME: !page - emulate? 0xff? */
+	pdpt = kmap_atomic(page, KM_USER0);
 
 	ret = 1;
 	for (i = 0; i < 4; ++i) {
@@ -861,6 +861,17 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot)
+		return NULL;
+	return slot->phys_mem[gfn - slot->base_gfn];
+}
+EXPORT_SYMBOL_GPL(gfn_to_page);
+
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
@@ -899,20 +910,20 @@ static int emulator_read_std(unsigned long addr,
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
 		unsigned long pfn;
-		struct kvm_memory_slot *memslot;
-		void *page;
+		struct page *page;
+		void *page_virt;
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
 		pfn = gpa >> PAGE_SHIFT;
-		memslot = gfn_to_memslot(vcpu->kvm, pfn);
-		if (!memslot)
+		page = gfn_to_page(vcpu->kvm, pfn);
+		if (!page)
 			return X86EMUL_UNHANDLEABLE;
-		page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);
+		page_virt = kmap_atomic(page, KM_USER0);
 
-		memcpy(data, page + offset, tocopy);
+		memcpy(data, page_virt + offset, tocopy);
 
-		kunmap_atomic(page, KM_USER0);
+		kunmap_atomic(page_virt, KM_USER0);
 
 		bytes -= tocopy;
 		data += tocopy;
@@ -963,16 +974,14 @@ static int emulator_read_emulated(unsigned long addr,
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			       unsigned long val, int bytes)
 {
-	struct kvm_memory_slot *m;
 	struct page *page;
 	void *virt;
 
 	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
 		return 0;
-	m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
-	if (!m)
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (!page)
 		return 0;
-	page = gfn_to_page(m, gpa >> PAGE_SHIFT);
 	kvm_mmu_pre_write(vcpu, gpa, bytes);
 	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
 	virt = kmap_atomic(page, KM_USER0);
@@ -2516,15 +2525,11 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 {
 	struct kvm *kvm = vma->vm_file->private_data;
 	unsigned long pgoff;
-	struct kvm_memory_slot *slot;
 	struct page *page;
 
 	*type = VM_FAULT_MINOR;
 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	slot = gfn_to_memslot(kvm, pgoff);
-	if (!slot)
-		return NOPAGE_SIGBUS;
-	page = gfn_to_page(slot, pgoff);
+	page = gfn_to_page(kvm, pgoff);
 	if (!page)
 		return NOPAGE_SIGBUS;
 	get_page(page);
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 376800a33968..8bdb9ca1811c 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -390,13 +390,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct page *page;
-	struct kvm_memory_slot *slot;
 	struct kvm_rmap_desc *desc;
 	u64 *spte;
 
-	slot = gfn_to_memslot(kvm, gfn);
-	BUG_ON(!slot);
-	page = gfn_to_page(slot, gfn);
+	page = gfn_to_page(kvm, gfn);
+	BUG_ON(!page);
 
 	while (page_private(page)) {
 		if (!(page_private(page) & 1))
@@ -711,14 +709,12 @@ hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-	struct kvm_memory_slot *slot;
 	struct page *page;
 
 	ASSERT((gpa & HPA_ERR_MASK) == 0);
-	slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
-	if (!slot)
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (!page)
 		return gpa | HPA_ERR_MASK;
-	page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
 	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
 		| (gpa & (PAGE_SIZE-1));
 }
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index b64b7b792e84..61a611691e50 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -926,9 +926,9 @@ static int init_rmode_tss(struct kvm* kvm)
 	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
 	char *page;
 
-	p1 = _gfn_to_page(kvm, fn++);
-	p2 = _gfn_to_page(kvm, fn++);
-	p3 = _gfn_to_page(kvm, fn);
+	p1 = gfn_to_page(kvm, fn++);
+	p2 = gfn_to_page(kvm, fn++);
+	p3 = gfn_to_page(kvm, fn);
 
 	if (!p1 || !p2 || !p3) {
 		kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);