aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm/kvm_main.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2007-03-30 07:02:32 -0400
committerAvi Kivity <avi@qumranet.com>2007-05-03 03:52:28 -0400
commit954bbbc236afe23b368abdf4942f313a5f6e1d50 (patch)
tree4c990bff08f7a9a13980deb68a85e091065bb1ec /drivers/kvm/kvm_main.c
parente0fa826f969c262c23908953bf85add487cc2e6c (diff)
KVM: Simplify gfn_to_page()
Mapping a guest page to a host page is a common operation. Currently, one has first to find the memory slot where the page belongs (gfn_to_memslot), then locate the page itself (gfn_to_page()). This is clumsy, and also won't work well with memory aliases. So simplify gfn_to_page() not to require memory slot translation first, and instead do it internally. Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--drivers/kvm/kvm_main.c45
1 file changed, 25 insertions, 20 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 33eade7e237c..a0ec5dda3305 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -420,12 +420,12 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
420 u64 pdpte; 420 u64 pdpte;
421 u64 *pdpt; 421 u64 *pdpt;
422 int ret; 422 int ret;
423 struct kvm_memory_slot *memslot; 423 struct page *page;
424 424
425 spin_lock(&vcpu->kvm->lock); 425 spin_lock(&vcpu->kvm->lock);
426 memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn); 426 page = gfn_to_page(vcpu->kvm, pdpt_gfn);
427 /* FIXME: !memslot - emulate? 0xff? */ 427 /* FIXME: !page - emulate? 0xff? */
428 pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0); 428 pdpt = kmap_atomic(page, KM_USER0);
429 429
430 ret = 1; 430 ret = 1;
431 for (i = 0; i < 4; ++i) { 431 for (i = 0; i < 4; ++i) {
@@ -861,6 +861,17 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
861} 861}
862EXPORT_SYMBOL_GPL(gfn_to_memslot); 862EXPORT_SYMBOL_GPL(gfn_to_memslot);
863 863
864struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
865{
866 struct kvm_memory_slot *slot;
867
868 slot = gfn_to_memslot(kvm, gfn);
869 if (!slot)
870 return NULL;
871 return slot->phys_mem[gfn - slot->base_gfn];
872}
873EXPORT_SYMBOL_GPL(gfn_to_page);
874
864void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 875void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
865{ 876{
866 int i; 877 int i;
@@ -899,20 +910,20 @@ static int emulator_read_std(unsigned long addr,
899 unsigned offset = addr & (PAGE_SIZE-1); 910 unsigned offset = addr & (PAGE_SIZE-1);
900 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset); 911 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
901 unsigned long pfn; 912 unsigned long pfn;
902 struct kvm_memory_slot *memslot; 913 struct page *page;
903 void *page; 914 void *page_virt;
904 915
905 if (gpa == UNMAPPED_GVA) 916 if (gpa == UNMAPPED_GVA)
906 return X86EMUL_PROPAGATE_FAULT; 917 return X86EMUL_PROPAGATE_FAULT;
907 pfn = gpa >> PAGE_SHIFT; 918 pfn = gpa >> PAGE_SHIFT;
908 memslot = gfn_to_memslot(vcpu->kvm, pfn); 919 page = gfn_to_page(vcpu->kvm, pfn);
909 if (!memslot) 920 if (!page)
910 return X86EMUL_UNHANDLEABLE; 921 return X86EMUL_UNHANDLEABLE;
911 page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0); 922 page_virt = kmap_atomic(page, KM_USER0);
912 923
913 memcpy(data, page + offset, tocopy); 924 memcpy(data, page_virt + offset, tocopy);
914 925
915 kunmap_atomic(page, KM_USER0); 926 kunmap_atomic(page_virt, KM_USER0);
916 927
917 bytes -= tocopy; 928 bytes -= tocopy;
918 data += tocopy; 929 data += tocopy;
@@ -963,16 +974,14 @@ static int emulator_read_emulated(unsigned long addr,
963static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 974static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
964 unsigned long val, int bytes) 975 unsigned long val, int bytes)
965{ 976{
966 struct kvm_memory_slot *m;
967 struct page *page; 977 struct page *page;
968 void *virt; 978 void *virt;
969 979
970 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT)) 980 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
971 return 0; 981 return 0;
972 m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT); 982 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
973 if (!m) 983 if (!page)
974 return 0; 984 return 0;
975 page = gfn_to_page(m, gpa >> PAGE_SHIFT);
976 kvm_mmu_pre_write(vcpu, gpa, bytes); 985 kvm_mmu_pre_write(vcpu, gpa, bytes);
977 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); 986 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
978 virt = kmap_atomic(page, KM_USER0); 987 virt = kmap_atomic(page, KM_USER0);
@@ -2516,15 +2525,11 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
2516{ 2525{
2517 struct kvm *kvm = vma->vm_file->private_data; 2526 struct kvm *kvm = vma->vm_file->private_data;
2518 unsigned long pgoff; 2527 unsigned long pgoff;
2519 struct kvm_memory_slot *slot;
2520 struct page *page; 2528 struct page *page;
2521 2529
2522 *type = VM_FAULT_MINOR; 2530 *type = VM_FAULT_MINOR;
2523 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2531 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2524 slot = gfn_to_memslot(kvm, pgoff); 2532 page = gfn_to_page(kvm, pgoff);
2525 if (!slot)
2526 return NOPAGE_SIGBUS;
2527 page = gfn_to_page(slot, pgoff);
2528 if (!page) 2533 if (!page)
2529 return NOPAGE_SIGBUS; 2534 return NOPAGE_SIGBUS;
2530 get_page(page); 2535 get_page(page);