about summary refs log tree commit diff stats
path: root/virt/kvm/kvm_main.c
diff options
context:
space:
mode:
author Linus Torvalds <torvalds@linux-foundation.org> 2010-10-24 15:47:25 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2010-10-24 15:47:25 -0400
commit 1765a1fe5d6f82c0eceb1ad10594cfc83759b6d0 (patch)
tree a701020f0fa3a1932a36d174c5fffd20496303a9 /virt/kvm/kvm_main.c
parent bdaf12b41235b0c59949914de022341e77907461 (diff)
parent 2a31339aa014c0d0b97c57d3ebc997732f8f47fc (diff)
Merge branch 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (321 commits) KVM: Drop CONFIG_DMAR dependency around kvm_iommu_map_pages KVM: Fix signature of kvm_iommu_map_pages stub KVM: MCE: Send SRAR SIGBUS directly KVM: MCE: Add MCG_SER_P into KVM_MCE_CAP_SUPPORTED KVM: fix typo in copyright notice KVM: Disable interrupts around get_kernel_ns() KVM: MMU: Avoid sign extension in mmu_alloc_direct_roots() pae root address KVM: MMU: move access code parsing to FNAME(walk_addr) function KVM: MMU: audit: check whether have unsync sps after root sync KVM: MMU: audit: introduce audit_printk to cleanup audit code KVM: MMU: audit: unregister audit tracepoints before module unloaded KVM: MMU: audit: fix vcpu's spte walking KVM: MMU: set access bit for direct mapping KVM: MMU: cleanup for error mask set while walk guest page table KVM: MMU: update 'root_hpa' out of loop in PAE shadow path KVM: x86 emulator: Eliminate compilation warning in x86_decode_insn() KVM: x86: Fix constant type in kvm_get_time_scale KVM: VMX: Add AX to list of registers clobbered by guest switch KVM guest: Move a printk that's using the clock before it's ready KVM: x86: TSC catchup mode ...
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r-- virt/kvm/kvm_main.c | 84
1 file changed, 66 insertions(+), 18 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 60e5e4612b0b..5225052aebc1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5,7 +5,7 @@
5 * machines without emulation or binary translation. 5 * machines without emulation or binary translation.
6 * 6 *
7 * Copyright (C) 2006 Qumranet, Inc. 7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affilates. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 * 9 *
10 * Authors: 10 * Authors:
11 * Avi Kivity <avi@qumranet.com> 11 * Avi Kivity <avi@qumranet.com>
@@ -705,14 +705,12 @@ skip_lpage:
705 if (r) 705 if (r)
706 goto out_free; 706 goto out_free;
707 707
708#ifdef CONFIG_DMAR
709 /* map the pages in iommu page table */ 708 /* map the pages in iommu page table */
710 if (npages) { 709 if (npages) {
711 r = kvm_iommu_map_pages(kvm, &new); 710 r = kvm_iommu_map_pages(kvm, &new);
712 if (r) 711 if (r)
713 goto out_free; 712 goto out_free;
714 } 713 }
715#endif
716 714
717 r = -ENOMEM; 715 r = -ENOMEM;
718 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 716 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
@@ -927,35 +925,46 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
927 return memslot - slots->memslots; 925 return memslot - slots->memslots;
928} 926}
929 927
930static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 928static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
931{ 929 gfn_t *nr_pages)
932 return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
933}
934
935unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
936{ 930{
937 struct kvm_memory_slot *slot; 931 struct kvm_memory_slot *slot;
938 932
939 slot = gfn_to_memslot(kvm, gfn); 933 slot = gfn_to_memslot(kvm, gfn);
940 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 934 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
941 return bad_hva(); 935 return bad_hva();
936
937 if (nr_pages)
938 *nr_pages = slot->npages - (gfn - slot->base_gfn);
939
942 return gfn_to_hva_memslot(slot, gfn); 940 return gfn_to_hva_memslot(slot, gfn);
943} 941}
942
943unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
944{
945 return gfn_to_hva_many(kvm, gfn, NULL);
946}
944EXPORT_SYMBOL_GPL(gfn_to_hva); 947EXPORT_SYMBOL_GPL(gfn_to_hva);
945 948
946static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr) 949static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
947{ 950{
948 struct page *page[1]; 951 struct page *page[1];
949 int npages; 952 int npages;
950 pfn_t pfn; 953 pfn_t pfn;
951 954
952 might_sleep(); 955 if (atomic)
953 956 npages = __get_user_pages_fast(addr, 1, 1, page);
954 npages = get_user_pages_fast(addr, 1, 1, page); 957 else {
958 might_sleep();
959 npages = get_user_pages_fast(addr, 1, 1, page);
960 }
955 961
956 if (unlikely(npages != 1)) { 962 if (unlikely(npages != 1)) {
957 struct vm_area_struct *vma; 963 struct vm_area_struct *vma;
958 964
965 if (atomic)
966 goto return_fault_page;
967
959 down_read(&current->mm->mmap_sem); 968 down_read(&current->mm->mmap_sem);
960 if (is_hwpoison_address(addr)) { 969 if (is_hwpoison_address(addr)) {
961 up_read(&current->mm->mmap_sem); 970 up_read(&current->mm->mmap_sem);
@@ -968,6 +977,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
968 if (vma == NULL || addr < vma->vm_start || 977 if (vma == NULL || addr < vma->vm_start ||
969 !(vma->vm_flags & VM_PFNMAP)) { 978 !(vma->vm_flags & VM_PFNMAP)) {
970 up_read(&current->mm->mmap_sem); 979 up_read(&current->mm->mmap_sem);
980return_fault_page:
971 get_page(fault_page); 981 get_page(fault_page);
972 return page_to_pfn(fault_page); 982 return page_to_pfn(fault_page);
973 } 983 }
@@ -981,7 +991,13 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
981 return pfn; 991 return pfn;
982} 992}
983 993
984pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 994pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
995{
996 return hva_to_pfn(kvm, addr, true);
997}
998EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
999
1000static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
985{ 1001{
986 unsigned long addr; 1002 unsigned long addr;
987 1003
@@ -991,7 +1007,18 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
991 return page_to_pfn(bad_page); 1007 return page_to_pfn(bad_page);
992 } 1008 }
993 1009
994 return hva_to_pfn(kvm, addr); 1010 return hva_to_pfn(kvm, addr, atomic);
1011}
1012
1013pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1014{
1015 return __gfn_to_pfn(kvm, gfn, true);
1016}
1017EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1018
1019pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1020{
1021 return __gfn_to_pfn(kvm, gfn, false);
995} 1022}
996EXPORT_SYMBOL_GPL(gfn_to_pfn); 1023EXPORT_SYMBOL_GPL(gfn_to_pfn);
997 1024
@@ -999,9 +1026,26 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
999 struct kvm_memory_slot *slot, gfn_t gfn) 1026 struct kvm_memory_slot *slot, gfn_t gfn)
1000{ 1027{
1001 unsigned long addr = gfn_to_hva_memslot(slot, gfn); 1028 unsigned long addr = gfn_to_hva_memslot(slot, gfn);
1002 return hva_to_pfn(kvm, addr); 1029 return hva_to_pfn(kvm, addr, false);
1003} 1030}
1004 1031
1032int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1033 int nr_pages)
1034{
1035 unsigned long addr;
1036 gfn_t entry;
1037
1038 addr = gfn_to_hva_many(kvm, gfn, &entry);
1039 if (kvm_is_error_hva(addr))
1040 return -1;
1041
1042 if (entry < nr_pages)
1043 return 0;
1044
1045 return __get_user_pages_fast(addr, nr_pages, 1, pages);
1046}
1047EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1048
1005struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1049struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1006{ 1050{
1007 pfn_t pfn; 1051 pfn_t pfn;
@@ -1964,7 +2008,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1964 case CPU_STARTING: 2008 case CPU_STARTING:
1965 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2009 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
1966 cpu); 2010 cpu);
2011 spin_lock(&kvm_lock);
1967 hardware_enable(NULL); 2012 hardware_enable(NULL);
2013 spin_unlock(&kvm_lock);
1968 break; 2014 break;
1969 } 2015 }
1970 return NOTIFY_OK; 2016 return NOTIFY_OK;
@@ -1977,7 +2023,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
1977 /* spin while reset goes on */ 2023 /* spin while reset goes on */
1978 local_irq_enable(); 2024 local_irq_enable();
1979 while (true) 2025 while (true)
1980 ; 2026 cpu_relax();
1981 } 2027 }
1982 /* Fault while not rebooting. We want the trace. */ 2028 /* Fault while not rebooting. We want the trace. */
1983 BUG(); 2029 BUG();
@@ -2171,8 +2217,10 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2171 2217
2172static int kvm_resume(struct sys_device *dev) 2218static int kvm_resume(struct sys_device *dev)
2173{ 2219{
2174 if (kvm_usage_count) 2220 if (kvm_usage_count) {
2221 WARN_ON(spin_is_locked(&kvm_lock));
2175 hardware_enable(NULL); 2222 hardware_enable(NULL);
2223 }
2176 return 0; 2224 return 0;
2177} 2225}
2178 2226