author    Linus Torvalds <torvalds@linux-foundation.org>  2014-05-02 12:26:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-05-02 12:26:09 -0400
commit    e7e6d2a4a1aecb087c0f522d8d131a0252691398
tree      dc81a974f3a8510c88f3e40427be68c8a03a5727
parent    b28e4f08d43beca4ac8ba2f768f4aaa8b056c4cf
parent    a5a5aef451430dbd48f5bf32029b8b98b690074d
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:

 - Fix for a Haswell regression in nested virtualization, introduced
   during the merge window.

 - A fix from Oleg to async page faults.

 - A bunch of small ARM changes.

 - A trivial patch to use the new MSI-X API introduced during the
   merge window.

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: ARM: vgic: Fix the overlap check action about setting the GICD & GICC base address.
  KVM: arm/arm64: vgic: fix GICD_ICFGR register accesses
  KVM: async_pf: mm->mm_users can not pin apf->mm
  KVM: ARM: vgic: Fix sgi dispatch problem
  MAINTAINERS: co-maintainance of KVM/{arm,arm64}
  arm: KVM: fix possible misalignment of PGDs and bounce page
  KVM: x86: Check for host supported fields in shadow vmcs
  kvm: Use pci_enable_msix_exact() instead of pci_enable_msix()
  ARM: KVM: disable KVM in Kconfig on big-endian systems
 MAINTAINERS             |  5
 arch/arm/kvm/Kconfig    |  2
 arch/arm/kvm/mmu.c      | 15
 arch/x86/kvm/vmx.c      | 53
 virt/kvm/arm/vgic.c     | 15
 virt/kvm/assigned-dev.c |  3
 virt/kvm/async_pf.c     |  8
 7 files changed, 70 insertions(+), 31 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index ea44a57f790e..6bef70b614c9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5114,14 +5114,19 @@ F: drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:	Christoffer Dall <christoffer.dall@linaro.org>
+M:	Marc Zyngier <marc.zyngier@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
 W:	http://systems.cs.columbia.edu/projects/kvm-arm
 S:	Supported
 F:	arch/arm/include/uapi/asm/kvm*
 F:	arch/arm/include/asm/kvm*
 F:	arch/arm/kvm/
+F:	virt/kvm/arm/
+F:	include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M:	Christoffer Dall <christoffer.dall@linaro.org>
 M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..4be5bb150bdd 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,7 +23,7 @@ config KVM
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
-	depends on ARM_VIRT_EXT && ARM_LPAE
+	depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
 	---help---
 	  Support hosting virtualized guest machines. You will also
 	  need to select one or more of the processor modules below.
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 80bb1e6c2c29..16f804938b8f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
 #define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
 	if (boot_hyp_pgd) {
 		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-		kfree(boot_hyp_pgd);
+		free_pages((unsigned long)boot_hyp_pgd, pgd_order);
 		boot_hyp_pgd = NULL;
 	}
 
 	if (hyp_pgd)
 		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-	kfree(init_bounce_page);
+	free_page((unsigned long)init_bounce_page);
 	init_bounce_page = NULL;
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
 	for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
 		unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-	kfree(hyp_pgd);
+	free_pages((unsigned long)hyp_pgd, pgd_order);
 	hyp_pgd = NULL;
 }
 
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
 	size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
 	phys_addr_t phys_base;
 
-	init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
 	if (!init_bounce_page) {
 		kvm_err("Couldn't allocate HYP init bounce page\n");
 		err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
 			 (unsigned long)phys_base);
 	}
 
-	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
 	if (!hyp_pgd || !boot_hyp_pgd) {
 		kvm_err("Hyp mode PGD not allocated\n");
 		err = -ENOMEM;
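
The switch from kmalloc()/kzalloc() to the page allocator in the hunks above matters because the PGD programmed into the hardware must be naturally aligned, and kmalloc() does not guarantee that (especially with slab debugging enabled), while __get_free_pages() hands back a naturally aligned block of 2^order pages. A minimal userspace sketch of the get_order() arithmetic behind pgd_order; the kernel's get_order() is re-implemented here purely for illustration, and a 4 KiB PAGE_SHIFT is assumed:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* Userspace re-implementation of the kernel's get_order(): the
 * smallest order such that (PAGE_SIZE << order) covers `size` bytes. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* Anything up to one page is order 0; a 16 KiB classic ARM PGD
	 * would need order 2, i.e. four naturally aligned pages. */
	printf("get_order(32)    = %u\n", get_order(32));
	printf("get_order(4096)  = %u\n", get_order(4096));
	printf("get_order(16384) = %u\n", get_order(16384));
	return 0;
}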
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1f68c5831924..33e8c028842f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 	[number##_HIGH] = VMCS12_OFFSET(name)+4
 
 
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
 	/*
 	 * We do NOT shadow fields that are modified when L0
 	 * traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
 	GUEST_LINEAR_ADDRESS,
 	GUEST_PHYSICAL_ADDRESS
 };
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
 	ARRAY_SIZE(shadow_read_only_fields);
 
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
 	GUEST_RIP,
 	GUEST_RSP,
 	GUEST_CR0,
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
 	HOST_FS_SELECTOR,
 	HOST_GS_SELECTOR
 };
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
 	ARRAY_SIZE(shadow_read_write_fields);
 
 static const unsigned short vmcs_field_to_offset_table[] = {
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
 	}
 }
 
+static void init_vmcs_shadow_fields(void)
+{
+	int i, j;
+
+	/* No checks for read only fields yet */
+
+	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+		switch (shadow_read_write_fields[i]) {
+		case GUEST_BNDCFGS:
+			if (!vmx_mpx_supported())
+				continue;
+			break;
+		default:
+			break;
+		}
+
+		if (j < i)
+			shadow_read_write_fields[j] =
+				shadow_read_write_fields[i];
+		j++;
+	}
+	max_shadow_read_write_fields = j;
+
+	/* shadowed fields guest access without vmexit */
+	for (i = 0; i < max_shadow_read_write_fields; i++) {
+		clear_bit(shadow_read_write_fields[i],
+			  vmx_vmwrite_bitmap);
+		clear_bit(shadow_read_write_fields[i],
+			  vmx_vmread_bitmap);
+	}
+	for (i = 0; i < max_shadow_read_only_fields; i++)
+		clear_bit(shadow_read_only_fields[i],
+			  vmx_vmread_bitmap);
+}
+
 static __init int alloc_kvm_area(void)
 {
 	int cpu;
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
 		enable_vpid = 0;
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
+	if (enable_shadow_vmcs)
+		init_vmcs_shadow_fields();
 
 	if (!cpu_has_vmx_ept() ||
 	    !cpu_has_vmx_ept_4levels()) {
@@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
 
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-	/* shadowed read/write fields */
-	for (i = 0; i < max_shadow_read_write_fields; i++) {
-		clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
-		clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
-	}
-	/* shadowed read only fields */
-	for (i = 0; i < max_shadow_read_only_fields; i++)
-		clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
 
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
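
This is the Haswell fix from the pull message: the field lists were shadowed unconditionally at vmx_init() time, even when the host lacks a field (GUEST_BNDCFGS on hosts without MPX), so the clearing moves into init_vmcs_shadow_fields(), which first compacts the read/write list in place with the classic two-index idiom: i scans every candidate, j tracks the next slot to keep. A standalone sketch of that idiom, with a hypothetical host_supports() predicate standing in for !vmx_mpx_supported():

#include <stdio.h>

#define GUEST_RSP     0x681c	/* real VMCS field encodings, for flavor */
#define GUEST_RIP     0x681e
#define GUEST_BNDCFGS 0x2812

static unsigned long fields[] = { GUEST_RIP, GUEST_BNDCFGS, GUEST_RSP };
static int max_fields = sizeof(fields) / sizeof(fields[0]);

/* Hypothetical stand-in for the host capability check. */
static int host_supports(unsigned long field)
{
	return field != GUEST_BNDCFGS;	/* pretend MPX is absent */
}

int main(void)
{
	int i, j;

	/* Compact in place: keep supported fields, shrink the count. */
	for (i = j = 0; i < max_fields; i++) {
		if (!host_supports(fields[i]))
			continue;
		if (j < i)
			fields[j] = fields[i];
		j++;
	}
	max_fields = j;

	for (i = 0; i < max_fields; i++)
		printf("shadowed field 0x%lx\n", fields[i]);
	return 0;
}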
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 47b29834a6b6..56ff9bebb577 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
 	u32 val;
 	u32 *reg;
 
-	offset >>= 1;
 	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-				  vcpu->vcpu_id, offset);
+				  vcpu->vcpu_id, offset >> 1);
 
-	if (offset & 2)
+	if (offset & 4)
 		val = *reg >> 16;
 	else
 		val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
 	vgic_reg_access(mmio, &val, offset,
 			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
 	if (mmio->is_write) {
-		if (offset < 4) {
+		if (offset < 8) {
 			*reg = ~0U; /* Force PPIs/SGIs to 1 */
 			return false;
 		}
 
 		val = vgic_cfg_compress(val);
-		if (offset & 2) {
+		if (offset & 4) {
 			*reg &= 0xffff;
 			*reg |= val << 16;
 		} else {
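
For context on the two ICFGR hunks above: GICD_ICFGRn gives each interrupt 2 configuration bits, so one 32-bit register covers 16 interrupts, while the vgic's irq_cfg bitmap keeps only 1 bit per interrupt, so a 32-bit bitmap word spans two ICFGR registers (8 bytes of MMIO). The fix stops halving the byte offset in place (vgic_reg_access() still needs it) and tests offset & 4, not offset & 2, to select the halfword. A userspace sketch of the arithmetic, for illustration only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* `offset` is the byte offset into the GICD_ICFGR block. */
	for (uint32_t offset = 0; offset < 16; offset += 4) {
		uint32_t reg  = offset / 4;	/* which GICD_ICFGRn */
		uint32_t irq0 = reg * 16;	/* first irq it covers */

		printf("offset %2u: GICD_ICFGR%u, irqs %2u..%2u, "
		       "vgic word byte %u, %s halfword\n",
		       offset, reg, irq0, irq0 + 15,
		       offset >> 1,			/* bitmap lookup in the fix */
		       (offset & 4) ? "upper" : "lower");/* offset & 4, not & 2 */
	}
	return 0;
}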
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 	case 0:
 		if (!target_cpus)
 			return;
+		break;
 
 	case 1:
 		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
 	if (addr + size < addr)
 		return -EINVAL;
 
+	*ioaddr = addr;
 	ret = vgic_ioaddr_overlap(kvm);
 	if (ret)
-		return ret;
-	*ioaddr = addr;
+		*ioaddr = VGIC_ADDR_UNDEF;
+
 	return ret;
 }
 
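
The vgic_dispatch_sgi() hunk above is a missing-break bug: without it, the "use the caller's target list" case fell through into the "broadcast to everyone but self" case and clobbered target_cpus. A minimal standalone illustration; the dispatch() wrapper and its values are hypothetical, but the case bodies mirror the kernel code:

#include <stdio.h>

static unsigned dispatch(unsigned filter, unsigned target_cpus,
			 unsigned nrcpus, unsigned vcpu_id)
{
	switch (filter) {
	case 0:				/* use the caller-supplied target list */
		if (!target_cpus)
			return 0;
		break;			/* the break the fix adds */
	case 1:				/* all CPUs except the sender */
		target_cpus = ((1u << nrcpus) - 1) & ~(1u << vcpu_id) & 0xff;
		break;
	case 2:				/* only the sender */
		target_cpus = 1u << vcpu_id;
		break;
	}
	return target_cpus;
}

int main(void)
{
	/* With the break, filter 0 keeps the explicit mask 0x2; without
	 * it, the mask was rewritten to "everyone but vcpu 0". */
	printf("filter 0 -> targets 0x%x\n", dispatch(0, 0x2, 4, 0));
	printf("filter 1 -> targets 0x%x\n", dispatch(1, 0x2, 4, 0));
	return 0;
}

The vgic_ioaddr_assign() hunk is a complementary ordering fix: the address is published before the overlap check so vgic_ioaddr_overlap() actually sees it, then rolled back to VGIC_ADDR_UNDEF on failure.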
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 8db43701016f..bf06577fea51 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 	if (dev->entries_nr == 0)
 		return r;
 
-	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+	r = pci_enable_msix_exact(dev->dev,
+				  dev->host_msix_entries, dev->entries_nr);
 	if (r)
 		return r;
 
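
The old pci_enable_msix() had a three-way return convention: a negative errno, zero on success, or a positive count of vectors that could be allocated instead, which callers tolerant of fewer vectors were expected to retry with. pci_enable_msix_exact() either enables exactly the requested number or fails with a negative errno, which is all this caller wants. A kernel-context sketch of the retry loop the new API makes unnecessary; illustrative only, not standalone-runnable, with dev/entries/nvec as in the function above:

#include <linux/pci.h>

/* Sketch of the legacy retry dance. KVM device assignment needs exactly
 * entries_nr vectors, so pci_enable_msix_exact() is the better fit. */
static int enable_msix_legacy(struct pci_dev *dev,
			      struct msix_entry *entries, int nvec)
{
	int rc;

	do {
		rc = pci_enable_msix(dev, entries, nvec);
		if (rc > 0)
			nvec = rc;	/* only rc vectors available: retry */
	} while (rc > 0);

	return rc;	/* 0 on success, negative errno on failure */
}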
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 10df100c4514..06e6401d6ef4 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 
-	mmdrop(mm);
+	mmput(mm);
 	kvm_put_kvm(vcpu->kvm);
 }
 
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 	flush_work(&work->work);
 #else
 	if (cancel_work_sync(&work->work)) {
-		mmdrop(work->mm);
+		mmput(work->mm);
 		kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
 		kmem_cache_free(async_pf_cache, work);
 	}
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
-	atomic_inc(&work->mm->mm_count);
+	atomic_inc(&work->mm->mm_users);
 	kvm_get_kvm(work->vcpu->kvm);
 
 	/* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	return 1;
 retry_sync:
 	kvm_put_kvm(work->vcpu->kvm);
-	mmdrop(work->mm);
+	mmput(work->mm);
 	kmem_cache_free(async_pf_cache, work);
 	return 0;
 }
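
The mm_count to mm_users switch is the substance of Oleg's fix: mm_count (released by mmdrop()) only keeps struct mm_struct itself from being freed, while mm_users (released by mmput()) keeps the address space alive, and the async page fault worker calls get_user_pages() on apf->mm after the faulting task may already have exited. A short kernel-context sketch of the pairing, illustrative only and using the 2014-era API shown in the diff:

#include <linux/mm_types.h>
#include <linux/sched.h>

/* Two reference counts on an mm:
 *   mm_count / mmdrop() - pins struct mm_struct only; the page tables
 *                         may already be torn down by exit_mmap().
 *   mm_users / mmput()  - pins the address space, which is what a
 *                         worker doing get_user_pages() must hold. */
static struct mm_struct *apf_pin_mm(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);	/* what the fix takes */
	return mm;
}

static void apf_unpin_mm(struct mm_struct *mm)
{
	mmput(mm);			/* pairs with the mm_users grab */
}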