summaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-04-27 19:13:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-04-27 19:13:31 -0400
commit46dc111dfe47bf47f23884cade3c8a355be87c8c (patch)
tree77c6a7d935b832044dfdbc7266a3726f9e555756 /virt
parent19b522dbad0d0c35d0741a74aef70ce6fe2d6fb4 (diff)
parent5e62493f1a70e7f13059544daaee05e40e8548e2 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Radim Krčmář: "ARM: - PSCI selection API, a leftover from 4.16 (for stable) - Kick vcpu on active interrupt affinity change - Plug a VMID allocation race on oversubscribed systems - Silence debug messages - Update Christoffer's email address (linaro -> arm) x86: - Expose userspace-relevant bits of a newly added feature - Fix TLB flushing on VMX with VPID, but without EPT" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: x86/headers/UAPI: Move DISABLE_EXITS KVM capability bits to the UAPI kvm: apic: Flush TLB after APIC mode/address change if VPIDs are in use arm/arm64: KVM: Add PSCI version selection API KVM: arm/arm64: vgic: Kick new VCPU on interrupt migration arm64: KVM: Demote SVE and LORegion warnings to debug only MAINTAINERS: Update e-mail address for Christoffer Dall KVM: arm/arm64: Close VMID generation race
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/arm/arm.c15
-rw-r--r--virt/kvm/arm/psci.c60
-rw-r--r--virt/kvm/arm/vgic/vgic.c8
3 files changed, 78 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
63static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); 63static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
64static u32 kvm_next_vmid; 64static u32 kvm_next_vmid;
65static unsigned int kvm_vmid_bits __read_mostly; 65static unsigned int kvm_vmid_bits __read_mostly;
66static DEFINE_SPINLOCK(kvm_vmid_lock); 66static DEFINE_RWLOCK(kvm_vmid_lock);
67 67
68static bool vgic_present; 68static bool vgic_present;
69 69
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
473{ 473{
474 phys_addr_t pgd_phys; 474 phys_addr_t pgd_phys;
475 u64 vmid; 475 u64 vmid;
476 bool new_gen;
476 477
477 if (!need_new_vmid_gen(kvm)) 478 read_lock(&kvm_vmid_lock);
479 new_gen = need_new_vmid_gen(kvm);
480 read_unlock(&kvm_vmid_lock);
481
482 if (!new_gen)
478 return; 483 return;
479 484
480 spin_lock(&kvm_vmid_lock); 485 write_lock(&kvm_vmid_lock);
481 486
482 /* 487 /*
483 * We need to re-check the vmid_gen here to ensure that if another vcpu 488 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
485 * use the same vmid. 490 * use the same vmid.
486 */ 491 */
487 if (!need_new_vmid_gen(kvm)) { 492 if (!need_new_vmid_gen(kvm)) {
488 spin_unlock(&kvm_vmid_lock); 493 write_unlock(&kvm_vmid_lock);
489 return; 494 return;
490 } 495 }
491 496
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
519 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); 524 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
520 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; 525 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
521 526
522 spin_unlock(&kvm_vmid_lock); 527 write_unlock(&kvm_vmid_lock);
523} 528}
524 529
525static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) 530static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 6919352cbf15..c4762bef13c6 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -18,6 +18,7 @@
18#include <linux/arm-smccc.h> 18#include <linux/arm-smccc.h>
19#include <linux/preempt.h> 19#include <linux/preempt.h>
20#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
21#include <linux/uaccess.h>
21#include <linux/wait.h> 22#include <linux/wait.h>
22 23
23#include <asm/cputype.h> 24#include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
427 smccc_set_retval(vcpu, val, 0, 0, 0); 428 smccc_set_retval(vcpu, val, 0, 0, 0);
428 return 1; 429 return 1;
429} 430}
431
432int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
433{
434 return 1; /* PSCI version */
435}
436
437int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
438{
439 if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
440 return -EFAULT;
441
442 return 0;
443}
444
445int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
446{
447 if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
448 void __user *uaddr = (void __user *)(long)reg->addr;
449 u64 val;
450
451 val = kvm_psci_version(vcpu, vcpu->kvm);
452 if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
453 return -EFAULT;
454
455 return 0;
456 }
457
458 return -EINVAL;
459}
460
461int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
462{
463 if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
464 void __user *uaddr = (void __user *)(long)reg->addr;
465 bool wants_02;
466 u64 val;
467
468 if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
469 return -EFAULT;
470
471 wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
472
473 switch (val) {
474 case KVM_ARM_PSCI_0_1:
475 if (wants_02)
476 return -EINVAL;
477 vcpu->kvm->arch.psci_version = val;
478 return 0;
479 case KVM_ARM_PSCI_0_2:
480 case KVM_ARM_PSCI_1_0:
481 if (!wants_02)
482 return -EINVAL;
483 vcpu->kvm->arch.psci_version = val;
484 return 0;
485 }
486 }
487
488 return -EINVAL;
489}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index bd125563b15b..702936cbe173 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -600,6 +600,7 @@ retry:
600 600
601 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 601 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
602 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 602 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
603 bool target_vcpu_needs_kick = false;
603 604
604 spin_lock(&irq->irq_lock); 605 spin_lock(&irq->irq_lock);
605 606
@@ -670,11 +671,18 @@ retry:
670 list_del(&irq->ap_list); 671 list_del(&irq->ap_list);
671 irq->vcpu = target_vcpu; 672 irq->vcpu = target_vcpu;
672 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); 673 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
674 target_vcpu_needs_kick = true;
673 } 675 }
674 676
675 spin_unlock(&irq->irq_lock); 677 spin_unlock(&irq->irq_lock);
676 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 678 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
677 spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); 679 spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
680
681 if (target_vcpu_needs_kick) {
682 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
683 kvm_vcpu_kick(target_vcpu);
684 }
685
678 goto retry; 686 goto retry;
679 } 687 }
680 688