author     Linus Torvalds <torvalds@linux-foundation.org>  2018-03-18 14:23:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-03-18 14:23:12 -0400
commit     3cd1d3273f25d9fdad6840e07c0bac65e95a2030
tree       ce70c2b748b26ff10a126378db4a0f677dc8da79
parent     9ef0f88fe5466c2ca1d2975549ba6be502c464c1
parent     daaf216c06fba4ee4dc3f62715667da929d68774
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "PPC:
   - fix bug leading to lost IPIs and smp_call_function_many() lockups
     on POWER9

  ARM:
   - locking fix
   - reset fix
   - GICv2 multi-source SGI injection fix
   - GICv2-on-v3 MMIO synchronization fix
   - make the console less verbose

  x86:
   - fix device passthrough when AMD SME is active"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Fix device passthrough when SME is active
  kvm: arm/arm64: vgic-v3: Tighten synchronization for guests using v2 on v3
  KVM: arm/arm64: vgic: Don't populate multiple LRs with the same vintid
  KVM: arm/arm64: Reduce verbosity of KVM init log
  KVM: arm/arm64: Reset mapped IRQs on VM reset
  KVM: arm/arm64: Avoid vcpu_load for other vcpu ioctls than KVM_RUN
  KVM: arm/arm64: vgic: Add missing irq_lock to vgic_mmio_read_pending
  KVM: PPC: Book3S HV: Fix trap number return from __kvmppc_vcore_entry
 arch/arm64/kvm/guest.c                   |  3
 arch/powerpc/kvm/book3s_hv_rmhandlers.S  | 10
 arch/x86/kvm/mmu.c                       |  4
 include/kvm/arm_vgic.h                   |  1
 include/linux/irqchip/arm-gic-v3.h       |  1
 include/linux/irqchip/arm-gic.h          |  1
 virt/kvm/arm/arch_timer.c                |  6
 virt/kvm/arm/arm.c                       |  9
 virt/kvm/arm/hyp/vgic-v3-sr.c            |  3
 virt/kvm/arm/mmu.c                       |  6
 virt/kvm/arm/vgic/vgic-mmio.c            |  3
 virt/kvm/arm/vgic/vgic-v2.c              | 11
 virt/kvm/arm/vgic/vgic-v3.c              |  9
 virt/kvm/arm/vgic/vgic.c                 | 87
 virt/kvm/arm/vgic/vgic.h                 |  3
 15 files changed, 117 insertions(+), 40 deletions(-)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d7e3299a7734..959e50d2588c 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 {
         int ret = 0;
 
-        vcpu_load(vcpu);
-
         trace_kvm_set_guest_debug(vcpu, dbg->control);
 
         if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
@@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         }
 
 out:
-        vcpu_put(vcpu);
         return ret;
 }
 
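These two hunks come from "KVM: arm/arm64: Avoid vcpu_load for other vcpu ioctls than KVM_RUN": the handler only manipulates in-memory vCPU state, so it does not need the vCPU's hardware context to be resident on the current physical CPU. A minimal userspace model of that reasoning, with every name below an illustrative stand-in rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
        bool hw_ctx_loaded;        /* register state resident on this CPU */
        unsigned int debug_flags;  /* plain memory, all this ioctl touches */
};

/* Expensive: models what vcpu_load()/vcpu_put() bracket in the kernel.
 * Only KVM_RUN-style paths genuinely need the context resident. */
static void vcpu_load(struct vcpu *v) { v->hw_ctx_loaded = true; }
static void vcpu_put(struct vcpu *v)  { v->hw_ctx_loaded = false; }

/* After the patch: update memory directly, no load/put bracket. */
static int set_guest_debug(struct vcpu *v, unsigned int control)
{
        v->debug_flags = control;
        return 0;
}

int main(void)
{
        struct vcpu v = { false, 0 };

        vcpu_load(&v);            /* what the removed lines used to do */
        vcpu_put(&v);
        set_guest_debug(&v, 1u);  /* works fine without the bracket */
        printf("flags=%u loaded=%d\n", v.debug_flags, v.hw_ctx_loaded);
        return 0;
}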
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f31f357b8c5a..d33264697a31 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -320,7 +320,6 @@ kvm_novcpu_exit:
         stw r12, STACK_SLOT_TRAP(r1)
         bl kvmhv_commence_exit
         nop
-        lwz r12, STACK_SLOT_TRAP(r1)
         b kvmhv_switch_to_host
 
 /*
@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 secondary_too_late:
         li r12, 0
+        stw r12, STACK_SLOT_TRAP(r1)
         cmpdi r4, 0
         beq 11f
         stw r12, VCPU_TRAP(r4)
@@ -1558,12 +1558,12 @@ mc_cont:
 3:      stw r5,VCPU_SLB_MAX(r9)
 
 guest_bypass:
+        stw r12, STACK_SLOT_TRAP(r1)
         mr r3, r12
         /* Increment exit count, poke other threads to exit */
         bl kvmhv_commence_exit
         nop
         ld r9, HSTATE_KVM_VCPU(r13)
-        lwz r12, VCPU_TRAP(r9)
 
         /* Stop others sending VCPU interrupts to this physical CPU */
         li r0, -1
@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
  * POWER7/POWER8 guest -> host partition switch code.
  * We don't have to lock against tlbies but we do
  * have to coordinate the hardware threads.
+ * Here STACK_SLOT_TRAP(r1) contains the trap number.
  */
 kvmhv_switch_to_host:
         /* Secondary threads wait for primary to do partition switch */
@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
         /* If HMI, call kvmppc_realmode_hmi_handler() */
+        lwz r12, STACK_SLOT_TRAP(r1)
         cmpwi r12, BOOK3S_INTERRUPT_HMI
         bne 27f
         bl kvmppc_realmode_hmi_handler
         nop
         cmpdi r3, 0
-        li r12, BOOK3S_INTERRUPT_HMI
         /*
          * At this point kvmppc_realmode_hmi_handler may have resync-ed
          * the TB, and if it has, we must not subtract the guest timebase
@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
         lwz r8, KVM_SPLIT_DO_RESTORE(r3)
         cmpwi r8, 0
         beq 47f
-        stw r12, STACK_SLOT_TRAP(r1)
         bl kvmhv_p9_restore_lpcr
         nop
-        lwz r12, STACK_SLOT_TRAP(r1)
         b 48f
 47:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
         li r0, KVM_GUEST_MODE_NONE
         stb r0, HSTATE_IN_GUEST(r13)
 
+        lwz r12, STACK_SLOT_TRAP(r1)    /* return trap # in r12 */
         ld r0, SFS+PPC_LR_STKOFF(r1)
         addi r1, r1, SFS
         mtlr r0
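All of the book3s_hv_rmhandlers.S hunks above implement one idea from "KVM: PPC: Book3S HV: Fix trap number return from __kvmppc_vcore_entry": r12 is a volatile register, so the trap number it carries can be clobbered across helper calls (the HMI path even overwrote it unconditionally with BOOK3S_INTERRUPT_HMI), which led to wrong trap values being returned, lost IPIs, and smp_call_function_many() lockups on POWER9. The fix spills the trap number into a stack slot once, early, and reloads it only on the way out. A C analogy of that spill/reload discipline, with hypothetical names:

#include <stdio.h>

static void helper(void)
{
        /* stands in for kvmhv_commence_exit() and friends; may clobber
         * caller-saved registers across a long call chain */
}

static int vcore_exit(int trap)
{
        /* spill once, right after the guest exits:
         * stw r12, STACK_SLOT_TRAP(r1) */
        int stack_slot_trap = trap;

        helper();

        /* reload only at the very end, instead of trying to reconstruct
         * the value from per-vcpu state that may not exist:
         * lwz r12, STACK_SLOT_TRAP(r1) */
        return stack_slot_trap;
}

int main(void)
{
        printf("trap = %#x\n", vcore_exit(0x500));
        return 0;
}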
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f551962ac294..763bb3bade63 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         else
                 pte_access &= ~ACC_WRITE_MASK;
 
+        if (!kvm_is_mmio_pfn(pfn))
+                spte |= shadow_me_mask;
+
         spte |= (u64)pfn << PAGE_SHIFT;
-        spte |= shadow_me_mask;
 
         if (pte_access & ACC_WRITE_MASK) {
 
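This is the AMD SME fix from the merge message: the memory-encryption mask (the SME C-bit, held in shadow_me_mask) must only be set in SPTEs that map normal RAM. Setting it on an MMIO pfn makes a passed-through device's registers be accessed through an "encrypted" physical address that the device cannot decode. A standalone model of the corrected SPTE construction; the mask position and the MMIO check below are illustrative stand-ins, not the kernel's values:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define SHADOW_ME_MASK (1ULL << 47)       /* illustrative C-bit position */

static bool kvm_is_mmio_pfn(uint64_t pfn)
{
        return pfn >= 0x80000;            /* pretend MMIO starts here */
}

static uint64_t make_spte(uint64_t pfn)
{
        uint64_t spte = 0;

        if (!kvm_is_mmio_pfn(pfn))        /* the fix: encrypt RAM only */
                spte |= SHADOW_ME_MASK;

        spte |= pfn << PAGE_SHIFT;
        return spte;
}

int main(void)
{
        printf("ram  spte: %#llx\n", (unsigned long long)make_spte(0x1000));
        printf("mmio spte: %#llx\n", (unsigned long long)make_spte(0x90000));
        return 0;
}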
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index cdbd142ca7f2..02924ae2527e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -360,6 +360,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
 
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c00c4c33e432..b26eccc78fb1 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -503,6 +503,7 @@
 
 #define ICH_HCR_EN                      (1 << 0)
 #define ICH_HCR_UIE                     (1 << 1)
+#define ICH_HCR_NPIE                    (1 << 3)
 #define ICH_HCR_TC                      (1 << 10)
 #define ICH_HCR_TALL0                   (1 << 11)
 #define ICH_HCR_TALL1                   (1 << 12)
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index d3453ee072fc..68d8b1f73682 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -84,6 +84,7 @@
 
 #define GICH_HCR_EN                     (1 << 0)
 #define GICH_HCR_UIE                    (1 << 1)
+#define GICH_HCR_NPIE                   (1 << 3)
 
 #define GICH_LR_VIRTUALID               (0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT      (10)
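Both header hunks add the same control bit for the two GIC generations: NPIE ("No Pending Interrupt Enable", bit 3 of GICH_HCR / ICH_HCR_EL2) asks the GIC to raise a maintenance interrupt once the List Registers no longer hold any pending interrupt. That maintenance interrupt is the hook the vgic code below uses to inject the remaining sources of a multi-source SGI. A trivial sketch of the set-then-fold pattern the later hunks apply to the emulated HCR value:

#include <stdint.h>
#include <stdio.h>

#define GICH_HCR_EN   (1u << 0)
#define GICH_HCR_UIE  (1u << 1)
#define GICH_HCR_NPIE (1u << 3)

int main(void)
{
        uint32_t hcr = GICH_HCR_EN;

        hcr |= GICH_HCR_NPIE;                   /* arm the "no pending" maintenance IRQ */
        printf("hcr = %#x\n", hcr);
        hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE); /* fold: clear the one-shot bits again */
        printf("hcr = %#x\n", hcr);
        return 0;
}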
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 70f4c30918eb..282389eb204f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -581,6 +581,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
+        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
@@ -594,6 +595,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
         ptimer->cnt_ctl = 0;
         kvm_timer_update_state(vcpu);
 
+        if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
+                kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
+
         return 0;
 }
 
@@ -767,7 +771,7 @@ int kvm_timer_hyp_init(bool has_gic)
                 static_branch_enable(&has_gic_active_state);
         }
 
-        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
+        kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
 
         cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                           "kvm/arm/timer:starting", kvm_timer_starting_cpu,
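The timer hunks wire VM reset into the new kvm_vgic_reset_mapped_irq() helper (declared in arm_vgic.h above, defined in vgic.c below): a hardware-mapped interrupt such as the virtual timer's carries virtual active/pending state that must be wiped on reset, otherwise a stale pending timer interrupt fires straight into the freshly reset guest. A userspace sketch of that reset, with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct vgic_irq {
        bool hw;            /* mapped to a physical interrupt? */
        bool active;
        bool pending_latch;
        bool line_level;
};

/* models kvm_vgic_reset_mapped_irq(): only mapped IRQs need this */
static void reset_mapped_irq(struct vgic_irq *irq)
{
        if (!irq->hw)
                return;

        irq->active = false;
        irq->pending_latch = false;
        irq->line_level = false;
}

int main(void)
{
        struct vgic_irq vtimer_irq = { true, false, true, true };

        reset_mapped_irq(&vtimer_irq);  /* as on kvm_timer_vcpu_reset() */
        printf("pending=%d level=%d\n",
               vtimer_irq.pending_latch, vtimer_irq.line_level);
        return 0;
}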
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 86941f6181bb..53572304843b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -384,14 +384,11 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        vcpu_load(vcpu);
-
         if (vcpu->arch.power_off)
                 mp_state->mp_state = KVM_MP_STATE_STOPPED;
         else
                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
 
-        vcpu_put(vcpu);
         return 0;
 }
 
@@ -400,8 +397,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
         int ret = 0;
 
-        vcpu_load(vcpu);
-
         switch (mp_state->mp_state) {
         case KVM_MP_STATE_RUNNABLE:
                 vcpu->arch.power_off = false;
@@ -413,7 +408,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                 ret = -EINVAL;
         }
 
-        vcpu_put(vcpu);
         return ret;
 }
 
@@ -1036,8 +1030,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_device_attr attr;
         long r;
 
-        vcpu_load(vcpu);
-
         switch (ioctl) {
         case KVM_ARM_VCPU_INIT: {
                 struct kvm_vcpu_init init;
@@ -1114,7 +1106,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                 r = -EINVAL;
         }
 
-        vcpu_put(vcpu);
         return r;
 }
 
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index f5c3d6d7019e..b89ce5432214 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -215,7 +215,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
          * are now visible to the system register interface.
          */
         if (!cpu_if->vgic_sre) {
-                dsb(st);
+                dsb(sy);
+                isb();
                 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
         }
 
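This is the GICv2-on-v3 synchronization fix: dsb(st) only orders prior stores, but the following read of ICH_VMCR_EL2 goes through the system-register interface and must observe the effects of the guest's earlier memory-mapped GIC accesses, so the patch uses a full dsb(sy) followed by an isb() before the sysreg read. Roughly what the two kernel macros expand to at this call site (illustrative, arm64-only inline asm, not the kernel's exact definitions):

/* arm64-only sketch of the strengthened barrier pair */
static inline void gic_mmio_to_sysreg_sync(void)
{
        asm volatile("dsb sy" : : : "memory"); /* complete all prior memory accesses */
        asm volatile("isb" : : : "memory");    /* resynchronize the CPU context */
}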
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ec62d1cccab7..b960acdd0c05 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1810,9 +1810,9 @@ int kvm_mmu_init(void)
          */
         BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
 
-        kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
-        kvm_info("HYP VA range: %lx:%lx\n",
+        kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
+        kvm_debug("HYP VA range: %lx:%lx\n",
                  kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
         if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
             hyp_idmap_start < kern_hyp_va(~0UL) &&
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 83d82bd7dc4e..dbe99d635c80 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -113,9 +113,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
         /* Loop over all IRQs affected by this read */
         for (i = 0; i < len * 8; i++) {
                 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+                unsigned long flags;
 
+                spin_lock_irqsave(&irq->irq_lock, flags);
                 if (irq_is_pending(irq))
                         value |= (1U << i);
+                spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                 vgic_put_irq(vcpu->kvm, irq);
         }
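This hunk makes vgic_mmio_read_pending() honor the locking rule that the vgic.h hunk below documents: irq_is_pending() reads two fields (pending_latch and line_level), and sampling them without the per-IRQ lock can observe a concurrent writer half-way through an update. A runnable userspace model of the corrected read side, with pthread_mutex_t standing in for the kernel spinlock and simplified stand-in types:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vgic_irq {
        pthread_mutex_t irq_lock;
        bool pending_latch; /* written by the injection path */
        bool line_level;    /* written by the level-triggered path */
};

/* caller must hold irq->irq_lock, as the vgic.h comment now states */
static bool irq_is_pending(const struct vgic_irq *irq)
{
        return irq->pending_latch || irq->line_level;
}

static uint32_t read_pending_bits(struct vgic_irq *irqs, int n)
{
        uint32_t value = 0;

        for (int i = 0; i < n; i++) {
                /* sample both bits atomically w.r.t. concurrent writers */
                pthread_mutex_lock(&irqs[i].irq_lock);
                if (irq_is_pending(&irqs[i]))
                        value |= 1u << i;
                pthread_mutex_unlock(&irqs[i].irq_lock);
        }
        return value;
}

int main(void)
{
        struct vgic_irq irqs[2] = {
                { PTHREAD_MUTEX_INITIALIZER, true,  false },
                { PTHREAD_MUTEX_INITIALIZER, false, false },
        };

        printf("pending mask: %#x\n", read_pending_bits(irqs, 2));
        return 0;
}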
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index c32d7b93ffd1..29556f71b691 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void)
                 vgic_v2_write_lr(i, 0);
 }
 
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
+{
+        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+        cpuif->vgic_hcr |= GICH_HCR_NPIE;
+}
+
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
         struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
         int lr;
         unsigned long flags;
 
-        cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+        cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
 
         for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                 u32 val = cpuif->vgic_lr[lr];
@@ -410,7 +417,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
         kvm_vgic_global_state.type = VGIC_V2;
         kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
 
-        kvm_info("vgic-v2@%llx\n", info->vctrl.start);
+        kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
 
         return 0;
 out:
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 6b329414e57a..0ff2006f3781 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -26,6 +26,13 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
+{
+        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+
+        cpuif->vgic_hcr |= ICH_HCR_NPIE;
+}
+
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
         struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -47,7 +54,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
         int lr;
         unsigned long flags;
 
-        cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+        cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
 
         for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                 u64 val = cpuif->vgic_lr[lr];
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c7c5ef190afa..8201899126f6 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -495,6 +495,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
         return ret;
 }
 
+/**
+ * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
+ * @vcpu: The VCPU pointer
+ * @vintid: The INTID of the interrupt
+ *
+ * Reset the active and pending states of a mapped interrupt. Kernel
+ * subsystems injecting mapped interrupts should reset their interrupt lines
+ * when we are doing a reset of the VM.
+ */
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
+{
+        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+        unsigned long flags;
+
+        if (!irq->hw)
+                goto out;
+
+        spin_lock_irqsave(&irq->irq_lock, flags);
+        irq->active = false;
+        irq->pending_latch = false;
+        irq->line_level = false;
+        spin_unlock_irqrestore(&irq->irq_lock, flags);
+out:
+        vgic_put_irq(vcpu->kvm, irq);
+}
+
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
         struct vgic_irq *irq;
@@ -684,22 +710,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
                 vgic_v3_set_underflow(vcpu);
 }
 
+static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
+{
+        if (kvm_vgic_global_state.type == VGIC_V2)
+                vgic_v2_set_npie(vcpu);
+        else
+                vgic_v3_set_npie(vcpu);
+}
+
 /* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
+                                 bool *multi_sgi)
 {
         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
         struct vgic_irq *irq;
         int count = 0;
 
+        *multi_sgi = false;
+
         DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
         list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                 spin_lock(&irq->irq_lock);
                 /* GICv2 SGIs can count for more than one... */
-                if (vgic_irq_is_sgi(irq->intid) && irq->source)
-                        count += hweight8(irq->source);
-                else
+                if (vgic_irq_is_sgi(irq->intid) && irq->source) {
+                        int w = hweight8(irq->source);
+
+                        count += w;
+                        *multi_sgi |= (w > 1);
+                } else {
                         count++;
+                }
                 spin_unlock(&irq->irq_lock);
         }
         return count;
@@ -710,28 +751,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 {
         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
         struct vgic_irq *irq;
-        int count = 0;
+        int count;
+        bool npie = false;
+        bool multi_sgi;
+        u8 prio = 0xff;
 
         DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
-        if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
+        count = compute_ap_list_depth(vcpu, &multi_sgi);
+        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
                 vgic_sort_ap_list(vcpu);
 
+        count = 0;
+
         list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                 spin_lock(&irq->irq_lock);
 
-                if (unlikely(vgic_target_oracle(irq) != vcpu))
-                        goto next;
-
                 /*
-                 * If we get an SGI with multiple sources, try to get
-                 * them in all at once.
+                 * If we have multi-SGIs in the pipeline, we need to
+                 * guarantee that they are all seen before any IRQ of
+                 * lower priority. In that case, we need to filter out
+                 * these interrupts by exiting early. This is easy as
+                 * the AP list has been sorted already.
                  */
-                do {
+                if (multi_sgi && irq->priority > prio) {
+                        spin_unlock(&irq->irq_lock);
+                        break;
+                }
+
+                if (likely(vgic_target_oracle(irq) == vcpu)) {
                         vgic_populate_lr(vcpu, irq, count++);
-                } while (irq->source && count < kvm_vgic_global_state.nr_lr);
 
-next:
+                        if (irq->source) {
+                                npie = true;
+                                prio = irq->priority;
+                        }
+                }
+
                 spin_unlock(&irq->irq_lock);
 
                 if (count == kvm_vgic_global_state.nr_lr) {
@@ -742,6 +798,9 @@ next:
                 }
         }
 
+        if (npie)
+                vgic_set_npie(vcpu);
+
         vcpu->arch.vgic_cpu.used_lrs = count;
 
         /* Nuke remaining LRs */
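This is the heart of "KVM: arm/arm64: vgic: Don't populate multiple LRs with the same vintid". The old do/while loop stuffed every source of a multi-source GICv2 SGI into successive LRs in one go, producing several LRs with the same vintid, which the architecture forbids. The new policy gives each multi-source SGI at most one LR per flush, refuses to inject anything of lower priority behind a partially delivered SGI (hence the early break on the already sorted AP list), and sets NPIE so a maintenance interrupt refills the LRs with the remaining sources. A standalone model of that loop, with simplified stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LR 4

struct irq {
        int id;
        uint8_t priority; /* lower value = higher priority */
        uint8_t sources;  /* remaining SGI source CPUs, 0 for non-SGI */
};

/* ap_list must already be sorted by priority, as after vgic_sort_ap_list() */
static int flush_lr_state(struct irq *ap_list, int n, bool multi_sgi, bool *npie)
{
        uint8_t prio = 0xff;
        int count = 0;

        *npie = false;

        for (int i = 0; i < n && count < NR_LR; i++) {
                struct irq *irq = &ap_list[i];

                /* don't let lower priority overtake a half-delivered SGI */
                if (multi_sgi && irq->priority > prio)
                        break;

                printf("LR%d <- irq %d\n", count, irq->id);
                count++;

                if (irq->sources) {   /* sources still pending? */
                        *npie = true; /* ask for a maintenance IRQ */
                        prio = irq->priority;
                }
        }
        return count;
}

int main(void)
{
        /* SGI 1 with two sources left, then a lower-priority SPI */
        struct irq ap_list[] = {
                { .id = 1,  .priority = 0x20, .sources = 2 },
                { .id = 35, .priority = 0x80, .sources = 0 },
        };
        bool npie;
        int used = flush_lr_state(ap_list, 2, true, &npie);

        printf("used_lrs=%d npie=%d\n", used, npie);
        return 0;
}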
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 12c37b89f7a3..f5b8519e5546 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -96,6 +96,7 @@
 /* we only support 64 kB translation table page size */
 #define KVM_ITS_L1E_ADDR_MASK           GENMASK_ULL(51, 16)
 
+/* Requires the irq_lock to be held by the caller. */
 static inline bool irq_is_pending(struct vgic_irq *irq)
 {
         if (irq->config == VGIC_CONFIG_EDGE)
@@ -159,6 +160,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val);
@@ -188,6 +190,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_enable(struct kvm_vcpu *vcpu);
