author    Linus Torvalds <torvalds@linux-foundation.org>    2018-05-06 11:46:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-05-06 11:46:29 -0400
commit    701e39d05119229b92ecca4add7b7ed2193622c3 (patch)
tree      778729107b8b87c47640c6b86c4c74a7800c3de5
parent    772d4f84c6010f80a932f9e7e47a8acbbc0a527b (diff)
parent    ecf08dad723d3e000aecff6c396f54772d124733 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Radim Krčmář:
 "ARM:
   - Fix proxying of GICv2 CPU interface accesses
   - Fix crash when switching to BE
   - Track source vcpu for GICv2 SGIs
   - Fix an outdated bit of documentation

  x86:
   - Speed up injection of expired timers (for stable)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: remove APIC Timer periodic/oneshot spikes
  arm64: vgic-v2: Fix proxying of cpuif access
  KVM: arm/arm64: vgic_init: Cleanup reference to process_maintenance
  KVM: arm64: Fix order of vcpu_write_sys_reg() arguments
  KVM: arm/arm64: vgic: Fix source vcpu issues for GICv2 SGI
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h       2
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c  24
-rw-r--r--  arch/x86/kvm/lapic.c                      37
-rw-r--r--  include/kvm/arm_vgic.h                     1
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c              2
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c             10
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c               38
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c               49
-rw-r--r--  virt/kvm/arm/vgic/vgic.c                  30
-rw-r--r--  virt/kvm/arm/vgic/vgic.h                  14
10 files changed, 122 insertions(+), 85 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 23b33e8ea03a..1dab3a984608 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -333,7 +333,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 	} else {
 		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 		sctlr |= (1 << 25);
-		vcpu_write_sys_reg(vcpu, SCTLR_EL1, sctlr);
+		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
 	}
 }
 
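The bug fixed here is easy to reproduce in miniature: vcpu_write_sys_reg() takes the value before the register index, and since both parameters are plain integers a transposed call compiles without complaint. A minimal userspace sketch of the failure mode (the helper and the register index below are illustrative stand-ins, not the real KVM API):

#include <stdint.h>
#include <stdio.h>

static uint64_t regs[64];

/* Stand-in with the same parameter order as vcpu_write_sys_reg():
 * value first, register index second. The index is masked only to
 * keep this demo memory-safe. */
static void write_sys_reg(uint64_t val, int reg)
{
	regs[reg & 63] = val;
}

int main(void)
{
	enum { SCTLR_EL1 = 5 };               /* illustrative index */
	uint64_t sctlr = regs[SCTLR_EL1] | (1 << 25);  /* set the EE bit */

	write_sys_reg(SCTLR_EL1, sctlr);  /* transposed: stores the index 5, not the value */
	write_sys_reg(sctlr, SCTLR_EL1);  /* fixed order: stores the new SCTLR value */
	printf("SCTLR_EL1 = %#llx\n", (unsigned long long)regs[SCTLR_EL1]);
	return 0;
}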
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 86801b6055d6..39be799d0417 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -18,11 +18,20 @@
 #include <linux/compiler.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/kvm_host.h>
+#include <linux/swab.h>
 
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
+static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
+
+	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
+}
+
 /*
  * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
  * guest.
@@ -64,14 +73,19 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	addr += fault_ipa - vgic->vgic_cpu_base;
 
 	if (kvm_vcpu_dabt_iswrite(vcpu)) {
-		u32 data = vcpu_data_guest_to_host(vcpu,
-						   vcpu_get_reg(vcpu, rd),
-						   sizeof(u32));
+		u32 data = vcpu_get_reg(vcpu, rd);
+		if (__is_be(vcpu)) {
+			/* guest pre-swabbed data, undo this for writel() */
+			data = swab32(data);
+		}
 		writel_relaxed(data, addr);
 	} else {
 		u32 data = readl_relaxed(addr);
-		vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
-							       sizeof(u32)));
+		if (__is_be(vcpu)) {
+			/* guest expects swabbed data */
+			data = swab32(data);
+		}
+		vcpu_set_reg(vcpu, rd, data);
 	}
 
 	return 1;
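For readers unfamiliar with the endianness issue here: writel_relaxed() always performs a little-endian store, while a big-endian guest has already byte-reversed the value it placed in its register, so the proxy must swab it back (and the mirror case applies on reads). A self-contained userspace sketch of that logic, where swab32() reimplements the <linux/swab.h> helper and guest_is_be stands in for __is_be():

#include <stdint.h>
#include <stdio.h>

/* Byte-reversal as done by the kernel's swab32(). */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t guest_reg = 0x12345678;  /* value as a BE guest holds it */
	int guest_is_be = 1;              /* stand-in for __is_be(vcpu) */
	uint32_t data = guest_reg;

	if (guest_is_be)
		data = swab32(data);      /* undo the guest's pre-swab */
	printf("value handed to writel_relaxed: %#x\n", data);
	return 0;
}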
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 70dcb5548022..b74c9c1405b9 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1463,23 +1463,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
 	local_irq_restore(flags);
 }
 
-static void start_sw_period(struct kvm_lapic *apic)
-{
-	if (!apic->lapic_timer.period)
-		return;
-
-	if (apic_lvtt_oneshot(apic) &&
-	    ktime_after(ktime_get(),
-			apic->lapic_timer.target_expiration)) {
-		apic_timer_expired(apic);
-		return;
-	}
-
-	hrtimer_start(&apic->lapic_timer.timer,
-		      apic->lapic_timer.target_expiration,
-		      HRTIMER_MODE_ABS_PINNED);
-}
-
 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
 {
 	ktime_t now, remaining;
@@ -1546,6 +1529,26 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
 			apic->lapic_timer.period);
 }
 
+static void start_sw_period(struct kvm_lapic *apic)
+{
+	if (!apic->lapic_timer.period)
+		return;
+
+	if (ktime_after(ktime_get(),
+			apic->lapic_timer.target_expiration)) {
+		apic_timer_expired(apic);
+
+		if (apic_lvtt_oneshot(apic))
+			return;
+
+		advance_periodic_target_expiration(apic);
+	}
+
+	hrtimer_start(&apic->lapic_timer.timer,
+		      apic->lapic_timer.target_expiration,
+		      HRTIMER_MODE_ABS_PINNED);
+}
+
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
 {
 	if (!lapic_in_kernel(vcpu))
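The net effect of moving and reworking start_sw_period() is that an already-expired deadline now injects the interrupt immediately and, for periodic timers, advances the deadline by a whole period instead of re-arming relative to "now", which was the source of the periodic/oneshot spikes. A toy userspace model of the reworked control flow; the struct and helpers below are simplified stand-ins, not the KVM types:

#include <stdio.h>
#include <time.h>

/* Toy model: if the deadline has already passed, inject the expiry
 * now; one-shot timers stop there, periodic timers advance the
 * deadline by a whole period and re-arm. Times are nanoseconds. */
struct sw_timer {
	long long period;             /* 0 = disarmed */
	long long target_expiration;  /* absolute deadline */
	int oneshot;
};

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void start_sw_period(struct sw_timer *t)
{
	if (!t->period)
		return;

	if (now_ns() > t->target_expiration) {
		printf("timer expired -> inject interrupt\n");
		if (t->oneshot)
			return;
		/* advance in whole periods, don't restart at 'now' */
		t->target_expiration += t->period;
	}
	printf("arm hrtimer for absolute deadline %lld\n",
	       t->target_expiration);
}

int main(void)
{
	struct sw_timer t = { 1000000, now_ns() - 1, 0 };  /* late, periodic */
	start_sw_period(&t);
	return 0;
}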
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 24f03941ada8..e7efe12a81bd 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -131,6 +131,7 @@ struct vgic_irq {
 		u32 mpidr;	/* GICv3 target VCPU */
 	};
 	u8 source;		/* GICv2 SGIs only */
+	u8 active_source;	/* GICv2 SGIs only */
 	u8 priority;
 	enum vgic_irq_config config;	/* Level or edge */
 
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 68378fe17a0e..e07156c30323 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -423,7 +423,7 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 	 * We cannot rely on the vgic maintenance interrupt to be
 	 * delivered synchronously. This means we can only use it to
 	 * exit the VM, and we perform the handling of EOIed
-	 * interrupts on the exit path (see vgic_process_maintenance).
+	 * interrupts on the exit path (see vgic_fold_lr_state).
 	 */
 	return IRQ_HANDLED;
 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index dbe99d635c80..ff9655cfeb2f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -289,10 +289,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);
 
-	if (irq->hw)
+	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
-	else
+	} else {
+		u32 model = vcpu->kvm->arch.vgic.vgic_model;
+
 		irq->active = active;
+		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+		    active && vgic_irq_is_sgi(irq->intid))
+			irq->active_source = requester_vcpu->vcpu_id;
+	}
 
 	if (irq->active)
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
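The new branch records which vCPU activated a GICv2 SGI through the distributor's GICD_ISACTIVER register, so that the source can be re-encoded into a list register later. A stripped-down sketch of that bookkeeping; the types and helpers below are illustrative stand-ins for the vgic structures:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct vgic_irq: a GICv2 SGI's active
 * state carries the source CPU id, so activating it via MMIO must
 * remember the requester's vcpu_id for later LR rebuilds. */
struct virq {
	uint32_t intid;
	int active;
	uint8_t active_source;   /* GICv2 SGIs only */
};

static int is_sgi(uint32_t intid) { return intid < 16; }

static void change_active(struct virq *irq, int active, int requester_vcpu_id)
{
	irq->active = active;
	if (active && is_sgi(irq->intid))
		irq->active_source = (uint8_t)requester_vcpu_id;
}

int main(void)
{
	struct virq sgi3 = { .intid = 3 };

	change_active(&sgi3, 1, 2);  /* vcpu 2 sets SGI 3 active */
	printf("SGI %u active, source vcpu %u\n",
	       sgi3.intid, sgi3.active_source);
	return 0;
}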
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 45aa433f018f..a5f2e44f1c33 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -37,13 +37,6 @@ void vgic_v2_init_lrs(void)
 		vgic_v2_write_lr(i, 0);
 }
 
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
-	cpuif->vgic_hcr |= GICH_HCR_NPIE;
-}
-
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -71,13 +64,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
+	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
-		u32 intid = val & GICH_LR_VIRTUALID;
+		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
 		struct vgic_irq *irq;
 
+		/* Extract the source vCPU id from the LR */
+		cpuid = val & GICH_LR_PHYSID_CPUID;
+		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+		cpuid &= 7;
+
 		/* Notify fds when the guest EOI'ed a level-triggered SPI */
 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
 			kvm_notify_acked_irq(vcpu->kvm, 0,
@@ -90,17 +88,16 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 		/* Always preserve the active bit */
 		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
 
+		if (irq->active && vgic_irq_is_sgi(intid))
+			irq->active_source = cpuid;
+
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & GICH_LR_PENDING_BIT)) {
 			irq->pending_latch = true;
 
-			if (vgic_irq_is_sgi(intid)) {
-				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+			if (vgic_irq_is_sgi(intid))
 				irq->source |= (1 << cpuid);
-			}
 		}
 
 		/*
@@ -152,8 +149,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	u32 val = irq->intid;
 	bool allow_pending = true;
 
-	if (irq->active)
+	if (irq->active) {
 		val |= GICH_LR_ACTIVE_BIT;
+		if (vgic_irq_is_sgi(irq->intid))
+			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+		if (vgic_irq_is_multi_sgi(irq)) {
+			allow_pending = false;
+			val |= GICH_LR_EOI;
+		}
+	}
 
 	if (irq->hw) {
 		val |= GICH_LR_HW;
158 if (irq->hw) { 162 if (irq->hw) {
159 val |= GICH_LR_HW; 163 val |= GICH_LR_HW;
@@ -190,8 +194,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 			BUG_ON(!src);
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
-			if (irq->source)
+			if (irq->source) {
 				irq->pending_latch = true;
+				val |= GICH_LR_EOI;
+			}
 		}
 	}
 
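Both the fold and populate paths above move the SGI source field in and out of bits [12:10] of a GICv2 list register. A small userspace round-trip of that encoding, using the same mask and shift constants as <linux/irqchip/arm-gic.h>; the extra "& 7" matters because the PHYSID field is ten bits wide while an SGI source id only occupies the low three:

#include <stdint.h>
#include <stdio.h>

#define GICH_LR_VIRTUALID		0x3ffu
#define GICH_LR_PHYSID_CPUID_SHIFT	10
#define GICH_LR_PHYSID_CPUID		(0x3ffu << GICH_LR_PHYSID_CPUID_SHIFT)

int main(void)
{
	/* Populate: SGI 5, sourced by vcpu 3 */
	uint32_t lr = 5 | (3u << GICH_LR_PHYSID_CPUID_SHIFT);

	/* Fold: extract the source vCPU id, as in vgic_v2_fold_lr_state() */
	uint32_t cpuid = lr & GICH_LR_PHYSID_CPUID;
	cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
	cpuid &= 7;	/* SGI source ids occupy only bits [12:10] */

	printf("LR %#x -> intid %u, source vcpu %u\n",
	       lr, lr & GICH_LR_VIRTUALID, cpuid);
	return 0;
}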
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 8195f52ae6f0..c7423f3768e5 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -27,13 +27,6 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
-	cpuif->vgic_hcr |= ICH_HCR_NPIE;
-}
-
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -55,17 +48,23 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
+	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
-		u32 intid;
+		u32 intid, cpuid;
 		struct vgic_irq *irq;
+		bool is_v2_sgi = false;
 
-		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		cpuid = val & GICH_LR_PHYSID_CPUID;
+		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+
+		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
 			intid = val & ICH_LR_VIRTUAL_ID_MASK;
-		else
+		} else {
 			intid = val & GICH_LR_VIRTUALID;
+			is_v2_sgi = vgic_irq_is_sgi(intid);
+		}
 
 		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
@@ -81,18 +80,16 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		/* Always preserve the active bit */
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
 
+		if (irq->active && is_v2_sgi)
+			irq->active_source = cpuid;
+
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & ICH_LR_PENDING_BIT)) {
 			irq->pending_latch = true;
 
-			if (vgic_irq_is_sgi(intid) &&
-			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
-				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+			if (is_v2_sgi)
 				irq->source |= (1 << cpuid);
-			}
 		}
 
 		/*
@@ -133,10 +130,20 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 {
 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u64 val = irq->intid;
-	bool allow_pending = true;
+	bool allow_pending = true, is_v2_sgi;
 
-	if (irq->active)
+	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
+		     model == KVM_DEV_TYPE_ARM_VGIC_V2);
+
+	if (irq->active) {
 		val |= ICH_LR_ACTIVE_BIT;
+		if (is_v2_sgi)
+			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+		if (vgic_irq_is_multi_sgi(irq)) {
+			allow_pending = false;
+			val |= ICH_LR_EOI;
+		}
+	}
 
 	if (irq->hw) {
 		val |= ICH_LR_HW;
@@ -174,8 +181,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 			BUG_ON(!src);
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
-			if (irq->source)
+			if (irq->source) {
 				irq->pending_latch = true;
+				val |= ICH_LR_EOI;
+			}
 		}
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 702936cbe173..97bfba8d9a59 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -725,14 +725,6 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 		vgic_v3_set_underflow(vcpu);
 }
 
-static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
-{
-	if (kvm_vgic_global_state.type == VGIC_V2)
-		vgic_v2_set_npie(vcpu);
-	else
-		vgic_v3_set_npie(vcpu);
-}
-
 /* Requires the ap_list_lock to be held. */
 static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 				 bool *multi_sgi)
@@ -746,17 +738,15 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		int w;
+
 		spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
-		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
-			int w = hweight8(irq->source);
-
-			count += w;
-			*multi_sgi |= (w > 1);
-		} else {
-			count++;
-		}
+		w = vgic_irq_get_lr_count(irq);
 		spin_unlock(&irq->irq_lock);
+
+		count += w;
+		*multi_sgi |= (w > 1);
 	}
 	return count;
 }
@@ -767,7 +757,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	int count;
-	bool npie = false;
 	bool multi_sgi;
 	u8 prio = 0xff;
 
@@ -797,10 +786,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		if (likely(vgic_target_oracle(irq) == vcpu)) {
 			vgic_populate_lr(vcpu, irq, count++);
 
-			if (irq->source) {
-				npie = true;
+			if (irq->source)
 				prio = irq->priority;
-			}
 		}
 
 		spin_unlock(&irq->irq_lock);
@@ -813,9 +800,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (npie)
-		vgic_set_npie(vcpu);
-
 	vcpu->arch.vgic_cpu.used_lrs = count;
 
 	/* Nuke remaining LRs */
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 830e815748a0..32c25d42c93f 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -110,6 +110,20 @@ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
 	return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
 }
 
+static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
+{
+	/* Account for the active state as an interrupt */
+	if (vgic_irq_is_sgi(irq->intid) && irq->source)
+		return hweight8(irq->source) + irq->active;
+
+	return irq_is_pending(irq) || irq->active;
+}
+
+static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
+{
+	return vgic_irq_get_lr_count(irq) > 1;
+}
+
 /*
  * This struct provides an intermediate representation of the fields contained
  * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
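These two helpers centralize the LR-accounting rule used by compute_ap_list_depth() and the populate paths: a GICv2 SGI needs one list register per pending source, plus one more if it is also active, and anything needing more than one LR counts as a multi-SGI. A quick userspace check of that arithmetic; hweight8() reimplements the kernel's population count and the struct is a simplified stand-in:

#include <stdint.h>
#include <stdio.h>

/* Population count of a byte, as the kernel's hweight8() computes. */
static int hweight8(uint8_t x)
{
	int n = 0;
	while (x) { n += x & 1; x >>= 1; }
	return n;
}

struct virq { uint32_t intid; uint8_t source; int active; int pending; };

static int is_sgi(uint32_t intid) { return intid < 16; }

static int lr_count(const struct virq *irq)
{
	/* one LR per pending source, plus one if also active */
	if (is_sgi(irq->intid) && irq->source)
		return hweight8(irq->source) + irq->active;
	return irq->pending || irq->active;
}

int main(void)
{
	/* SGI 1 pending from vcpus 0 and 2, and currently active */
	struct virq sgi = { .intid = 1, .source = 0x05, .active = 1 };

	printf("LRs needed: %d, multi-SGI: %s\n",
	       lr_count(&sgi), lr_count(&sgi) > 1 ? "yes" : "no");
	return 0;
}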