aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/include/asm/kvm_host.h10
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h5
-rw-r--r--arch/arm/kvm/coproc.c4
-rw-r--r--arch/arm/kvm/reset.c24
-rw-r--r--arch/arm64/include/asm/kvm_host.h11
-rw-r--r--arch/arm64/kvm/hyp/switch.c5
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c5
-rw-r--r--arch/arm64/kvm/reset.c50
-rw-r--r--arch/arm64/kvm/sys_regs.c50
-rw-r--r--include/kvm/arm_vgic.h6
-rw-r--r--virt/kvm/arm/arm.c10
-rw-r--r--virt/kvm/arm/mmu.c9
-rw-r--r--virt/kvm/arm/psci.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c30
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c34
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.c118
22 files changed, 303 insertions, 168 deletions
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index ca56537b61bc..50e89869178a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
147 148
148typedef struct kvm_cpu_context kvm_cpu_context_t; 149typedef struct kvm_cpu_context kvm_cpu_context_t;
149 150
151struct vcpu_reset_state {
152 unsigned long pc;
153 unsigned long r0;
154 bool be;
155 bool reset;
156};
157
150struct kvm_vcpu_arch { 158struct kvm_vcpu_arch {
151 struct kvm_cpu_context ctxt; 159 struct kvm_cpu_context ctxt;
152 160
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
186 /* Cache some mmu pages needed inside spinlock regions */ 194 /* Cache some mmu pages needed inside spinlock regions */
187 struct kvm_mmu_memory_cache mmu_page_cache; 195 struct kvm_mmu_memory_cache mmu_page_cache;
188 196
197 struct vcpu_reset_state reset_state;
198
189 /* Detect first run of a vcpu */ 199 /* Detect first run of a vcpu */
190 bool has_run_once; 200 bool has_run_once;
191}; 201};
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index c4b1d4fb1797..de2089501b8b 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
76#define S2_PMD_MASK PMD_MASK 76#define S2_PMD_MASK PMD_MASK
77#define S2_PMD_SIZE PMD_SIZE 77#define S2_PMD_SIZE PMD_SIZE
78 78
79static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
80{
81 return true;
82}
83
79#endif /* __ARM_S2_PGTABLE_H_ */ 84#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 222c1635bc7a..e8bd288fd5be 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1450 reset_coproc_regs(vcpu, table, num); 1450 reset_coproc_regs(vcpu, table, num);
1451 1451
1452 for (num = 1; num < NR_CP15_REGS; num++) 1452 for (num = 1; num < NR_CP15_REGS; num++)
1453 if (vcpu_cp15(vcpu, num) == 0x42424242) 1453 WARN(vcpu_cp15(vcpu, num) == 0x42424242,
1454 panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); 1454 "Didn't reset vcpu_cp15(vcpu, %zi)", num);
1455} 1455}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
26#include <asm/cputype.h> 26#include <asm/cputype.h>
27#include <asm/kvm_arm.h> 27#include <asm/kvm_arm.h>
28#include <asm/kvm_coproc.h> 28#include <asm/kvm_coproc.h>
29#include <asm/kvm_emulate.h>
29 30
30#include <kvm/arm_arch_timer.h> 31#include <kvm/arm_arch_timer.h>
31 32
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
69 /* Reset CP15 registers */ 70 /* Reset CP15 registers */
70 kvm_reset_coprocs(vcpu); 71 kvm_reset_coprocs(vcpu);
71 72
73 /*
74 * Additional reset state handling that PSCI may have imposed on us.
75 * Must be done after all the sys_reg reset.
76 */
77 if (READ_ONCE(vcpu->arch.reset_state.reset)) {
78 unsigned long target_pc = vcpu->arch.reset_state.pc;
79
80 /* Gracefully handle Thumb2 entry point */
81 if (target_pc & 1) {
82 target_pc &= ~1UL;
83 vcpu_set_thumb(vcpu);
84 }
85
86 /* Propagate caller endianness */
87 if (vcpu->arch.reset_state.be)
88 kvm_vcpu_set_be(vcpu);
89
90 *vcpu_pc(vcpu) = target_pc;
91 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
92
93 vcpu->arch.reset_state.reset = false;
94 }
95
72 /* Reset arch_timer context */ 96 /* Reset arch_timer context */
73 return kvm_timer_vcpu_reset(vcpu); 97 return kvm_timer_vcpu_reset(vcpu);
74} 98}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7732d0ba4e60..da3fc7324d68 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -208,6 +209,13 @@ struct kvm_cpu_context {
208 209
209typedef struct kvm_cpu_context kvm_cpu_context_t; 210typedef struct kvm_cpu_context kvm_cpu_context_t;
210 211
212struct vcpu_reset_state {
213 unsigned long pc;
214 unsigned long r0;
215 bool be;
216 bool reset;
217};
218
211struct kvm_vcpu_arch { 219struct kvm_vcpu_arch {
212 struct kvm_cpu_context ctxt; 220 struct kvm_cpu_context ctxt;
213 221
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
297 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ 305 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
298 u64 vsesr_el2; 306 u64 vsesr_el2;
299 307
308 /* Additional reset state */
309 struct vcpu_reset_state reset_state;
310
300 /* True when deferrable sysregs are loaded on the physical CPU, 311 /* True when deferrable sysregs are loaded on the physical CPU,
301 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ 312 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
302 bool sysregs_loaded_on_cpu; 313 bool sysregs_loaded_on_cpu;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index b0b1478094b4..421ebf6f7086 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
23#include <kvm/arm_psci.h> 23#include <kvm/arm_psci.h>
24 24
25#include <asm/cpufeature.h> 25#include <asm/cpufeature.h>
26#include <asm/kprobes.h>
26#include <asm/kvm_asm.h> 27#include <asm/kvm_asm.h>
27#include <asm/kvm_emulate.h> 28#include <asm/kvm_emulate.h>
28#include <asm/kvm_host.h> 29#include <asm/kvm_host.h>
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
107 108
108 write_sysreg(kvm_get_hyp_vector(), vbar_el1); 109 write_sysreg(kvm_get_hyp_vector(), vbar_el1);
109} 110}
111NOKPROBE_SYMBOL(activate_traps_vhe);
110 112
111static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) 113static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
112{ 114{
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
154 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); 156 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
155 write_sysreg(vectors, vbar_el1); 157 write_sysreg(vectors, vbar_el1);
156} 158}
159NOKPROBE_SYMBOL(deactivate_traps_vhe);
157 160
158static void __hyp_text __deactivate_traps_nvhe(void) 161static void __hyp_text __deactivate_traps_nvhe(void)
159{ 162{
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
513 516
514 return exit_code; 517 return exit_code;
515} 518}
519NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
516 520
517/* Switch to the guest for legacy non-VHE systems */ 521/* Switch to the guest for legacy non-VHE systems */
518int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) 522int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
620 read_sysreg_el2(esr), read_sysreg_el2(far), 624 read_sysreg_el2(esr), read_sysreg_el2(far),
621 read_sysreg(hpfar_el2), par, vcpu); 625 read_sysreg(hpfar_el2), par, vcpu);
622} 626}
627NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
623 628
624void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) 629void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
625{ 630{
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 68d6f7c3b237..b426e2cf973c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -18,6 +18,7 @@
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/kvm_host.h> 19#include <linux/kvm_host.h>
20 20
21#include <asm/kprobes.h>
21#include <asm/kvm_asm.h> 22#include <asm/kvm_asm.h>
22#include <asm/kvm_emulate.h> 23#include <asm/kvm_emulate.h>
23#include <asm/kvm_hyp.h> 24#include <asm/kvm_hyp.h>
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
98{ 99{
99 __sysreg_save_common_state(ctxt); 100 __sysreg_save_common_state(ctxt);
100} 101}
102NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
101 103
102void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) 104void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
103{ 105{
104 __sysreg_save_common_state(ctxt); 106 __sysreg_save_common_state(ctxt);
105 __sysreg_save_el2_return_state(ctxt); 107 __sysreg_save_el2_return_state(ctxt);
106} 108}
109NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
107 110
108static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) 111static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
109{ 112{
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
188{ 191{
189 __sysreg_restore_common_state(ctxt); 192 __sysreg_restore_common_state(ctxt);
190} 193}
194NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
191 195
192void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) 196void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
193{ 197{
194 __sysreg_restore_common_state(ctxt); 198 __sysreg_restore_common_state(ctxt);
195 __sysreg_restore_el2_return_state(ctxt); 199 __sysreg_restore_el2_return_state(ctxt);
196} 200}
201NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
197 202
198void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) 203void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
199{ 204{
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b72a3dd56204..f16a5f8ff2b4 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
32#include <asm/kvm_arm.h> 32#include <asm/kvm_arm.h>
33#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/kvm_coproc.h> 34#include <asm/kvm_coproc.h>
35#include <asm/kvm_emulate.h>
35#include <asm/kvm_mmu.h> 36#include <asm/kvm_mmu.h>
36 37
37/* Maximum phys_shift supported for any VM on this host */ 38/* Maximum phys_shift supported for any VM on this host */
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
105 * This function finds the right table above and sets the registers on 106 * This function finds the right table above and sets the registers on
106 * the virtual CPU struct to their architecturally defined reset 107 * the virtual CPU struct to their architecturally defined reset
107 * values. 108 * values.
109 *
110 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
111 * ioctl or as part of handling a request issued by another VCPU in the PSCI
112 * handling code. In the first case, the VCPU will not be loaded, and in the
113 * second case the VCPU will be loaded. Because this function operates purely
114 * on the memory-backed valus of system registers, we want to do a full put if
115 * we were loaded (handling a request) and load the values back at the end of
116 * the function. Otherwise we leave the state alone. In both cases, we
117 * disable preemption around the vcpu reset as we would otherwise race with
118 * preempt notifiers which also call put/load.
108 */ 119 */
109int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 120int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
110{ 121{
111 const struct kvm_regs *cpu_reset; 122 const struct kvm_regs *cpu_reset;
123 int ret = -EINVAL;
124 bool loaded;
125
126 preempt_disable();
127 loaded = (vcpu->cpu != -1);
128 if (loaded)
129 kvm_arch_vcpu_put(vcpu);
112 130
113 switch (vcpu->arch.target) { 131 switch (vcpu->arch.target) {
114 default: 132 default:
115 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { 133 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
116 if (!cpu_has_32bit_el1()) 134 if (!cpu_has_32bit_el1())
117 return -EINVAL; 135 goto out;
118 cpu_reset = &default_regs_reset32; 136 cpu_reset = &default_regs_reset32;
119 } else { 137 } else {
120 cpu_reset = &default_regs_reset; 138 cpu_reset = &default_regs_reset;
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
129 /* Reset system registers */ 147 /* Reset system registers */
130 kvm_reset_sys_regs(vcpu); 148 kvm_reset_sys_regs(vcpu);
131 149
150 /*
151 * Additional reset state handling that PSCI may have imposed on us.
152 * Must be done after all the sys_reg reset.
153 */
154 if (vcpu->arch.reset_state.reset) {
155 unsigned long target_pc = vcpu->arch.reset_state.pc;
156
157 /* Gracefully handle Thumb2 entry point */
158 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
159 target_pc &= ~1UL;
160 vcpu_set_thumb(vcpu);
161 }
162
163 /* Propagate caller endianness */
164 if (vcpu->arch.reset_state.be)
165 kvm_vcpu_set_be(vcpu);
166
167 *vcpu_pc(vcpu) = target_pc;
168 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
169
170 vcpu->arch.reset_state.reset = false;
171 }
172
132 /* Reset PMU */ 173 /* Reset PMU */
133 kvm_pmu_vcpu_reset(vcpu); 174 kvm_pmu_vcpu_reset(vcpu);
134 175
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
137 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; 178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
138 179
139 /* Reset timer */ 180 /* Reset timer */
140 return kvm_timer_vcpu_reset(vcpu); 181 ret = kvm_timer_vcpu_reset(vcpu);
182out:
183 if (loaded)
184 kvm_arch_vcpu_load(vcpu, smp_processor_id());
185 preempt_enable();
186 return ret;
141} 187}
142 188
143void kvm_set_ipa_limit(void) 189void kvm_set_ipa_limit(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e3e37228ae4e..c936aa40c3f4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
314 return read_zero(vcpu, p); 314 return read_zero(vcpu, p);
315} 315}
316 316
317static bool trap_undef(struct kvm_vcpu *vcpu, 317/*
318 struct sys_reg_params *p, 318 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
319 const struct sys_reg_desc *r) 319 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
320 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
321 * treat it separately.
322 */
323static bool trap_loregion(struct kvm_vcpu *vcpu,
324 struct sys_reg_params *p,
325 const struct sys_reg_desc *r)
320{ 326{
321 kvm_inject_undefined(vcpu); 327 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
322 return false; 328 u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
329 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
330
331 if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
332 kvm_inject_undefined(vcpu);
333 return false;
334 }
335
336 if (p->is_write && sr == SYS_LORID_EL1)
337 return write_to_read_only(vcpu, p, r);
338
339 return trap_raz_wi(vcpu, p, r);
323} 340}
324 341
325static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 342static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
1048 if (val & ptrauth_mask) 1065 if (val & ptrauth_mask)
1049 kvm_debug("ptrauth unsupported for guests, suppressing\n"); 1066 kvm_debug("ptrauth unsupported for guests, suppressing\n");
1050 val &= ~ptrauth_mask; 1067 val &= ~ptrauth_mask;
1051 } else if (id == SYS_ID_AA64MMFR1_EL1) {
1052 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1053 kvm_debug("LORegions unsupported for guests, suppressing\n");
1054
1055 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1056 } 1068 }
1057 1069
1058 return val; 1070 return val;
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1338 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, 1350 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1339 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 1351 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1340 1352
1341 { SYS_DESC(SYS_LORSA_EL1), trap_undef }, 1353 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1342 { SYS_DESC(SYS_LOREA_EL1), trap_undef }, 1354 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1343 { SYS_DESC(SYS_LORN_EL1), trap_undef }, 1355 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1344 { SYS_DESC(SYS_LORC_EL1), trap_undef }, 1356 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1345 { SYS_DESC(SYS_LORID_EL1), trap_undef }, 1357 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1346 1358
1347 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, 1359 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1348 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, 1360 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2596 table = get_target_table(vcpu->arch.target, true, &num); 2608 table = get_target_table(vcpu->arch.target, true, &num);
2597 reset_sys_reg_descs(vcpu, table, num); 2609 reset_sys_reg_descs(vcpu, table, num);
2598 2610
2599 for (num = 1; num < NR_SYS_REGS; num++) 2611 for (num = 1; num < NR_SYS_REGS; num++) {
2600 if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) 2612 if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
2601 panic("Didn't reset __vcpu_sys_reg(%zi)", num); 2613 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2614 break;
2615 }
2602} 2616}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4f31f96bbfab..c36c86f1ec9a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -100,7 +100,7 @@ enum vgic_irq_config {
100}; 100};
101 101
102struct vgic_irq { 102struct vgic_irq {
103 spinlock_t irq_lock; /* Protects the content of the struct */ 103 raw_spinlock_t irq_lock; /* Protects the content of the struct */
104 struct list_head lpi_list; /* Used to link all LPIs together */ 104 struct list_head lpi_list; /* Used to link all LPIs together */
105 struct list_head ap_list; 105 struct list_head ap_list;
106 106
@@ -256,7 +256,7 @@ struct vgic_dist {
256 u64 propbaser; 256 u64 propbaser;
257 257
258 /* Protects the lpi_list and the count value below. */ 258 /* Protects the lpi_list and the count value below. */
259 spinlock_t lpi_list_lock; 259 raw_spinlock_t lpi_list_lock;
260 struct list_head lpi_list_head; 260 struct list_head lpi_list_head;
261 int lpi_list_count; 261 int lpi_list_count;
262 262
@@ -307,7 +307,7 @@ struct vgic_cpu {
307 unsigned int used_lrs; 307 unsigned int used_lrs;
308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; 308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
309 309
310 spinlock_t ap_list_lock; /* Protects the ap_list */ 310 raw_spinlock_t ap_list_lock; /* Protects the ap_list */
311 311
312 /* 312 /*
313 * List of IRQs that this VCPU should consider because they are either 313 * List of IRQs that this VCPU should consider because they are either
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9e350fd34504..9c486fad3f9f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
626 /* Awaken to handle a signal, request we sleep again later. */ 626 /* Awaken to handle a signal, request we sleep again later. */
627 kvm_make_request(KVM_REQ_SLEEP, vcpu); 627 kvm_make_request(KVM_REQ_SLEEP, vcpu);
628 } 628 }
629
630 /*
631 * Make sure we will observe a potential reset request if we've
632 * observed a change to the power state. Pairs with the smp_wmb() in
633 * kvm_psci_vcpu_on().
634 */
635 smp_rmb();
629} 636}
630 637
631static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) 638static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
639 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) 646 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
640 vcpu_req_sleep(vcpu); 647 vcpu_req_sleep(vcpu);
641 648
649 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
650 kvm_reset_vcpu(vcpu);
651
642 /* 652 /*
643 * Clear IRQ_PENDING requests that were made to guarantee 653 * Clear IRQ_PENDING requests that were made to guarantee
644 * that a VCPU sees new virtual interrupts. 654 * that a VCPU sees new virtual interrupts.
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..30251e288629 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1695 1695
1696 vma_pagesize = vma_kernel_pagesize(vma); 1696 vma_pagesize = vma_kernel_pagesize(vma);
1697 /* 1697 /*
1698 * PUD level may not exist for a VM but PMD is guaranteed to 1698 * The stage2 has a minimum of 2 level table (For arm64 see
1699 * exist. 1699 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1700 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
1701 * As for PUD huge maps, we must make sure that we have at least
1702 * 3 levels, i.e, PMD is not folded.
1700 */ 1703 */
1701 if ((vma_pagesize == PMD_SIZE || 1704 if ((vma_pagesize == PMD_SIZE ||
1702 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && 1705 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
1703 !force_pte) { 1706 !force_pte) {
1704 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; 1707 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1705 } 1708 }
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
104 104
105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) 105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
106{ 106{
107 struct vcpu_reset_state *reset_state;
107 struct kvm *kvm = source_vcpu->kvm; 108 struct kvm *kvm = source_vcpu->kvm;
108 struct kvm_vcpu *vcpu = NULL; 109 struct kvm_vcpu *vcpu = NULL;
109 struct swait_queue_head *wq;
110 unsigned long cpu_id; 110 unsigned long cpu_id;
111 unsigned long context_id;
112 phys_addr_t target_pc;
113 111
114 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; 112 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
115 if (vcpu_mode_is_32bit(source_vcpu)) 113 if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
130 return PSCI_RET_INVALID_PARAMS; 128 return PSCI_RET_INVALID_PARAMS;
131 } 129 }
132 130
133 target_pc = smccc_get_arg2(source_vcpu); 131 reset_state = &vcpu->arch.reset_state;
134 context_id = smccc_get_arg3(source_vcpu);
135 132
136 kvm_reset_vcpu(vcpu); 133 reset_state->pc = smccc_get_arg2(source_vcpu);
137
138 /* Gracefully handle Thumb2 entry point */
139 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
140 target_pc &= ~((phys_addr_t) 1);
141 vcpu_set_thumb(vcpu);
142 }
143 134
144 /* Propagate caller endianness */ 135 /* Propagate caller endianness */
145 if (kvm_vcpu_is_be(source_vcpu)) 136 reset_state->be = kvm_vcpu_is_be(source_vcpu);
146 kvm_vcpu_set_be(vcpu);
147 137
148 *vcpu_pc(vcpu) = target_pc;
149 /* 138 /*
150 * NOTE: We always update r0 (or x0) because for PSCI v0.1 139 * NOTE: We always update r0 (or x0) because for PSCI v0.1
151 * the general puspose registers are undefined upon CPU_ON. 140 * the general puspose registers are undefined upon CPU_ON.
152 */ 141 */
153 smccc_set_retval(vcpu, context_id, 0, 0, 0); 142 reset_state->r0 = smccc_get_arg3(source_vcpu);
154 vcpu->arch.power_off = false; 143
155 smp_mb(); /* Make sure the above is visible */ 144 WRITE_ONCE(reset_state->reset, true);
145 kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
156 146
157 wq = kvm_arch_vcpu_wq(vcpu); 147 /*
158 swake_up_one(wq); 148 * Make sure the reset request is observed if the change to
149 * power_state is observed.
150 */
151 smp_wmb();
152
153 vcpu->arch.power_off = false;
154 kvm_vcpu_wake_up(vcpu);
159 155
160 return PSCI_RET_SUCCESS; 156 return PSCI_RET_SUCCESS;
161} 157}
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 07aa900bac56..1f62f2b8065d 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
251 return 0; 251 return 0;
252 } 252 }
253 253
254 spin_lock_irqsave(&irq->irq_lock, flags); 254 raw_spin_lock_irqsave(&irq->irq_lock, flags);
255 print_irq_state(s, irq, vcpu); 255 print_irq_state(s, irq, vcpu);
256 spin_unlock_irqrestore(&irq->irq_lock, flags); 256 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
257 257
258 vgic_put_irq(kvm, irq); 258 vgic_put_irq(kvm, irq);
259 return 0; 259 return 0;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..3bdb31eaed64 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
64 struct vgic_dist *dist = &kvm->arch.vgic; 64 struct vgic_dist *dist = &kvm->arch.vgic;
65 65
66 INIT_LIST_HEAD(&dist->lpi_list_head); 66 INIT_LIST_HEAD(&dist->lpi_list_head);
67 spin_lock_init(&dist->lpi_list_lock); 67 raw_spin_lock_init(&dist->lpi_list_lock);
68} 68}
69 69
70/* CREATION */ 70/* CREATION */
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
171 171
172 irq->intid = i + VGIC_NR_PRIVATE_IRQS; 172 irq->intid = i + VGIC_NR_PRIVATE_IRQS;
173 INIT_LIST_HEAD(&irq->ap_list); 173 INIT_LIST_HEAD(&irq->ap_list);
174 spin_lock_init(&irq->irq_lock); 174 raw_spin_lock_init(&irq->irq_lock);
175 irq->vcpu = NULL; 175 irq->vcpu = NULL;
176 irq->target_vcpu = vcpu0; 176 irq->target_vcpu = vcpu0;
177 kref_init(&irq->refcount); 177 kref_init(&irq->refcount);
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; 206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
207 207
208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head); 208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
209 spin_lock_init(&vgic_cpu->ap_list_lock); 209 raw_spin_lock_init(&vgic_cpu->ap_list_lock);
210 210
211 /* 211 /*
212 * Enable and configure all SGIs to be edge-triggered and 212 * Enable and configure all SGIs to be edge-triggered and
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; 216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
217 217
218 INIT_LIST_HEAD(&irq->ap_list); 218 INIT_LIST_HEAD(&irq->ap_list);
219 spin_lock_init(&irq->irq_lock); 219 raw_spin_lock_init(&irq->irq_lock);
220 irq->intid = i; 220 irq->intid = i;
221 irq->vcpu = NULL; 221 irq->vcpu = NULL;
222 irq->target_vcpu = vcpu; 222 irq->target_vcpu = vcpu;
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
231 irq->config = VGIC_CONFIG_LEVEL; 231 irq->config = VGIC_CONFIG_LEVEL;
232 } 232 }
233 233
234 /*
235 * GICv3 can only be created via the KVM_DEVICE_CREATE API and
236 * so we always know the emulation type at this point as it's
237 * either explicitly configured as GICv3, or explicitly
238 * configured as GICv2, or not configured yet which also
239 * implies GICv2.
240 */
241 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 234 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
242 irq->group = 1; 235 irq->group = 1;
243 else 236 else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
281{ 274{
282 struct vgic_dist *dist = &kvm->arch.vgic; 275 struct vgic_dist *dist = &kvm->arch.vgic;
283 struct kvm_vcpu *vcpu; 276 struct kvm_vcpu *vcpu;
284 int ret = 0, i; 277 int ret = 0, i, idx;
285 278
286 if (vgic_initialized(kvm)) 279 if (vgic_initialized(kvm))
287 return 0; 280 return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
298 if (ret) 291 if (ret)
299 goto out; 292 goto out;
300 293
294 /* Initialize groups on CPUs created before the VGIC type was known */
295 kvm_for_each_vcpu(idx, vcpu, kvm) {
296 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
297
298 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
299 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
300 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
301 irq->group = 1;
302 else
303 irq->group = 0;
304 }
305 }
306
301 if (vgic_has_its(kvm)) { 307 if (vgic_has_its(kvm)) {
302 ret = vgic_v4_init(kvm); 308 ret = vgic_v4_init(kvm);
303 if (ret) 309 if (ret)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index eb2a390a6c86..ab3f47745d9c 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
65 65
66 INIT_LIST_HEAD(&irq->lpi_list); 66 INIT_LIST_HEAD(&irq->lpi_list);
67 INIT_LIST_HEAD(&irq->ap_list); 67 INIT_LIST_HEAD(&irq->ap_list);
68 spin_lock_init(&irq->irq_lock); 68 raw_spin_lock_init(&irq->irq_lock);
69 69
70 irq->config = VGIC_CONFIG_EDGE; 70 irq->config = VGIC_CONFIG_EDGE;
71 kref_init(&irq->refcount); 71 kref_init(&irq->refcount);
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
73 irq->target_vcpu = vcpu; 73 irq->target_vcpu = vcpu;
74 irq->group = 1; 74 irq->group = 1;
75 75
76 spin_lock_irqsave(&dist->lpi_list_lock, flags); 76 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
77 77
78 /* 78 /*
79 * There could be a race with another vgic_add_lpi(), so we need to 79 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
101 dist->lpi_list_count++; 101 dist->lpi_list_count++;
102 102
103out_unlock: 103out_unlock:
104 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 104 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
105 105
106 /* 106 /*
107 * We "cache" the configuration table entries in our struct vgic_irq's. 107 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
287 if (ret) 287 if (ret)
288 return ret; 288 return ret;
289 289
290 spin_lock_irqsave(&irq->irq_lock, flags); 290 raw_spin_lock_irqsave(&irq->irq_lock, flags);
291 291
292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { 292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
293 irq->priority = LPI_PROP_PRIORITY(prop); 293 irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
299 } 299 }
300 } 300 }
301 301
302 spin_unlock_irqrestore(&irq->irq_lock, flags); 302 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
303 303
304 if (irq->hw) 304 if (irq->hw)
305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); 305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
332 if (!intids) 332 if (!intids)
333 return -ENOMEM; 333 return -ENOMEM;
334 334
335 spin_lock_irqsave(&dist->lpi_list_lock, flags); 335 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
337 if (i == irq_count) 337 if (i == irq_count)
338 break; 338 break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
341 continue; 341 continue;
342 intids[i++] = irq->intid; 342 intids[i++] = irq->intid;
343 } 343 }
344 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 344 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
345 345
346 *intid_ptr = intids; 346 *intid_ptr = intids;
347 return i; 347 return i;
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
352 int ret = 0; 352 int ret = 0;
353 unsigned long flags; 353 unsigned long flags;
354 354
355 spin_lock_irqsave(&irq->irq_lock, flags); 355 raw_spin_lock_irqsave(&irq->irq_lock, flags);
356 irq->target_vcpu = vcpu; 356 irq->target_vcpu = vcpu;
357 spin_unlock_irqrestore(&irq->irq_lock, flags); 357 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
358 358
359 if (irq->hw) { 359 if (irq->hw) {
360 struct its_vlpi_map map; 360 struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
455 } 455 }
456 456
457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); 457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
458 spin_lock_irqsave(&irq->irq_lock, flags); 458 raw_spin_lock_irqsave(&irq->irq_lock, flags);
459 irq->pending_latch = pendmask & (1U << bit_nr); 459 irq->pending_latch = pendmask & (1U << bit_nr);
460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
461 vgic_put_irq(vcpu->kvm, irq); 461 vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
612 return irq_set_irqchip_state(irq->host_irq, 612 return irq_set_irqchip_state(irq->host_irq,
613 IRQCHIP_STATE_PENDING, true); 613 IRQCHIP_STATE_PENDING, true);
614 614
615 spin_lock_irqsave(&irq->irq_lock, flags); 615 raw_spin_lock_irqsave(&irq->irq_lock, flags);
616 irq->pending_latch = true; 616 irq->pending_latch = true;
617 vgic_queue_irq_unlock(kvm, irq, flags); 617 vgic_queue_irq_unlock(kvm, irq, flags);
618 618
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 738b65d2d0e7..b535fffc7400 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
147 147
148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); 148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
149 149
150 spin_lock_irqsave(&irq->irq_lock, flags); 150 raw_spin_lock_irqsave(&irq->irq_lock, flags);
151 irq->pending_latch = true; 151 irq->pending_latch = true;
152 irq->source |= 1U << source_vcpu->vcpu_id; 152 irq->source |= 1U << source_vcpu->vcpu_id;
153 153
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); 191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
192 int target; 192 int target;
193 193
194 spin_lock_irqsave(&irq->irq_lock, flags); 194 raw_spin_lock_irqsave(&irq->irq_lock, flags);
195 195
196 irq->targets = (val >> (i * 8)) & cpu_mask; 196 irq->targets = (val >> (i * 8)) & cpu_mask;
197 target = irq->targets ? __ffs(irq->targets) : 0; 197 target = irq->targets ? __ffs(irq->targets) : 0;
198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); 198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
199 199
200 spin_unlock_irqrestore(&irq->irq_lock, flags); 200 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
201 vgic_put_irq(vcpu->kvm, irq); 201 vgic_put_irq(vcpu->kvm, irq);
202 } 202 }
203} 203}
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
230 for (i = 0; i < len; i++) { 230 for (i = 0; i < len; i++) {
231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
232 232
233 spin_lock_irqsave(&irq->irq_lock, flags); 233 raw_spin_lock_irqsave(&irq->irq_lock, flags);
234 234
235 irq->source &= ~((val >> (i * 8)) & 0xff); 235 irq->source &= ~((val >> (i * 8)) & 0xff);
236 if (!irq->source) 236 if (!irq->source)
237 irq->pending_latch = false; 237 irq->pending_latch = false;
238 238
239 spin_unlock_irqrestore(&irq->irq_lock, flags); 239 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
240 vgic_put_irq(vcpu->kvm, irq); 240 vgic_put_irq(vcpu->kvm, irq);
241 } 241 }
242} 242}
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
252 for (i = 0; i < len; i++) { 252 for (i = 0; i < len; i++) {
253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
254 254
255 spin_lock_irqsave(&irq->irq_lock, flags); 255 raw_spin_lock_irqsave(&irq->irq_lock, flags);
256 256
257 irq->source |= (val >> (i * 8)) & 0xff; 257 irq->source |= (val >> (i * 8)) & 0xff;
258 258
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
260 irq->pending_latch = true; 260 irq->pending_latch = true;
261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
262 } else { 262 } else {
263 spin_unlock_irqrestore(&irq->irq_lock, flags); 263 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
264 } 264 }
265 vgic_put_irq(vcpu->kvm, irq); 265 vgic_put_irq(vcpu->kvm, irq);
266 } 266 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index b3d1f0985117..4a12322bf7df 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
169 if (!irq) 169 if (!irq)
170 return; 170 return;
171 171
172 spin_lock_irqsave(&irq->irq_lock, flags); 172 raw_spin_lock_irqsave(&irq->irq_lock, flags);
173 173
174 /* We only care about and preserve Aff0, Aff1 and Aff2. */ 174 /* We only care about and preserve Aff0, Aff1 and Aff2. */
175 irq->mpidr = val & GENMASK(23, 0); 175 irq->mpidr = val & GENMASK(23, 0);
176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); 176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
177 177
178 spin_unlock_irqrestore(&irq->irq_lock, flags); 178 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
179 vgic_put_irq(vcpu->kvm, irq); 179 vgic_put_irq(vcpu->kvm, irq);
180} 180}
181 181
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
281 for (i = 0; i < len * 8; i++) { 281 for (i = 0; i < len * 8; i++) {
282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
283 283
284 spin_lock_irqsave(&irq->irq_lock, flags); 284 raw_spin_lock_irqsave(&irq->irq_lock, flags);
285 if (test_bit(i, &val)) { 285 if (test_bit(i, &val)) {
286 /* 286 /*
287 * pending_latch is set irrespective of irq type 287 * pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
293 } else { 293 } else {
294 irq->pending_latch = false; 294 irq->pending_latch = false;
295 spin_unlock_irqrestore(&irq->irq_lock, flags); 295 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
296 } 296 }
297 297
298 vgic_put_irq(vcpu->kvm, irq); 298 vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
957 957
958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); 958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
959 959
960 spin_lock_irqsave(&irq->irq_lock, flags); 960 raw_spin_lock_irqsave(&irq->irq_lock, flags);
961 961
962 /* 962 /*
963 * An access targeting Group0 SGIs can only generate 963 * An access targeting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
968 irq->pending_latch = true; 968 irq->pending_latch = true;
969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
970 } else { 970 } else {
971 spin_unlock_irqrestore(&irq->irq_lock, flags); 971 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
972 } 972 }
973 973
974 vgic_put_irq(vcpu->kvm, irq); 974 vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e04a4d..7de42fba05b5 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
77 for (i = 0; i < len * 8; i++) { 77 for (i = 0; i < len * 8; i++) {
78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
79 79
80 spin_lock_irqsave(&irq->irq_lock, flags); 80 raw_spin_lock_irqsave(&irq->irq_lock, flags);
81 irq->group = !!(val & BIT(i)); 81 irq->group = !!(val & BIT(i));
82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
83 83
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
120 for_each_set_bit(i, &val, len * 8) { 120 for_each_set_bit(i, &val, len * 8) {
121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
122 122
123 spin_lock_irqsave(&irq->irq_lock, flags); 123 raw_spin_lock_irqsave(&irq->irq_lock, flags);
124 irq->enabled = true; 124 irq->enabled = true;
125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
126 126
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
139 for_each_set_bit(i, &val, len * 8) { 139 for_each_set_bit(i, &val, len * 8) {
140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
141 141
142 spin_lock_irqsave(&irq->irq_lock, flags); 142 raw_spin_lock_irqsave(&irq->irq_lock, flags);
143 143
144 irq->enabled = false; 144 irq->enabled = false;
145 145
146 spin_unlock_irqrestore(&irq->irq_lock, flags); 146 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
147 vgic_put_irq(vcpu->kvm, irq); 147 vgic_put_irq(vcpu->kvm, irq);
148 } 148 }
149} 149}
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
161 unsigned long flags; 161 unsigned long flags;
162 162
163 spin_lock_irqsave(&irq->irq_lock, flags); 163 raw_spin_lock_irqsave(&irq->irq_lock, flags);
164 if (irq_is_pending(irq)) 164 if (irq_is_pending(irq))
165 value |= (1U << i); 165 value |= (1U << i);
166 spin_unlock_irqrestore(&irq->irq_lock, flags); 166 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
167 167
168 vgic_put_irq(vcpu->kvm, irq); 168 vgic_put_irq(vcpu->kvm, irq);
169 } 169 }
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
215 for_each_set_bit(i, &val, len * 8) { 215 for_each_set_bit(i, &val, len * 8) {
216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
217 217
218 spin_lock_irqsave(&irq->irq_lock, flags); 218 raw_spin_lock_irqsave(&irq->irq_lock, flags);
219 if (irq->hw) 219 if (irq->hw)
220 vgic_hw_irq_spending(vcpu, irq, is_uaccess); 220 vgic_hw_irq_spending(vcpu, irq, is_uaccess);
221 else 221 else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
262 for_each_set_bit(i, &val, len * 8) { 262 for_each_set_bit(i, &val, len * 8) {
263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
264 264
265 spin_lock_irqsave(&irq->irq_lock, flags); 265 raw_spin_lock_irqsave(&irq->irq_lock, flags);
266 266
267 if (irq->hw) 267 if (irq->hw)
268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess); 268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
269 else 269 else
270 irq->pending_latch = false; 270 irq->pending_latch = false;
271 271
272 spin_unlock_irqrestore(&irq->irq_lock, flags); 272 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
273 vgic_put_irq(vcpu->kvm, irq); 273 vgic_put_irq(vcpu->kvm, irq);
274 } 274 }
275} 275}
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
311 unsigned long flags; 311 unsigned long flags;
312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); 312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
313 313
314 spin_lock_irqsave(&irq->irq_lock, flags); 314 raw_spin_lock_irqsave(&irq->irq_lock, flags);
315 315
316 if (irq->hw) { 316 if (irq->hw) {
317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); 317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
342 if (irq->active) 342 if (irq->active)
343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
344 else 344 else
345 spin_unlock_irqrestore(&irq->irq_lock, flags); 345 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
346} 346}
347 347
348/* 348/*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
485 for (i = 0; i < len; i++) { 485 for (i = 0; i < len; i++) {
486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
487 487
488 spin_lock_irqsave(&irq->irq_lock, flags); 488 raw_spin_lock_irqsave(&irq->irq_lock, flags);
489 /* Narrow the priority range to what we actually support */ 489 /* Narrow the priority range to what we actually support */
490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); 490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
491 spin_unlock_irqrestore(&irq->irq_lock, flags); 491 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
492 492
493 vgic_put_irq(vcpu->kvm, irq); 493 vgic_put_irq(vcpu->kvm, irq);
494 } 494 }
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
534 continue; 534 continue;
535 535
536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
537 spin_lock_irqsave(&irq->irq_lock, flags); 537 raw_spin_lock_irqsave(&irq->irq_lock, flags);
538 538
539 if (test_bit(i * 2 + 1, &val)) 539 if (test_bit(i * 2 + 1, &val))
540 irq->config = VGIC_CONFIG_EDGE; 540 irq->config = VGIC_CONFIG_EDGE;
541 else 541 else
542 irq->config = VGIC_CONFIG_LEVEL; 542 irq->config = VGIC_CONFIG_LEVEL;
543 543
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 544 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 545 vgic_put_irq(vcpu->kvm, irq);
546 } 546 }
547} 547}
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
590 * restore irq config before line level. 590 * restore irq config before line level.
591 */ 591 */
592 new_level = !!(val & (1U << i)); 592 new_level = !!(val & (1U << i));
593 spin_lock_irqsave(&irq->irq_lock, flags); 593 raw_spin_lock_irqsave(&irq->irq_lock, flags);
594 irq->line_level = new_level; 594 irq->line_level = new_level;
595 if (new_level) 595 if (new_level)
596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
597 else 597 else
598 spin_unlock_irqrestore(&irq->irq_lock, flags); 598 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
599 599
600 vgic_put_irq(vcpu->kvm, irq); 600 vgic_put_irq(vcpu->kvm, irq);
601 } 601 }
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
84 84
85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
86 86
87 spin_lock(&irq->irq_lock); 87 raw_spin_lock(&irq->irq_lock);
88 88
89 /* Always preserve the active bit */ 89 /* Always preserve the active bit */
90 irq->active = !!(val & GICH_LR_ACTIVE_BIT); 90 irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
127 vgic_irq_set_phys_active(irq, false); 127 vgic_irq_set_phys_active(irq, false);
128 } 128 }
129 129
130 spin_unlock(&irq->irq_lock); 130 raw_spin_unlock(&irq->irq_lock);
131 vgic_put_irq(vcpu->kvm, irq); 131 vgic_put_irq(vcpu->kvm, irq);
132 } 132 }
133 133
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
76 if (!irq) /* An LPI could have been unmapped. */ 76 if (!irq) /* An LPI could have been unmapped. */
77 continue; 77 continue;
78 78
79 spin_lock(&irq->irq_lock); 79 raw_spin_lock(&irq->irq_lock);
80 80
81 /* Always preserve the active bit */ 81 /* Always preserve the active bit */
82 irq->active = !!(val & ICH_LR_ACTIVE_BIT); 82 irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
119 vgic_irq_set_phys_active(irq, false); 119 vgic_irq_set_phys_active(irq, false);
120 } 120 }
121 121
122 spin_unlock(&irq->irq_lock); 122 raw_spin_unlock(&irq->irq_lock);
123 vgic_put_irq(vcpu->kvm, irq); 123 vgic_put_irq(vcpu->kvm, irq);
124 } 124 }
125 125
@@ -347,9 +347,9 @@ retry:
347 347
348 status = val & (1 << bit_nr); 348 status = val & (1 << bit_nr);
349 349
350 spin_lock_irqsave(&irq->irq_lock, flags); 350 raw_spin_lock_irqsave(&irq->irq_lock, flags);
351 if (irq->target_vcpu != vcpu) { 351 if (irq->target_vcpu != vcpu) {
352 spin_unlock_irqrestore(&irq->irq_lock, flags); 352 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
353 goto retry; 353 goto retry;
354 } 354 }
355 irq->pending_latch = status; 355 irq->pending_latch = status;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
54 * When taking more than one ap_list_lock at the same time, always take the 54 * When taking more than one ap_list_lock at the same time, always take the
55 * lowest numbered VCPU's ap_list_lock first, so: 55 * lowest numbered VCPU's ap_list_lock first, so:
56 * vcpuX->vcpu_id < vcpuY->vcpu_id: 56 * vcpuX->vcpu_id < vcpuY->vcpu_id:
57 * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); 57 * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
58 * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); 58 * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
59 * 59 *
60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have 60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
61 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer 61 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
62 * spinlocks for any lock that may be taken while injecting an interrupt. 62 * spinlocks for any lock that may be taken while injecting an interrupt.
63 */ 63 */
64 64
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
72 struct vgic_irq *irq = NULL; 72 struct vgic_irq *irq = NULL;
73 unsigned long flags; 73 unsigned long flags;
74 74
75 spin_lock_irqsave(&dist->lpi_list_lock, flags); 75 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
76 76
77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
78 if (irq->intid != intid) 78 if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
88 irq = NULL; 88 irq = NULL;
89 89
90out_unlock: 90out_unlock:
91 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 91 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
92 92
93 return irq; 93 return irq;
94} 94}
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
138 if (irq->intid < VGIC_MIN_LPI) 138 if (irq->intid < VGIC_MIN_LPI)
139 return; 139 return;
140 140
141 spin_lock_irqsave(&dist->lpi_list_lock, flags); 141 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
142 if (!kref_put(&irq->refcount, vgic_irq_release)) { 142 if (!kref_put(&irq->refcount, vgic_irq_release)) {
143 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 143 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
144 return; 144 return;
145 }; 145 };
146 146
147 list_del(&irq->lpi_list); 147 list_del(&irq->lpi_list);
148 dist->lpi_list_count--; 148 dist->lpi_list_count--;
149 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 149 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 150
151 kfree(irq); 151 kfree(irq);
152} 152}
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
244 bool penda, pendb; 244 bool penda, pendb;
245 int ret; 245 int ret;
246 246
247 spin_lock(&irqa->irq_lock); 247 raw_spin_lock(&irqa->irq_lock);
248 spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); 248 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
249 249
250 if (irqa->active || irqb->active) { 250 if (irqa->active || irqb->active) {
251 ret = (int)irqb->active - (int)irqa->active; 251 ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
263 /* Both pending and enabled, sort by priority */ 263 /* Both pending and enabled, sort by priority */
264 ret = irqa->priority - irqb->priority; 264 ret = irqa->priority - irqb->priority;
265out: 265out:
266 spin_unlock(&irqb->irq_lock); 266 raw_spin_unlock(&irqb->irq_lock);
267 spin_unlock(&irqa->irq_lock); 267 raw_spin_unlock(&irqa->irq_lock);
268 return ret; 268 return ret;
269} 269}
270 270
@@ -325,7 +325,7 @@ retry:
325 * not need to be inserted into an ap_list and there is also 325 * not need to be inserted into an ap_list and there is also
326 * no more work for us to do. 326 * no more work for us to do.
327 */ 327 */
328 spin_unlock_irqrestore(&irq->irq_lock, flags); 328 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
329 329
330 /* 330 /*
331 * We have to kick the VCPU here, because we could be 331 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
347 * We must unlock the irq lock to take the ap_list_lock where 347 * We must unlock the irq lock to take the ap_list_lock where
348 * we are going to insert this new pending interrupt. 348 * we are going to insert this new pending interrupt.
349 */ 349 */
350 spin_unlock_irqrestore(&irq->irq_lock, flags); 350 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
351 351
352 /* someone can do stuff here, which we re-check below */ 352 /* someone can do stuff here, which we re-check below */
353 353
354 spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 354 raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
355 spin_lock(&irq->irq_lock); 355 raw_spin_lock(&irq->irq_lock);
356 356
357 /* 357 /*
358 * Did something change behind our backs? 358 * Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
367 */ 367 */
368 368
369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { 369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
370 spin_unlock(&irq->irq_lock); 370 raw_spin_unlock(&irq->irq_lock);
371 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 371 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
372 flags);
372 373
373 spin_lock_irqsave(&irq->irq_lock, flags); 374 raw_spin_lock_irqsave(&irq->irq_lock, flags);
374 goto retry; 375 goto retry;
375 } 376 }
376 377
@@ -382,8 +383,8 @@ retry:
382 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); 383 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
383 irq->vcpu = vcpu; 384 irq->vcpu = vcpu;
384 385
385 spin_unlock(&irq->irq_lock); 386 raw_spin_unlock(&irq->irq_lock);
386 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 387 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
387 388
388 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); 389 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
389 kvm_vcpu_kick(vcpu); 390 kvm_vcpu_kick(vcpu);
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
430 if (!irq) 431 if (!irq)
431 return -EINVAL; 432 return -EINVAL;
432 433
433 spin_lock_irqsave(&irq->irq_lock, flags); 434 raw_spin_lock_irqsave(&irq->irq_lock, flags);
434 435
435 if (!vgic_validate_injection(irq, level, owner)) { 436 if (!vgic_validate_injection(irq, level, owner)) {
436 /* Nothing to see here, move along... */ 437 /* Nothing to see here, move along... */
437 spin_unlock_irqrestore(&irq->irq_lock, flags); 438 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
438 vgic_put_irq(kvm, irq); 439 vgic_put_irq(kvm, irq);
439 return 0; 440 return 0;
440 } 441 }
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
494 495
495 BUG_ON(!irq); 496 BUG_ON(!irq);
496 497
497 spin_lock_irqsave(&irq->irq_lock, flags); 498 raw_spin_lock_irqsave(&irq->irq_lock, flags);
498 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); 499 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
499 spin_unlock_irqrestore(&irq->irq_lock, flags); 500 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
500 vgic_put_irq(vcpu->kvm, irq); 501 vgic_put_irq(vcpu->kvm, irq);
501 502
502 return ret; 503 return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
519 if (!irq->hw) 520 if (!irq->hw)
520 goto out; 521 goto out;
521 522
522 spin_lock_irqsave(&irq->irq_lock, flags); 523 raw_spin_lock_irqsave(&irq->irq_lock, flags);
523 irq->active = false; 524 irq->active = false;
524 irq->pending_latch = false; 525 irq->pending_latch = false;
525 irq->line_level = false; 526 irq->line_level = false;
526 spin_unlock_irqrestore(&irq->irq_lock, flags); 527 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
527out: 528out:
528 vgic_put_irq(vcpu->kvm, irq); 529 vgic_put_irq(vcpu->kvm, irq);
529} 530}
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
539 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 540 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
540 BUG_ON(!irq); 541 BUG_ON(!irq);
541 542
542 spin_lock_irqsave(&irq->irq_lock, flags); 543 raw_spin_lock_irqsave(&irq->irq_lock, flags);
543 kvm_vgic_unmap_irq(irq); 544 kvm_vgic_unmap_irq(irq);
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 545 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 546 vgic_put_irq(vcpu->kvm, irq);
546 547
547 return 0; 548 return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
571 return -EINVAL; 572 return -EINVAL;
572 573
573 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 574 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
574 spin_lock_irqsave(&irq->irq_lock, flags); 575 raw_spin_lock_irqsave(&irq->irq_lock, flags);
575 if (irq->owner && irq->owner != owner) 576 if (irq->owner && irq->owner != owner)
576 ret = -EEXIST; 577 ret = -EEXIST;
577 else 578 else
578 irq->owner = owner; 579 irq->owner = owner;
579 spin_unlock_irqrestore(&irq->irq_lock, flags); 580 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
580 581
581 return ret; 582 return ret;
582} 583}
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
597 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 598 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
598 599
599retry: 600retry:
600 spin_lock(&vgic_cpu->ap_list_lock); 601 raw_spin_lock(&vgic_cpu->ap_list_lock);
601 602
602 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 603 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
603 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 604 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
604 bool target_vcpu_needs_kick = false; 605 bool target_vcpu_needs_kick = false;
605 606
606 spin_lock(&irq->irq_lock); 607 raw_spin_lock(&irq->irq_lock);
607 608
608 BUG_ON(vcpu != irq->vcpu); 609 BUG_ON(vcpu != irq->vcpu);
609 610
@@ -616,7 +617,7 @@ retry:
616 */ 617 */
617 list_del(&irq->ap_list); 618 list_del(&irq->ap_list);
618 irq->vcpu = NULL; 619 irq->vcpu = NULL;
619 spin_unlock(&irq->irq_lock); 620 raw_spin_unlock(&irq->irq_lock);
620 621
621 /* 622 /*
622 * This vgic_put_irq call matches the 623 * This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:
631 632
632 if (target_vcpu == vcpu) { 633 if (target_vcpu == vcpu) {
633 /* We're on the right CPU */ 634 /* We're on the right CPU */
634 spin_unlock(&irq->irq_lock); 635 raw_spin_unlock(&irq->irq_lock);
635 continue; 636 continue;
636 } 637 }
637 638
638 /* This interrupt looks like it has to be migrated. */ 639 /* This interrupt looks like it has to be migrated. */
639 640
640 spin_unlock(&irq->irq_lock); 641 raw_spin_unlock(&irq->irq_lock);
641 spin_unlock(&vgic_cpu->ap_list_lock); 642 raw_spin_unlock(&vgic_cpu->ap_list_lock);
642 643
643 /* 644 /*
644 * Ensure locking order by always locking the smallest 645 * Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
652 vcpuB = vcpu; 653 vcpuB = vcpu;
653 } 654 }
654 655
655 spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); 656 raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
656 spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, 657 raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
657 SINGLE_DEPTH_NESTING); 658 SINGLE_DEPTH_NESTING);
658 spin_lock(&irq->irq_lock); 659 raw_spin_lock(&irq->irq_lock);
659 660
660 /* 661 /*
661 * If the affinity has been preserved, move the 662 * If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
675 target_vcpu_needs_kick = true; 676 target_vcpu_needs_kick = true;
676 } 677 }
677 678
678 spin_unlock(&irq->irq_lock); 679 raw_spin_unlock(&irq->irq_lock);
679 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 680 raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
680 spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); 681 raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
681 682
682 if (target_vcpu_needs_kick) { 683 if (target_vcpu_needs_kick) {
683 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); 684 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
687 goto retry; 688 goto retry;
688 } 689 }
689 690
690 spin_unlock(&vgic_cpu->ap_list_lock); 691 raw_spin_unlock(&vgic_cpu->ap_list_lock);
691} 692}
692 693
693static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) 694static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
741 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 742 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
742 int w; 743 int w;
743 744
744 spin_lock(&irq->irq_lock); 745 raw_spin_lock(&irq->irq_lock);
745 /* GICv2 SGIs can count for more than one... */ 746 /* GICv2 SGIs can count for more than one... */
746 w = vgic_irq_get_lr_count(irq); 747 w = vgic_irq_get_lr_count(irq);
747 spin_unlock(&irq->irq_lock); 748 raw_spin_unlock(&irq->irq_lock);
748 749
749 count += w; 750 count += w;
750 *multi_sgi |= (w > 1); 751 *multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
770 count = 0; 771 count = 0;
771 772
772 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 773 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
773 spin_lock(&irq->irq_lock); 774 raw_spin_lock(&irq->irq_lock);
774 775
775 /* 776 /*
776 * If we have multi-SGIs in the pipeline, we need to 777 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
780 * the AP list has been sorted already. 781 * the AP list has been sorted already.
781 */ 782 */
782 if (multi_sgi && irq->priority > prio) { 783 if (multi_sgi && irq->priority > prio) {
783 spin_unlock(&irq->irq_lock); 784 raw_spin_unlock(&irq->irq_lock);
784 break; 785 break;
785 } 786 }
786 787
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
791 prio = irq->priority; 792 prio = irq->priority;
792 } 793 }
793 794
794 spin_unlock(&irq->irq_lock); 795 raw_spin_unlock(&irq->irq_lock);
795 796
796 if (count == kvm_vgic_global_state.nr_lr) { 797 if (count == kvm_vgic_global_state.nr_lr) {
797 if (!list_is_last(&irq->ap_list, 798 if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
872 873
873 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 874 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
874 875
875 spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); 876 raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
876 vgic_flush_lr_state(vcpu); 877 vgic_flush_lr_state(vcpu);
877 spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); 878 raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
878 879
879 if (can_access_vgic_from_kernel()) 880 if (can_access_vgic_from_kernel())
880 vgic_restore_state(vcpu); 881 vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
918 919
919 vgic_get_vmcr(vcpu, &vmcr); 920 vgic_get_vmcr(vcpu, &vmcr);
920 921
921 spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); 922 raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
922 923
923 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 924 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
924 spin_lock(&irq->irq_lock); 925 raw_spin_lock(&irq->irq_lock);
925 pending = irq_is_pending(irq) && irq->enabled && 926 pending = irq_is_pending(irq) && irq->enabled &&
926 !irq->active && 927 !irq->active &&
927 irq->priority < vmcr.pmr; 928 irq->priority < vmcr.pmr;
928 spin_unlock(&irq->irq_lock); 929 raw_spin_unlock(&irq->irq_lock);
929 930
930 if (pending) 931 if (pending)
931 break; 932 break;
932 } 933 }
933 934
934 spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); 935 raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
935 936
936 return pending; 937 return pending;
937} 938}
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
963 return false; 964 return false;
964 965
965 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 966 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
966 spin_lock_irqsave(&irq->irq_lock, flags); 967 raw_spin_lock_irqsave(&irq->irq_lock, flags);
967 map_is_active = irq->hw && irq->active; 968 map_is_active = irq->hw && irq->active;
968 spin_unlock_irqrestore(&irq->irq_lock, flags); 969 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
969 vgic_put_irq(vcpu->kvm, irq); 970 vgic_put_irq(vcpu->kvm, irq);
970 971
971 return map_is_active; 972 return map_is_active;
972} 973}
973