32 files changed, 838 insertions, 635 deletions
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virtual/kvm/devices/arm-vgic-its.txt index eb06beb75960..8d5830eab26a 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt +++ b/Documentation/virtual/kvm/devices/arm-vgic-its.txt | |||
@@ -33,6 +33,10 @@ Groups: | |||
33 | request the initialization of the ITS, no additional parameter in | 33 | request the initialization of the ITS, no additional parameter in |
34 | kvm_device_attr.addr. | 34 | kvm_device_attr.addr. |
35 | 35 | ||
36 | KVM_DEV_ARM_ITS_CTRL_RESET | ||
37 | reset the ITS, no additional parameter in kvm_device_attr.addr. | ||
38 | See "ITS Reset State" section. | ||
39 | |||
36 | KVM_DEV_ARM_ITS_SAVE_TABLES | 40 | KVM_DEV_ARM_ITS_SAVE_TABLES |
37 | save the ITS table data into guest RAM, at the location provisioned | 41 | save the ITS table data into guest RAM, at the location provisioned |
38 | by the guest in corresponding registers/table entries. | 42 | by the guest in corresponding registers/table entries. |
@@ -157,3 +161,19 @@ Then vcpus can be started. | |||
157 | - pINTID is the physical LPI ID; if zero, it means the entry is not valid | 161 | - pINTID is the physical LPI ID; if zero, it means the entry is not valid |
158 | and other fields are not meaningful. | 162 | and other fields are not meaningful. |
159 | - ICID is the collection ID | 163 | - ICID is the collection ID |
164 | |||
165 | ITS Reset State: | ||
166 | ---------------- | ||
167 | |||
168 | RESET returns the ITS to the same state it was in when first created and | ||
169 | initialized. When the RESET command returns, the following things are | ||
170 | guaranteed: | ||
171 | |||
172 | - The ITS is not enabled and is quiescent | ||
173 | GITS_CTLR.Enabled = 0, GITS_CTLR.Quiescent = 1 | ||
174 | - There is no internally cached state | ||
175 | - No collection or device table is used | ||
176 | GITS_BASER<n>.Valid = 0 | ||
177 | - GITS_CBASER = 0, GITS_CREADR = 0, GITS_CWRITER = 0 | ||
178 | - The ABI version is unchanged and remains the one set when the ITS | ||
179 | device was first created. | ||
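As a usage illustration (not part of the patch), here is a minimal sketch of how a VMM could issue this reset through the KVM device control API; it assumes its_fd is the file descriptor returned by KVM_CREATE_DEVICE for a KVM_DEV_TYPE_ARM_VGIC_ITS device and omits error handling:

```c
/*
 * Sketch only: reset a previously created ITS device from userspace.
 * Assumes its_fd came from KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_ITS.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int its_reset(int its_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_ITS_CTRL_RESET,
		/* no additional parameter, so .addr and .flags stay 0 */
	};

	return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
}
```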
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 14d68a4d826f..36dd2962a42d 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h | |||
@@ -68,6 +68,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | |||
68 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); | 68 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); |
69 | extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); | 69 | extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); |
70 | 70 | ||
71 | extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); | ||
72 | |||
71 | extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); | 73 | extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); |
72 | 74 | ||
73 | extern void __init_stage2_translation(void); | 75 | extern void __init_stage2_translation(void); |
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 98089ffd91bb..3d22eb87f919 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -25,7 +25,22 @@ | |||
25 | #include <asm/kvm_arm.h> | 25 | #include <asm/kvm_arm.h> |
26 | #include <asm/cputype.h> | 26 | #include <asm/cputype.h> |
27 | 27 | ||
28 | /* arm64 compatibility macros */ | ||
29 | #define COMPAT_PSR_MODE_ABT ABT_MODE | ||
30 | #define COMPAT_PSR_MODE_UND UND_MODE | ||
31 | #define COMPAT_PSR_T_BIT PSR_T_BIT | ||
32 | #define COMPAT_PSR_I_BIT PSR_I_BIT | ||
33 | #define COMPAT_PSR_A_BIT PSR_A_BIT | ||
34 | #define COMPAT_PSR_E_BIT PSR_E_BIT | ||
35 | #define COMPAT_PSR_IT_MASK PSR_IT_MASK | ||
36 | |||
28 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); | 37 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); |
38 | |||
39 | static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num) | ||
40 | { | ||
41 | return vcpu_reg(vcpu, reg_num); | ||
42 | } | ||
43 | |||
29 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); | 44 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); |
30 | 45 | ||
31 | static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, | 46 | static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, |
@@ -42,10 +57,25 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, | |||
42 | 57 | ||
43 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); | 58 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); |
44 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); | 59 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); |
45 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | 60 | void kvm_inject_undef32(struct kvm_vcpu *vcpu); |
61 | void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); | ||
62 | void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); | ||
46 | void kvm_inject_vabt(struct kvm_vcpu *vcpu); | 63 | void kvm_inject_vabt(struct kvm_vcpu *vcpu); |
47 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | 64 | |
48 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); | 65 | static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu) |
66 | { | ||
67 | kvm_inject_undef32(vcpu); | ||
68 | } | ||
69 | |||
70 | static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
71 | { | ||
72 | kvm_inject_dabt32(vcpu, addr); | ||
73 | } | ||
74 | |||
75 | static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
76 | { | ||
77 | kvm_inject_pabt32(vcpu, addr); | ||
78 | } | ||
49 | 79 | ||
50 | static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) | 80 | static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) |
51 | { | 81 | { |
@@ -203,7 +233,7 @@ static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu) | |||
203 | 233 | ||
204 | static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) | 234 | static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) |
205 | { | 235 | { |
206 | switch (kvm_vcpu_trap_get_fault_type(vcpu)) { | 236 | switch (kvm_vcpu_trap_get_fault(vcpu)) { |
207 | case FSC_SEA: | 237 | case FSC_SEA: |
208 | case FSC_SEA_TTW0: | 238 | case FSC_SEA_TTW0: |
209 | case FSC_SEA_TTW1: | 239 | case FSC_SEA_TTW1: |
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index 14b5903f0224..ab20ffa8b9e7 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h | |||
@@ -98,8 +98,8 @@ | |||
98 | #define cntvoff_el2 CNTVOFF | 98 | #define cntvoff_el2 CNTVOFF |
99 | #define cnthctl_el2 CNTHCTL | 99 | #define cnthctl_el2 CNTHCTL |
100 | 100 | ||
101 | void __timer_save_state(struct kvm_vcpu *vcpu); | 101 | void __timer_enable_traps(struct kvm_vcpu *vcpu); |
102 | void __timer_restore_state(struct kvm_vcpu *vcpu); | 102 | void __timer_disable_traps(struct kvm_vcpu *vcpu); |
103 | 103 | ||
104 | void __vgic_v2_save_state(struct kvm_vcpu *vcpu); | 104 | void __vgic_v2_save_state(struct kvm_vcpu *vcpu); |
105 | void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); | 105 | void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); |
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 5db2d4c6a55f..b56895593c84 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h | |||
@@ -151,6 +151,12 @@ struct kvm_arch_memory_slot { | |||
151 | (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) | 151 | (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) |
152 | #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) | 152 | #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) |
153 | 153 | ||
154 | /* PL1 Physical Timer Registers */ | ||
155 | #define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) | ||
156 | #define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) | ||
157 | #define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) | ||
158 | |||
159 | /* Virtual Timer Registers */ | ||
154 | #define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) | 160 | #define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) |
155 | #define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) | 161 | #define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) |
156 | #define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) | 162 | #define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) |
@@ -215,6 +221,7 @@ struct kvm_arch_memory_slot { | |||
215 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 | 221 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 |
216 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 | 222 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 |
217 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 | 223 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 |
224 | #define KVM_DEV_ARM_ITS_CTRL_RESET 4 | ||
218 | 225 | ||
219 | /* KVM_IRQ_LINE irq field index values */ | 226 | /* KVM_IRQ_LINE irq field index values */ |
220 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 227 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index 0064b86a2c87..cdff963f133a 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c | |||
@@ -165,145 +165,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) | |||
165 | * Inject exceptions into the guest | 165 | * Inject exceptions into the guest |
166 | */ | 166 | */ |
167 | 167 | ||
168 | static u32 exc_vector_base(struct kvm_vcpu *vcpu) | ||
169 | { | ||
170 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); | ||
171 | u32 vbar = vcpu_cp15(vcpu, c12_VBAR); | ||
172 | |||
173 | if (sctlr & SCTLR_V) | ||
174 | return 0xffff0000; | ||
175 | else /* always have security exceptions */ | ||
176 | return vbar; | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * Switch to an exception mode, updating both CPSR and SPSR. Follow | ||
181 | * the logic described in AArch32.EnterMode() from the ARMv8 ARM. | ||
182 | */ | ||
183 | static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) | ||
184 | { | ||
185 | unsigned long cpsr = *vcpu_cpsr(vcpu); | ||
186 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); | ||
187 | |||
188 | *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; | ||
189 | |||
190 | switch (mode) { | ||
191 | case FIQ_MODE: | ||
192 | *vcpu_cpsr(vcpu) |= PSR_F_BIT; | ||
193 | /* Fall through */ | ||
194 | case ABT_MODE: | ||
195 | case IRQ_MODE: | ||
196 | *vcpu_cpsr(vcpu) |= PSR_A_BIT; | ||
197 | /* Fall through */ | ||
198 | default: | ||
199 | *vcpu_cpsr(vcpu) |= PSR_I_BIT; | ||
200 | } | ||
201 | |||
202 | *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); | ||
203 | |||
204 | if (sctlr & SCTLR_TE) | ||
205 | *vcpu_cpsr(vcpu) |= PSR_T_BIT; | ||
206 | if (sctlr & SCTLR_EE) | ||
207 | *vcpu_cpsr(vcpu) |= PSR_E_BIT; | ||
208 | |||
209 | /* Note: These now point to the mode banked copies */ | ||
210 | *vcpu_spsr(vcpu) = cpsr; | ||
211 | } | ||
212 | |||
213 | /** | ||
214 | * kvm_inject_undefined - inject an undefined exception into the guest | ||
215 | * @vcpu: The VCPU to receive the undefined exception | ||
216 | * | ||
217 | * It is assumed that this code is called from the VCPU thread and that the | ||
218 | * VCPU therefore is not currently executing guest code. | ||
219 | * | ||
220 | * Modelled after TakeUndefInstrException() pseudocode. | ||
221 | */ | ||
222 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | ||
223 | { | ||
224 | unsigned long cpsr = *vcpu_cpsr(vcpu); | ||
225 | bool is_thumb = (cpsr & PSR_T_BIT); | ||
226 | u32 vect_offset = 4; | ||
227 | u32 return_offset = (is_thumb) ? 2 : 4; | ||
228 | |||
229 | kvm_update_psr(vcpu, UND_MODE); | ||
230 | *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset; | ||
231 | |||
232 | /* Branch to exception vector */ | ||
233 | *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Modelled after TakeDataAbortException() and TakePrefetchAbortException | ||
238 | * pseudocode. | ||
239 | */ | ||
240 | static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) | ||
241 | { | ||
242 | unsigned long cpsr = *vcpu_cpsr(vcpu); | ||
243 | bool is_thumb = (cpsr & PSR_T_BIT); | ||
244 | u32 vect_offset; | ||
245 | u32 return_offset = (is_thumb) ? 4 : 0; | ||
246 | bool is_lpae; | ||
247 | |||
248 | kvm_update_psr(vcpu, ABT_MODE); | ||
249 | *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; | ||
250 | |||
251 | if (is_pabt) | ||
252 | vect_offset = 12; | ||
253 | else | ||
254 | vect_offset = 16; | ||
255 | |||
256 | /* Branch to exception vector */ | ||
257 | *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; | ||
258 | |||
259 | if (is_pabt) { | ||
260 | /* Set IFAR and IFSR */ | ||
261 | vcpu_cp15(vcpu, c6_IFAR) = addr; | ||
262 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); | ||
263 | /* Always give debug fault for now - should give guest a clue */ | ||
264 | if (is_lpae) | ||
265 | vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22; | ||
266 | else | ||
267 | vcpu_cp15(vcpu, c5_IFSR) = 2; | ||
268 | } else { /* !iabt */ | ||
269 | /* Set DFAR and DFSR */ | ||
270 | vcpu_cp15(vcpu, c6_DFAR) = addr; | ||
271 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); | ||
272 | /* Always give debug fault for now - should give guest a clue */ | ||
273 | if (is_lpae) | ||
274 | vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22; | ||
275 | else | ||
276 | vcpu_cp15(vcpu, c5_DFSR) = 2; | ||
277 | } | ||
278 | |||
279 | } | ||
280 | |||
281 | /** | ||
282 | * kvm_inject_dabt - inject a data abort into the guest | ||
283 | * @vcpu: The VCPU to receive the undefined exception | ||
284 | * @addr: The address to report in the DFAR | ||
285 | * | ||
286 | * It is assumed that this code is called from the VCPU thread and that the | ||
287 | * VCPU therefore is not currently executing guest code. | ||
288 | */ | ||
289 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
290 | { | ||
291 | inject_abt(vcpu, false, addr); | ||
292 | } | ||
293 | |||
294 | /** | ||
295 | * kvm_inject_pabt - inject a prefetch abort into the guest | ||
296 | * @vcpu: The VCPU to receive the undefined exception | ||
297 | * @addr: The address to report in the DFAR | ||
298 | * | ||
299 | * It is assumed that this code is called from the VCPU thread and that the | ||
300 | * VCPU therefore is not currently executing guest code. | ||
301 | */ | ||
302 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
303 | { | ||
304 | inject_abt(vcpu, true, addr); | ||
305 | } | ||
306 | |||
307 | /** | 168 | /** |
308 | * kvm_inject_vabt - inject an async abort / SError into the guest | 169 | * kvm_inject_vabt - inject an async abort / SError into the guest |
309 | * @vcpu: The VCPU to receive the exception | 170 | * @vcpu: The VCPU to receive the exception |
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index ebd2dd46adf7..330c9ce34ba5 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c | |||
@@ -174,7 +174,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) | |||
174 | __activate_vm(vcpu); | 174 | __activate_vm(vcpu); |
175 | 175 | ||
176 | __vgic_restore_state(vcpu); | 176 | __vgic_restore_state(vcpu); |
177 | __timer_restore_state(vcpu); | 177 | __timer_enable_traps(vcpu); |
178 | 178 | ||
179 | __sysreg_restore_state(guest_ctxt); | 179 | __sysreg_restore_state(guest_ctxt); |
180 | __banked_restore_state(guest_ctxt); | 180 | __banked_restore_state(guest_ctxt); |
@@ -191,7 +191,8 @@ again: | |||
191 | 191 | ||
192 | __banked_save_state(guest_ctxt); | 192 | __banked_save_state(guest_ctxt); |
193 | __sysreg_save_state(guest_ctxt); | 193 | __sysreg_save_state(guest_ctxt); |
194 | __timer_save_state(vcpu); | 194 | __timer_disable_traps(vcpu); |
195 | |||
195 | __vgic_save_state(vcpu); | 196 | __vgic_save_state(vcpu); |
196 | 197 | ||
197 | __deactivate_traps(vcpu); | 198 | __deactivate_traps(vcpu); |
@@ -237,7 +238,7 @@ void __hyp_text __noreturn __hyp_panic(int cause) | |||
237 | 238 | ||
238 | vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR); | 239 | vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR); |
239 | host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); | 240 | host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); |
240 | __timer_save_state(vcpu); | 241 | __timer_disable_traps(vcpu); |
241 | __deactivate_traps(vcpu); | 242 | __deactivate_traps(vcpu); |
242 | __deactivate_vm(vcpu); | 243 | __deactivate_vm(vcpu); |
243 | __banked_restore_state(host_ctxt); | 244 | __banked_restore_state(host_ctxt); |
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index a652ce0a5cb2..04275de614db 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h | |||
@@ -52,6 +52,7 @@ struct arch_timer_erratum_workaround { | |||
52 | const char *desc; | 52 | const char *desc; |
53 | u32 (*read_cntp_tval_el0)(void); | 53 | u32 (*read_cntp_tval_el0)(void); |
54 | u32 (*read_cntv_tval_el0)(void); | 54 | u32 (*read_cntv_tval_el0)(void); |
55 | u64 (*read_cntpct_el0)(void); | ||
55 | u64 (*read_cntvct_el0)(void); | 56 | u64 (*read_cntvct_el0)(void); |
56 | int (*set_next_event_phys)(unsigned long, struct clock_event_device *); | 57 | int (*set_next_event_phys)(unsigned long, struct clock_event_device *); |
57 | int (*set_next_event_virt)(unsigned long, struct clock_event_device *); | 58 | int (*set_next_event_virt)(unsigned long, struct clock_event_device *); |
@@ -148,11 +149,8 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl) | |||
148 | 149 | ||
149 | static inline u64 arch_counter_get_cntpct(void) | 150 | static inline u64 arch_counter_get_cntpct(void) |
150 | { | 151 | { |
151 | /* | 152 | isb(); |
152 | * AArch64 kernel and user space mandate the use of CNTVCT. | 153 | return arch_timer_reg_read_stable(cntpct_el0); |
153 | */ | ||
154 | BUG(); | ||
155 | return 0; | ||
156 | } | 154 | } |
157 | 155 | ||
158 | static inline u64 arch_counter_get_cntvct(void) | 156 | static inline u64 arch_counter_get_cntvct(void) |
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 26a64d0f9ab9..ab4d0a926043 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h | |||
@@ -55,6 +55,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | |||
55 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); | 55 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); |
56 | extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); | 56 | extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); |
57 | 57 | ||
58 | extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); | ||
59 | |||
58 | extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); | 60 | extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); |
59 | 61 | ||
60 | extern u64 __vgic_v3_get_ich_vtr_el2(void); | 62 | extern u64 __vgic_v3_get_ich_vtr_el2(void); |
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index e5df3fce0008..5f28dfa14cee 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -41,6 +41,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu); | |||
41 | void kvm_inject_vabt(struct kvm_vcpu *vcpu); | 41 | void kvm_inject_vabt(struct kvm_vcpu *vcpu); |
42 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | 42 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); |
43 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); | 43 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); |
44 | void kvm_inject_undef32(struct kvm_vcpu *vcpu); | ||
45 | void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); | ||
46 | void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); | ||
44 | 47 | ||
45 | static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) | 48 | static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) |
46 | { | 49 | { |
@@ -237,7 +240,7 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) | |||
237 | 240 | ||
238 | static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) | 241 | static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) |
239 | { | 242 | { |
240 | switch (kvm_vcpu_trap_get_fault_type(vcpu)) { | 243 | switch (kvm_vcpu_trap_get_fault(vcpu)) { |
241 | case FSC_SEA: | 244 | case FSC_SEA: |
242 | case FSC_SEA_TTW0: | 245 | case FSC_SEA_TTW0: |
243 | case FSC_SEA_TTW1: | 246 | case FSC_SEA_TTW1: |
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 4572a9b560fa..08d3bb66c8b7 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h | |||
@@ -129,8 +129,8 @@ void __vgic_v3_save_state(struct kvm_vcpu *vcpu); | |||
129 | void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); | 129 | void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); |
130 | int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu); | 130 | int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu); |
131 | 131 | ||
132 | void __timer_save_state(struct kvm_vcpu *vcpu); | 132 | void __timer_enable_traps(struct kvm_vcpu *vcpu); |
133 | void __timer_restore_state(struct kvm_vcpu *vcpu); | 133 | void __timer_disable_traps(struct kvm_vcpu *vcpu); |
134 | 134 | ||
135 | void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); | 135 | void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); |
136 | void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); | 136 | void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); |
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h index 81a076eb37fa..9ad60bae5c8d 100644 --- a/arch/arm64/include/asm/timex.h +++ b/arch/arm64/include/asm/timex.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * Use the current timer as a cycle counter since this is what we use for | 22 | * Use the current timer as a cycle counter since this is what we use for |
23 | * the delay loop. | 23 | * the delay loop. |
24 | */ | 24 | */ |
25 | #define get_cycles() arch_counter_get_cntvct() | 25 | #define get_cycles() arch_timer_read_counter() |
26 | 26 | ||
27 | #include <asm-generic/timex.h> | 27 | #include <asm-generic/timex.h> |
28 | 28 | ||
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 9f3ca24bbcc6..37ca7394549c 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot { | |||
195 | 195 | ||
196 | #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) | 196 | #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) |
197 | 197 | ||
198 | /* Physical Timer EL0 Registers */ | ||
199 | #define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1) | ||
200 | #define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2) | ||
201 | #define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1) | ||
202 | |||
203 | /* EL0 Virtual Timer Registers */ | ||
198 | #define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1) | 204 | #define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1) |
199 | #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) | 205 | #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) |
200 | #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) | 206 | #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) |
@@ -227,6 +233,7 @@ struct kvm_arch_memory_slot { | |||
227 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 | 233 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 |
228 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 | 234 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 |
229 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 | 235 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 |
236 | #define KVM_DEV_ARM_ITS_CTRL_RESET 4 | ||
230 | 237 | ||
231 | /* Device Control API on vcpu fd */ | 238 | /* Device Control API on vcpu fd */ |
232 | #define KVM_ARM_VCPU_PMU_V3_CTRL 0 | 239 | #define KVM_ARM_VCPU_PMU_V3_CTRL 0 |
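For illustration (not part of the patch), a minimal sketch of reading the emulated EL1 physical timer's compare value from userspace with the KVM_REG_ARM_PTIMER_* IDs defined above; it assumes the kernel accepts these IDs through KVM_GET_ONE_REG and that vcpu_fd is the file descriptor returned by KVM_CREATE_VCPU:

```c
/*
 * Sketch only: read the emulated EL1 physical timer's CVAL for a vcpu from
 * userspace.  Assumes vcpu_fd came from KVM_CREATE_VCPU and that the PTIMER
 * register IDs are accepted by KVM_GET_ONE_REG.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int get_ptimer_cval(int vcpu_fd, __u64 *cval)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PTIMER_CVAL,
		.addr = (__u64)(unsigned long)cval,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
```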
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 945e79c641c4..4994f4bdaca5 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -298,7 +298,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) | |||
298 | __activate_vm(vcpu); | 298 | __activate_vm(vcpu); |
299 | 299 | ||
300 | __vgic_restore_state(vcpu); | 300 | __vgic_restore_state(vcpu); |
301 | __timer_restore_state(vcpu); | 301 | __timer_enable_traps(vcpu); |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * We must restore the 32-bit state before the sysregs, thanks | 304 | * We must restore the 32-bit state before the sysregs, thanks |
@@ -368,7 +368,7 @@ again: | |||
368 | 368 | ||
369 | __sysreg_save_guest_state(guest_ctxt); | 369 | __sysreg_save_guest_state(guest_ctxt); |
370 | __sysreg32_save_state(vcpu); | 370 | __sysreg32_save_state(vcpu); |
371 | __timer_save_state(vcpu); | 371 | __timer_disable_traps(vcpu); |
372 | __vgic_save_state(vcpu); | 372 | __vgic_save_state(vcpu); |
373 | 373 | ||
374 | __deactivate_traps(vcpu); | 374 | __deactivate_traps(vcpu); |
@@ -436,7 +436,7 @@ void __hyp_text __noreturn __hyp_panic(void) | |||
436 | 436 | ||
437 | vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); | 437 | vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); |
438 | host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); | 438 | host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); |
439 | __timer_save_state(vcpu); | 439 | __timer_disable_traps(vcpu); |
440 | __deactivate_traps(vcpu); | 440 | __deactivate_traps(vcpu); |
441 | __deactivate_vm(vcpu); | 441 | __deactivate_vm(vcpu); |
442 | __sysreg_restore_host_state(host_ctxt); | 442 | __sysreg_restore_host_state(host_ctxt); |
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index da6a8cfa54a0..8ecbcb40e317 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c | |||
@@ -33,74 +33,6 @@ | |||
33 | #define LOWER_EL_AArch64_VECTOR 0x400 | 33 | #define LOWER_EL_AArch64_VECTOR 0x400 |
34 | #define LOWER_EL_AArch32_VECTOR 0x600 | 34 | #define LOWER_EL_AArch32_VECTOR 0x600 |
35 | 35 | ||
36 | static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) | ||
37 | { | ||
38 | unsigned long cpsr; | ||
39 | unsigned long new_spsr_value = *vcpu_cpsr(vcpu); | ||
40 | bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); | ||
41 | u32 return_offset = (is_thumb) ? 4 : 0; | ||
42 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); | ||
43 | |||
44 | cpsr = mode | COMPAT_PSR_I_BIT; | ||
45 | |||
46 | if (sctlr & (1 << 30)) | ||
47 | cpsr |= COMPAT_PSR_T_BIT; | ||
48 | if (sctlr & (1 << 25)) | ||
49 | cpsr |= COMPAT_PSR_E_BIT; | ||
50 | |||
51 | *vcpu_cpsr(vcpu) = cpsr; | ||
52 | |||
53 | /* Note: These now point to the banked copies */ | ||
54 | *vcpu_spsr(vcpu) = new_spsr_value; | ||
55 | *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; | ||
56 | |||
57 | /* Branch to exception vector */ | ||
58 | if (sctlr & (1 << 13)) | ||
59 | vect_offset += 0xffff0000; | ||
60 | else /* always have security exceptions */ | ||
61 | vect_offset += vcpu_cp15(vcpu, c12_VBAR); | ||
62 | |||
63 | *vcpu_pc(vcpu) = vect_offset; | ||
64 | } | ||
65 | |||
66 | static void inject_undef32(struct kvm_vcpu *vcpu) | ||
67 | { | ||
68 | prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Modelled after TakeDataAbortException() and TakePrefetchAbortException | ||
73 | * pseudocode. | ||
74 | */ | ||
75 | static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, | ||
76 | unsigned long addr) | ||
77 | { | ||
78 | u32 vect_offset; | ||
79 | u32 *far, *fsr; | ||
80 | bool is_lpae; | ||
81 | |||
82 | if (is_pabt) { | ||
83 | vect_offset = 12; | ||
84 | far = &vcpu_cp15(vcpu, c6_IFAR); | ||
85 | fsr = &vcpu_cp15(vcpu, c5_IFSR); | ||
86 | } else { /* !iabt */ | ||
87 | vect_offset = 16; | ||
88 | far = &vcpu_cp15(vcpu, c6_DFAR); | ||
89 | fsr = &vcpu_cp15(vcpu, c5_DFSR); | ||
90 | } | ||
91 | |||
92 | prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset); | ||
93 | |||
94 | *far = addr; | ||
95 | |||
96 | /* Give the guest an IMPLEMENTATION DEFINED exception */ | ||
97 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); | ||
98 | if (is_lpae) | ||
99 | *fsr = 1 << 9 | 0x34; | ||
100 | else | ||
101 | *fsr = 0x14; | ||
102 | } | ||
103 | |||
104 | enum exception_type { | 36 | enum exception_type { |
105 | except_type_sync = 0, | 37 | except_type_sync = 0, |
106 | except_type_irq = 0x80, | 38 | except_type_irq = 0x80, |
@@ -197,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu) | |||
197 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | 129 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) |
198 | { | 130 | { |
199 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | 131 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
200 | inject_abt32(vcpu, false, addr); | 132 | kvm_inject_dabt32(vcpu, addr); |
201 | else | 133 | else |
202 | inject_abt64(vcpu, false, addr); | 134 | inject_abt64(vcpu, false, addr); |
203 | } | 135 | } |
@@ -213,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | |||
213 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | 145 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) |
214 | { | 146 | { |
215 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | 147 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
216 | inject_abt32(vcpu, true, addr); | 148 | kvm_inject_pabt32(vcpu, addr); |
217 | else | 149 | else |
218 | inject_abt64(vcpu, true, addr); | 150 | inject_abt64(vcpu, true, addr); |
219 | } | 151 | } |
@@ -227,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | |||
227 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | 159 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) |
228 | { | 160 | { |
229 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | 161 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
230 | inject_undef32(vcpu); | 162 | kvm_inject_undef32(vcpu); |
231 | else | 163 | else |
232 | inject_undef64(vcpu); | 164 | inject_undef64(vcpu); |
233 | } | 165 | } |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2e070d3baf9f..bb0e41b3154e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -841,13 +841,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu, | |||
841 | struct sys_reg_params *p, | 841 | struct sys_reg_params *p, |
842 | const struct sys_reg_desc *r) | 842 | const struct sys_reg_desc *r) |
843 | { | 843 | { |
844 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | ||
845 | u64 now = kvm_phys_timer_read(); | 844 | u64 now = kvm_phys_timer_read(); |
845 | u64 cval; | ||
846 | 846 | ||
847 | if (p->is_write) | 847 | if (p->is_write) { |
848 | ptimer->cnt_cval = p->regval + now; | 848 | kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, |
849 | else | 849 | p->regval + now); |
850 | p->regval = ptimer->cnt_cval - now; | 850 | } else { |
851 | cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); | ||
852 | p->regval = cval - now; | ||
853 | } | ||
851 | 854 | ||
852 | return true; | 855 | return true; |
853 | } | 856 | } |
@@ -856,24 +859,10 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu, | |||
856 | struct sys_reg_params *p, | 859 | struct sys_reg_params *p, |
857 | const struct sys_reg_desc *r) | 860 | const struct sys_reg_desc *r) |
858 | { | 861 | { |
859 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | 862 | if (p->is_write) |
860 | 863 | kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval); | |
861 | if (p->is_write) { | 864 | else |
862 | /* ISTATUS bit is read-only */ | 865 | p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL); |
863 | ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT; | ||
864 | } else { | ||
865 | u64 now = kvm_phys_timer_read(); | ||
866 | |||
867 | p->regval = ptimer->cnt_ctl; | ||
868 | /* | ||
869 | * Set ISTATUS bit if it's expired. | ||
870 | * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is | ||
871 | * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit | ||
872 | * regardless of ENABLE bit for our implementation convenience. | ||
873 | */ | ||
874 | if (ptimer->cnt_cval <= now) | ||
875 | p->regval |= ARCH_TIMER_CTRL_IT_STAT; | ||
876 | } | ||
877 | 866 | ||
878 | return true; | 867 | return true; |
879 | } | 868 | } |
@@ -882,12 +871,10 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu, | |||
882 | struct sys_reg_params *p, | 871 | struct sys_reg_params *p, |
883 | const struct sys_reg_desc *r) | 872 | const struct sys_reg_desc *r) |
884 | { | 873 | { |
885 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | ||
886 | |||
887 | if (p->is_write) | 874 | if (p->is_write) |
888 | ptimer->cnt_cval = p->regval; | 875 | kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval); |
889 | else | 876 | else |
890 | p->regval = ptimer->cnt_cval; | 877 | p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); |
891 | 878 | ||
892 | return true; | 879 | return true; |
893 | } | 880 | } |
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fd4b7f684bd0..061476e92db7 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
@@ -158,6 +158,7 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, | |||
158 | * if we don't have the cp15 accessors we won't have a problem. | 158 | * if we don't have the cp15 accessors we won't have a problem. |
159 | */ | 159 | */ |
160 | u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; | 160 | u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; |
161 | EXPORT_SYMBOL_GPL(arch_timer_read_counter); | ||
161 | 162 | ||
162 | static u64 arch_counter_read(struct clocksource *cs) | 163 | static u64 arch_counter_read(struct clocksource *cs) |
163 | { | 164 | { |
@@ -217,6 +218,11 @@ static u32 notrace fsl_a008585_read_cntv_tval_el0(void) | |||
217 | return __fsl_a008585_read_reg(cntv_tval_el0); | 218 | return __fsl_a008585_read_reg(cntv_tval_el0); |
218 | } | 219 | } |
219 | 220 | ||
221 | static u64 notrace fsl_a008585_read_cntpct_el0(void) | ||
222 | { | ||
223 | return __fsl_a008585_read_reg(cntpct_el0); | ||
224 | } | ||
225 | |||
220 | static u64 notrace fsl_a008585_read_cntvct_el0(void) | 226 | static u64 notrace fsl_a008585_read_cntvct_el0(void) |
221 | { | 227 | { |
222 | return __fsl_a008585_read_reg(cntvct_el0); | 228 | return __fsl_a008585_read_reg(cntvct_el0); |
@@ -258,6 +264,11 @@ static u32 notrace hisi_161010101_read_cntv_tval_el0(void) | |||
258 | return __hisi_161010101_read_reg(cntv_tval_el0); | 264 | return __hisi_161010101_read_reg(cntv_tval_el0); |
259 | } | 265 | } |
260 | 266 | ||
267 | static u64 notrace hisi_161010101_read_cntpct_el0(void) | ||
268 | { | ||
269 | return __hisi_161010101_read_reg(cntpct_el0); | ||
270 | } | ||
271 | |||
261 | static u64 notrace hisi_161010101_read_cntvct_el0(void) | 272 | static u64 notrace hisi_161010101_read_cntvct_el0(void) |
262 | { | 273 | { |
263 | return __hisi_161010101_read_reg(cntvct_el0); | 274 | return __hisi_161010101_read_reg(cntvct_el0); |
@@ -288,6 +299,15 @@ static struct ate_acpi_oem_info hisi_161010101_oem_info[] = { | |||
288 | #endif | 299 | #endif |
289 | 300 | ||
290 | #ifdef CONFIG_ARM64_ERRATUM_858921 | 301 | #ifdef CONFIG_ARM64_ERRATUM_858921 |
302 | static u64 notrace arm64_858921_read_cntpct_el0(void) | ||
303 | { | ||
304 | u64 old, new; | ||
305 | |||
306 | old = read_sysreg(cntpct_el0); | ||
307 | new = read_sysreg(cntpct_el0); | ||
308 | return (((old ^ new) >> 32) & 1) ? old : new; | ||
309 | } | ||
310 | |||
291 | static u64 notrace arm64_858921_read_cntvct_el0(void) | 311 | static u64 notrace arm64_858921_read_cntvct_el0(void) |
292 | { | 312 | { |
293 | u64 old, new; | 313 | u64 old, new; |
@@ -310,16 +330,19 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long | |||
310 | struct clock_event_device *clk) | 330 | struct clock_event_device *clk) |
311 | { | 331 | { |
312 | unsigned long ctrl; | 332 | unsigned long ctrl; |
313 | u64 cval = evt + arch_counter_get_cntvct(); | 333 | u64 cval; |
314 | 334 | ||
315 | ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); | 335 | ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); |
316 | ctrl |= ARCH_TIMER_CTRL_ENABLE; | 336 | ctrl |= ARCH_TIMER_CTRL_ENABLE; |
317 | ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; | 337 | ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; |
318 | 338 | ||
319 | if (access == ARCH_TIMER_PHYS_ACCESS) | 339 | if (access == ARCH_TIMER_PHYS_ACCESS) { |
340 | cval = evt + arch_counter_get_cntpct(); | ||
320 | write_sysreg(cval, cntp_cval_el0); | 341 | write_sysreg(cval, cntp_cval_el0); |
321 | else | 342 | } else { |
343 | cval = evt + arch_counter_get_cntvct(); | ||
322 | write_sysreg(cval, cntv_cval_el0); | 344 | write_sysreg(cval, cntv_cval_el0); |
345 | } | ||
323 | 346 | ||
324 | arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); | 347 | arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); |
325 | } | 348 | } |
@@ -346,6 +369,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { | |||
346 | .desc = "Freescale erratum a005858", | 369 | .desc = "Freescale erratum a005858", |
347 | .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0, | 370 | .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0, |
348 | .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0, | 371 | .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0, |
372 | .read_cntpct_el0 = fsl_a008585_read_cntpct_el0, | ||
349 | .read_cntvct_el0 = fsl_a008585_read_cntvct_el0, | 373 | .read_cntvct_el0 = fsl_a008585_read_cntvct_el0, |
350 | .set_next_event_phys = erratum_set_next_event_tval_phys, | 374 | .set_next_event_phys = erratum_set_next_event_tval_phys, |
351 | .set_next_event_virt = erratum_set_next_event_tval_virt, | 375 | .set_next_event_virt = erratum_set_next_event_tval_virt, |
@@ -358,6 +382,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { | |||
358 | .desc = "HiSilicon erratum 161010101", | 382 | .desc = "HiSilicon erratum 161010101", |
359 | .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, | 383 | .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, |
360 | .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, | 384 | .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, |
385 | .read_cntpct_el0 = hisi_161010101_read_cntpct_el0, | ||
361 | .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, | 386 | .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, |
362 | .set_next_event_phys = erratum_set_next_event_tval_phys, | 387 | .set_next_event_phys = erratum_set_next_event_tval_phys, |
363 | .set_next_event_virt = erratum_set_next_event_tval_virt, | 388 | .set_next_event_virt = erratum_set_next_event_tval_virt, |
@@ -368,6 +393,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { | |||
368 | .desc = "HiSilicon erratum 161010101", | 393 | .desc = "HiSilicon erratum 161010101", |
369 | .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, | 394 | .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, |
370 | .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, | 395 | .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, |
396 | .read_cntpct_el0 = hisi_161010101_read_cntpct_el0, | ||
371 | .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, | 397 | .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, |
372 | .set_next_event_phys = erratum_set_next_event_tval_phys, | 398 | .set_next_event_phys = erratum_set_next_event_tval_phys, |
373 | .set_next_event_virt = erratum_set_next_event_tval_virt, | 399 | .set_next_event_virt = erratum_set_next_event_tval_virt, |
@@ -378,6 +404,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { | |||
378 | .match_type = ate_match_local_cap_id, | 404 | .match_type = ate_match_local_cap_id, |
379 | .id = (void *)ARM64_WORKAROUND_858921, | 405 | .id = (void *)ARM64_WORKAROUND_858921, |
380 | .desc = "ARM erratum 858921", | 406 | .desc = "ARM erratum 858921", |
407 | .read_cntpct_el0 = arm64_858921_read_cntpct_el0, | ||
381 | .read_cntvct_el0 = arm64_858921_read_cntvct_el0, | 408 | .read_cntvct_el0 = arm64_858921_read_cntvct_el0, |
382 | }, | 409 | }, |
383 | #endif | 410 | #endif |
@@ -890,7 +917,7 @@ static void __init arch_counter_register(unsigned type) | |||
890 | 917 | ||
891 | /* Register the CP15 based counter if we have one */ | 918 | /* Register the CP15 based counter if we have one */ |
892 | if (type & ARCH_TIMER_TYPE_CP15) { | 919 | if (type & ARCH_TIMER_TYPE_CP15) { |
893 | if (IS_ENABLED(CONFIG_ARM64) || | 920 | if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || |
894 | arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) | 921 | arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) |
895 | arch_timer_read_counter = arch_counter_get_cntvct; | 922 | arch_timer_read_counter = arch_counter_get_cntvct; |
896 | else | 923 | else |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index b5df99c6f680..854334a6f225 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -1228,7 +1228,9 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare | |||
1228 | goto out_unmap_rdist; | 1228 | goto out_unmap_rdist; |
1229 | 1229 | ||
1230 | gic_populate_ppi_partitions(node); | 1230 | gic_populate_ppi_partitions(node); |
1231 | gic_of_setup_kvm_info(node); | 1231 | |
1232 | if (static_key_true(&supports_deactivate)) | ||
1233 | gic_of_setup_kvm_info(node); | ||
1232 | return 0; | 1234 | return 0; |
1233 | 1235 | ||
1234 | out_unmap_rdist: | 1236 | out_unmap_rdist: |
@@ -1517,7 +1519,9 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) | |||
1517 | goto out_fwhandle_free; | 1519 | goto out_fwhandle_free; |
1518 | 1520 | ||
1519 | acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); | 1521 | acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); |
1520 | gic_acpi_setup_kvm_info(); | 1522 | |
1523 | if (static_key_true(&supports_deactivate)) | ||
1524 | gic_acpi_setup_kvm_info(); | ||
1521 | 1525 | ||
1522 | return 0; | 1526 | return 0; |
1523 | 1527 | ||
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 651d726e8b12..cd9371b749c2 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -1367,7 +1367,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) | |||
1367 | if (ret) | 1367 | if (ret) |
1368 | return; | 1368 | return; |
1369 | 1369 | ||
1370 | gic_set_kvm_info(&gic_v2_kvm_info); | 1370 | if (static_key_true(&supports_deactivate)) |
1371 | gic_set_kvm_info(&gic_v2_kvm_info); | ||
1371 | } | 1372 | } |
1372 | 1373 | ||
1373 | int __init | 1374 | int __init |
@@ -1599,7 +1600,8 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, | |||
1599 | if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) | 1600 | if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) |
1600 | gicv2m_init(NULL, gic_data[0].domain); | 1601 | gicv2m_init(NULL, gic_data[0].domain); |
1601 | 1602 | ||
1602 | gic_acpi_setup_kvm_info(); | 1603 | if (static_key_true(&supports_deactivate)) |
1604 | gic_acpi_setup_kvm_info(); | ||
1603 | 1605 | ||
1604 | return 0; | 1606 | return 0; |
1605 | } | 1607 | } |
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index f0053f884b4a..01ee473517e2 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h | |||
@@ -31,8 +31,15 @@ struct arch_timer_context { | |||
31 | /* Timer IRQ */ | 31 | /* Timer IRQ */ |
32 | struct kvm_irq_level irq; | 32 | struct kvm_irq_level irq; |
33 | 33 | ||
34 | /* Active IRQ state caching */ | 34 | /* |
35 | bool active_cleared_last; | 35 | * We have multiple paths which can save/restore the timer state |
36 | * onto the hardware, so we need some way of keeping track of | ||
37 | * where the latest state is. | ||
38 | * | ||
39 | * loaded == true: State is loaded on the hardware registers. | ||
40 | * loaded == false: State is stored in memory. | ||
41 | */ | ||
42 | bool loaded; | ||
36 | 43 | ||
37 | /* Virtual offset */ | 44 | /* Virtual offset */ |
38 | u64 cntvoff; | 45 | u64 cntvoff; |
@@ -43,13 +50,13 @@ struct arch_timer_cpu { | |||
43 | struct arch_timer_context ptimer; | 50 | struct arch_timer_context ptimer; |
44 | 51 | ||
45 | /* Background timer used when the guest is not running */ | 52 | /* Background timer used when the guest is not running */ |
46 | struct hrtimer timer; | 53 | struct hrtimer bg_timer; |
47 | 54 | ||
49 | /* Work queued when the above timer expires */ | 56 | /* Work queued when the above timer expires */ |
49 | struct work_struct expired; | 56 | struct work_struct expired; |
50 | 57 | ||
51 | /* Background timer active */ | 58 | /* Physical timer emulation */ |
52 | bool armed; | 59 | struct hrtimer phys_timer; |
53 | 60 | ||
54 | /* Is the timer enabled */ | 61 | /* Is the timer enabled */ |
55 | bool enabled; | 62 | bool enabled; |
@@ -59,7 +66,6 @@ int kvm_timer_hyp_init(void); | |||
59 | int kvm_timer_enable(struct kvm_vcpu *vcpu); | 66 | int kvm_timer_enable(struct kvm_vcpu *vcpu); |
60 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); | 67 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); |
61 | void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); | 68 | void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); |
62 | void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); | ||
63 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); | 69 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); |
64 | bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); | 70 | bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); |
65 | void kvm_timer_update_run(struct kvm_vcpu *vcpu); | 71 | void kvm_timer_update_run(struct kvm_vcpu *vcpu); |
@@ -72,16 +78,22 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); | |||
72 | int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); | 78 | int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); |
73 | int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); | 79 | int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); |
74 | 80 | ||
75 | bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx); | 81 | bool kvm_timer_is_pending(struct kvm_vcpu *vcpu); |
82 | |||
76 | void kvm_timer_schedule(struct kvm_vcpu *vcpu); | 83 | void kvm_timer_schedule(struct kvm_vcpu *vcpu); |
77 | void kvm_timer_unschedule(struct kvm_vcpu *vcpu); | 84 | void kvm_timer_unschedule(struct kvm_vcpu *vcpu); |
78 | 85 | ||
79 | u64 kvm_phys_timer_read(void); | 86 | u64 kvm_phys_timer_read(void); |
80 | 87 | ||
88 | void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); | ||
81 | void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); | 89 | void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); |
82 | 90 | ||
83 | void kvm_timer_init_vhe(void); | 91 | void kvm_timer_init_vhe(void); |
84 | 92 | ||
85 | #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) | 93 | #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) |
86 | #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) | 94 | #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) |
95 | |||
96 | void enable_el1_phys_timer_access(void); | ||
97 | void disable_el1_phys_timer_access(void); | ||
98 | |||
87 | #endif | 99 | #endif |
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c index 79c7c357804b..8bc479fa37e6 100644 --- a/virt/kvm/arm/aarch32.c +++ b/virt/kvm/arm/aarch32.c | |||
@@ -25,11 +25,6 @@ | |||
25 | #include <asm/kvm_emulate.h> | 25 | #include <asm/kvm_emulate.h> |
26 | #include <asm/kvm_hyp.h> | 26 | #include <asm/kvm_hyp.h> |
27 | 27 | ||
28 | #ifndef CONFIG_ARM64 | ||
29 | #define COMPAT_PSR_T_BIT PSR_T_BIT | ||
30 | #define COMPAT_PSR_IT_MASK PSR_IT_MASK | ||
31 | #endif | ||
32 | |||
33 | /* | 28 | /* |
34 | * stolen from arch/arm/kernel/opcodes.c | 29 | * stolen from arch/arm/kernel/opcodes.c |
35 | * | 30 | * |
@@ -150,3 +145,95 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) | |||
150 | *vcpu_pc(vcpu) += 4; | 145 | *vcpu_pc(vcpu) += 4; |
151 | kvm_adjust_itstate(vcpu); | 146 | kvm_adjust_itstate(vcpu); |
152 | } | 147 | } |
148 | |||
149 | /* | ||
150 | * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. | ||
151 | */ | ||
152 | static const u8 return_offsets[8][2] = { | ||
153 | [0] = { 0, 0 }, /* Reset, unused */ | ||
154 | [1] = { 4, 2 }, /* Undefined */ | ||
155 | [2] = { 0, 0 }, /* SVC, unused */ | ||
156 | [3] = { 4, 4 }, /* Prefetch abort */ | ||
157 | [4] = { 8, 8 }, /* Data abort */ | ||
158 | [5] = { 0, 0 }, /* HVC, unused */ | ||
159 | [6] = { 4, 4 }, /* IRQ, unused */ | ||
160 | [7] = { 4, 4 }, /* FIQ, unused */ | ||
161 | }; | ||
162 | |||
163 | static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) | ||
164 | { | ||
165 | unsigned long cpsr; | ||
166 | unsigned long new_spsr_value = *vcpu_cpsr(vcpu); | ||
167 | bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); | ||
168 | u32 return_offset = return_offsets[vect_offset >> 2][is_thumb]; | ||
169 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); | ||
170 | |||
171 | cpsr = mode | COMPAT_PSR_I_BIT; | ||
172 | |||
173 | if (sctlr & (1 << 30)) | ||
174 | cpsr |= COMPAT_PSR_T_BIT; | ||
175 | if (sctlr & (1 << 25)) | ||
176 | cpsr |= COMPAT_PSR_E_BIT; | ||
177 | |||
178 | *vcpu_cpsr(vcpu) = cpsr; | ||
179 | |||
180 | /* Note: These now point to the banked copies */ | ||
181 | *vcpu_spsr(vcpu) = new_spsr_value; | ||
182 | *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; | ||
183 | |||
184 | /* Branch to exception vector */ | ||
185 | if (sctlr & (1 << 13)) | ||
186 | vect_offset += 0xffff0000; | ||
187 | else /* always have security exceptions */ | ||
188 | vect_offset += vcpu_cp15(vcpu, c12_VBAR); | ||
189 | |||
190 | *vcpu_pc(vcpu) = vect_offset; | ||
191 | } | ||
192 | |||
193 | void kvm_inject_undef32(struct kvm_vcpu *vcpu) | ||
194 | { | ||
195 | prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * Modelled after TakeDataAbortException() and TakePrefetchAbortException | ||
200 | * pseudocode. | ||
201 | */ | ||
202 | static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, | ||
203 | unsigned long addr) | ||
204 | { | ||
205 | u32 vect_offset; | ||
206 | u32 *far, *fsr; | ||
207 | bool is_lpae; | ||
208 | |||
209 | if (is_pabt) { | ||
210 | vect_offset = 12; | ||
211 | far = &vcpu_cp15(vcpu, c6_IFAR); | ||
212 | fsr = &vcpu_cp15(vcpu, c5_IFSR); | ||
213 | } else { /* !iabt */ | ||
214 | vect_offset = 16; | ||
215 | far = &vcpu_cp15(vcpu, c6_DFAR); | ||
216 | fsr = &vcpu_cp15(vcpu, c5_DFSR); | ||
217 | } | ||
218 | |||
219 | prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset); | ||
220 | |||
221 | *far = addr; | ||
222 | |||
223 | /* Give the guest an IMPLEMENTATION DEFINED exception */ | ||
224 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); | ||
225 | if (is_lpae) | ||
226 | *fsr = 1 << 9 | 0x34; | ||
227 | else | ||
228 | *fsr = 0x14; | ||
229 | } | ||
230 | |||
231 | void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) | ||
232 | { | ||
233 | inject_abt32(vcpu, false, addr); | ||
234 | } | ||
235 | |||
236 | void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr) | ||
237 | { | ||
238 | inject_abt32(vcpu, true, addr); | ||
239 | } | ||
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 8e89d63005c7..4db54ff08d9e 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -46,49 +46,68 @@ static const struct kvm_irq_level default_vtimer_irq = { | |||
46 | .level = 1, | 46 | .level = 1, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) | 49 | static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx); |
50 | { | 50 | static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, |
51 | vcpu_vtimer(vcpu)->active_cleared_last = false; | 51 | struct arch_timer_context *timer_ctx); |
52 | } | 52 | static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx); |
53 | 53 | ||
54 | u64 kvm_phys_timer_read(void) | 54 | u64 kvm_phys_timer_read(void) |
55 | { | 55 | { |
56 | return timecounter->cc->read(timecounter->cc); | 56 | return timecounter->cc->read(timecounter->cc); |
57 | } | 57 | } |
58 | 58 | ||
59 | static bool timer_is_armed(struct arch_timer_cpu *timer) | 59 | static void soft_timer_start(struct hrtimer *hrt, u64 ns) |
60 | { | 60 | { |
61 | return timer->armed; | 61 | hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), |
62 | HRTIMER_MODE_ABS); | ||
62 | } | 63 | } |
63 | 64 | ||
64 | /* timer_arm: as in "arm the timer", not as in ARM the company */ | 65 | static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) |
65 | static void timer_arm(struct arch_timer_cpu *timer, u64 ns) | ||
66 | { | 66 | { |
67 | timer->armed = true; | 67 | hrtimer_cancel(hrt); |
68 | hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns), | 68 | if (work) |
69 | HRTIMER_MODE_ABS); | 69 | cancel_work_sync(work); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void timer_disarm(struct arch_timer_cpu *timer) | 72 | static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu) |
73 | { | 73 | { |
74 | if (timer_is_armed(timer)) { | 74 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
75 | hrtimer_cancel(&timer->timer); | 75 | |
76 | cancel_work_sync(&timer->expired); | 76 | /* |
77 | timer->armed = false; | 77 | * When using a userspace irqchip with the architected timers, we must |
78 | } | 78 | * prevent continuously exiting from the guest, and therefore mask the |
79 | * physical interrupt by disabling it on the host interrupt controller | ||
80 | * when the virtual level is high, such that the guest can make | ||
81 | * forward progress. Once we detect the output level being | ||
82 | * de-asserted, we unmask the interrupt again so that we exit from the | ||
83 | * guest when the timer fires. | ||
84 | */ | ||
85 | if (vtimer->irq.level) | ||
86 | disable_percpu_irq(host_vtimer_irq); | ||
87 | else | ||
88 | enable_percpu_irq(host_vtimer_irq, 0); | ||
79 | } | 89 | } |
80 | 90 | ||
81 | static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) | 91 | static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) |
82 | { | 92 | { |
83 | struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; | 93 | struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; |
94 | struct arch_timer_context *vtimer; | ||
95 | |||
96 | if (!vcpu) { | ||
97 | pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n"); | ||
98 | return IRQ_NONE; | ||
99 | } | ||
100 | vtimer = vcpu_vtimer(vcpu); | ||
101 | |||
102 | if (!vtimer->irq.level) { | ||
103 | vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); | ||
104 | if (kvm_timer_irq_can_fire(vtimer)) | ||
105 | kvm_timer_update_irq(vcpu, true, vtimer); | ||
106 | } | ||
107 | |||
108 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | ||
109 | kvm_vtimer_update_mask_user(vcpu); | ||
84 | 110 | ||
85 | /* | ||
86 | * We disable the timer in the world switch and let it be | ||
87 | * handled by kvm_timer_sync_hwstate(). Getting a timer | ||
88 | * interrupt at this point is a sure sign of some major | ||
89 | * breakage. | ||
90 | */ | ||
91 | pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu); | ||
92 | return IRQ_HANDLED; | 111 | return IRQ_HANDLED; |
93 | } | 112 | } |
94 | 113 | ||
@@ -158,13 +177,13 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu) | |||
158 | return min(min_virt, min_phys); | 177 | return min(min_virt, min_phys); |
159 | } | 178 | } |
160 | 179 | ||
161 | static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) | 180 | static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt) |
162 | { | 181 | { |
163 | struct arch_timer_cpu *timer; | 182 | struct arch_timer_cpu *timer; |
164 | struct kvm_vcpu *vcpu; | 183 | struct kvm_vcpu *vcpu; |
165 | u64 ns; | 184 | u64 ns; |
166 | 185 | ||
167 | timer = container_of(hrt, struct arch_timer_cpu, timer); | 186 | timer = container_of(hrt, struct arch_timer_cpu, bg_timer); |
168 | vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); | 187 | vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); |
169 | 188 | ||
170 | /* | 189 | /* |
@@ -182,7 +201,33 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) | |||
182 | return HRTIMER_NORESTART; | 201 | return HRTIMER_NORESTART; |
183 | } | 202 | } |
184 | 203 | ||
185 | bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) | 204 | static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt) |
205 | { | ||
206 | struct arch_timer_context *ptimer; | ||
207 | struct arch_timer_cpu *timer; | ||
208 | struct kvm_vcpu *vcpu; | ||
209 | u64 ns; | ||
210 | |||
211 | timer = container_of(hrt, struct arch_timer_cpu, phys_timer); | ||
212 | vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); | ||
213 | ptimer = vcpu_ptimer(vcpu); | ||
214 | |||
215 | /* | ||
216 | * Check that the timer has really expired from the guest's | ||
217 | * PoV (NTP on the host may have forced it to expire | ||
218 | * early). If not ready, schedule for a later time. | ||
219 | */ | ||
220 | ns = kvm_timer_compute_delta(ptimer); | ||
221 | if (unlikely(ns)) { | ||
222 | hrtimer_forward_now(hrt, ns_to_ktime(ns)); | ||
223 | return HRTIMER_RESTART; | ||
224 | } | ||
225 | |||
226 | kvm_timer_update_irq(vcpu, true, ptimer); | ||
227 | return HRTIMER_NORESTART; | ||
228 | } | ||
229 | |||
230 | static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) | ||
186 | { | 231 | { |
187 | u64 cval, now; | 232 | u64 cval, now; |
188 | 233 | ||
@@ -195,6 +240,25 @@ bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) | |||
195 | return cval <= now; | 240 | return cval <= now; |
196 | } | 241 | } |
197 | 242 | ||
243 | bool kvm_timer_is_pending(struct kvm_vcpu *vcpu) | ||
244 | { | ||
245 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
246 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | ||
247 | |||
248 | if (vtimer->irq.level || ptimer->irq.level) | ||
249 | return true; | ||
250 | |||
251 | /* | ||
252 | * When this is called from within the wait loop of kvm_vcpu_block(), | ||
253 | * the software view of the timer state is up to date (timer->loaded | ||
254 | * is false), and so we can simply check if the timer should fire now. | ||
255 | */ | ||
256 | if (!vtimer->loaded && kvm_timer_should_fire(vtimer)) | ||
257 | return true; | ||
258 | |||
259 | return kvm_timer_should_fire(ptimer); | ||
260 | } | ||
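The helper above leans on kvm_timer_should_fire(), whose middle is elided in the hunk further up (only the declarations and the final "return cval <= now;" are visible). A minimal sketch of that comparison, with the kvm_timer_irq_can_fire() guard expanded inline as an assumption about which CTL bits it tests; names follow the rest of this file:

    /* Sketch of the comparison kvm_timer_should_fire() performs; the
     * ENABLE/IT_MASK guard is an assumption, not copied from the patch. */
    static bool timer_should_fire_sketch(struct arch_timer_context *ctx)
    {
        u64 cval, now;

        if (!(ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE) ||
            (ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK))
            return false;                    /* disabled or masked: cannot fire */

        cval = ctx->cnt_cval;                         /* programmed compare value */
        now  = kvm_phys_timer_read() - ctx->cntvoff;  /* guest's view of the counter */

        return cval <= now;                  /* deadline reached or passed */
    }

This is also why the vtimer check above requires !vtimer->loaded: the cached cnt_ctl/cnt_cval fields are only trusted while the state is not loaded into hardware.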
261 | |||
198 | /* | 262 | /* |
199 | * Reflect the timer output level into the kvm_run structure | 263 | * Reflect the timer output level into the kvm_run structure |
200 | */ | 264 | */ |
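The function introduced under this comment (not shown in the hunk) publishes the vtimer and ptimer output levels to userspace when no in-kernel irqchip is present. A hedged sketch of how a VMM might consume those levels after each return from KVM_RUN; the bit names come from the uapi headers, while feed_emulated_gic() is a hypothetical stand-in for the VMM's own interrupt-controller model:

    #include <linux/kvm.h>
    #include <stdbool.h>

    void feed_emulated_gic(bool vtimer_line, bool ptimer_line);

    /* Userspace (VMM) side, illustrative only. */
    static void sync_timer_lines(struct kvm_run *run)
    {
        __u64 level = run->s.regs.device_irq_level;
        bool vtimer_line = level & KVM_ARM_DEV_EL1_VTIMER;
        bool ptimer_line = level & KVM_ARM_DEV_EL1_PTIMER;

        /* feed_emulated_gic() is hypothetical: forward the level-triggered
         * timer lines into the emulated interrupt controller. */
        feed_emulated_gic(vtimer_line, ptimer_line);
    }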
@@ -218,7 +282,6 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, | |||
218 | { | 282 | { |
219 | int ret; | 283 | int ret; |
220 | 284 | ||
221 | timer_ctx->active_cleared_last = false; | ||
222 | timer_ctx->irq.level = new_level; | 285 | timer_ctx->irq.level = new_level; |
223 | trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, | 286 | trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, |
224 | timer_ctx->irq.level); | 287 | timer_ctx->irq.level); |
@@ -232,9 +295,29 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, | |||
232 | } | 295 | } |
233 | } | 296 | } |
234 | 297 | ||
298 | /* Schedule the background timer for the emulated timer. */ | ||
299 | static void phys_timer_emulate(struct kvm_vcpu *vcpu) | ||
300 | { | ||
301 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
302 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | ||
303 | |||
304 | /* | ||
305 | * If the timer can fire now we have just raised the IRQ line and we | ||
306 | * don't need to have a soft timer scheduled for the future. If the | ||
307 | * timer cannot fire at all, then we also don't need a soft timer. | ||
308 | */ | ||
309 | if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) { | ||
310 | soft_timer_cancel(&timer->phys_timer, NULL); | ||
311 | return; | ||
312 | } | ||
313 | |||
314 | soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer)); | ||
315 | } | ||
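soft_timer_start() and soft_timer_cancel() replace the old timer_arm()/timer_disarm() helpers, but their bodies fall outside the hunks shown here. A plausible sketch, assuming they are thin wrappers that drive the hrtimer directly and optionally flush the expired work item (the exact implementation in the patch may differ):

    /* Assumed shape of the helpers used above; illustrative only. */
    static void soft_timer_start(struct hrtimer *hrt, u64 ns)
    {
        hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
    }

    static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
    {
        hrtimer_cancel(hrt);
        if (work)
            cancel_work_sync(work);
    }

Passing NULL as the work argument, as phys_timer_emulate() does above, then simply cancels the hrtimer without touching any workqueue item.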
316 | |||
235 | /* | 317 | /* |
236 | * Check if there was a change in the timer state (should we raise or lower | 318 | * Check if there was a change in the timer state, so that we should either |
237 | * the line level to the GIC). | 319 | * raise or lower the line level to the GIC or schedule a background timer to |
320 | * emulate the physical timer. | ||
238 | */ | 321 | */ |
239 | static void kvm_timer_update_state(struct kvm_vcpu *vcpu) | 322 | static void kvm_timer_update_state(struct kvm_vcpu *vcpu) |
240 | { | 323 | { |
@@ -242,12 +325,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) | |||
242 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 325 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
243 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | 326 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); |
244 | 327 | ||
245 | /* | ||
246 | * If userspace modified the timer registers via SET_ONE_REG before | ||
247 | * the vgic was initialized, we mustn't set the vtimer->irq.level value | ||
248 | * because the guest would never see the interrupt. Instead wait | ||
249 | * until we call this function from kvm_timer_flush_hwstate. | ||
250 | */ | ||
251 | if (unlikely(!timer->enabled)) | 328 | if (unlikely(!timer->enabled)) |
252 | return; | 329 | return; |
253 | 330 | ||
@@ -256,22 +333,32 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) | |||
256 | 333 | ||
257 | if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) | 334 | if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) |
258 | kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); | 335 | kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); |
336 | |||
337 | phys_timer_emulate(vcpu); | ||
259 | } | 338 | } |
260 | 339 | ||
261 | /* Schedule the background timer for the emulated timer. */ | 340 | static void vtimer_save_state(struct kvm_vcpu *vcpu) |
262 | static void kvm_timer_emulate(struct kvm_vcpu *vcpu, | ||
263 | struct arch_timer_context *timer_ctx) | ||
264 | { | 341 | { |
265 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 342 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
343 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
344 | unsigned long flags; | ||
266 | 345 | ||
267 | if (kvm_timer_should_fire(timer_ctx)) | 346 | local_irq_save(flags); |
268 | return; | ||
269 | 347 | ||
270 | if (!kvm_timer_irq_can_fire(timer_ctx)) | 348 | if (!vtimer->loaded) |
271 | return; | 349 | goto out; |
272 | 350 | ||
273 | /* The timer has not yet expired, schedule a background timer */ | 351 | if (timer->enabled) { |
274 | timer_arm(timer, kvm_timer_compute_delta(timer_ctx)); | 352 | vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); |
353 | vtimer->cnt_cval = read_sysreg_el0(cntv_cval); | ||
354 | } | ||
355 | |||
356 | /* Disable the virtual timer */ | ||
357 | write_sysreg_el0(0, cntv_ctl); | ||
358 | |||
359 | vtimer->loaded = false; | ||
360 | out: | ||
361 | local_irq_restore(flags); | ||
275 | } | 362 | } |
276 | 363 | ||
277 | /* | 364 | /* |
@@ -285,7 +372,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) | |||
285 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 372 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
286 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | 373 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); |
287 | 374 | ||
288 | BUG_ON(timer_is_armed(timer)); | 375 | vtimer_save_state(vcpu); |
289 | 376 | ||
290 | /* | 377 | /* |
291 | * No need to schedule a background timer if any guest timer has | 378 | * No need to schedule a background timer if any guest timer has |
@@ -306,70 +393,97 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) | |||
306 | * The guest timers have not yet expired, schedule a background timer. | 393 | * The guest timers have not yet expired, schedule a background timer. |
307 | * Set the earliest expiration time among the guest timers. | 394 | * Set the earliest expiration time among the guest timers. |
308 | */ | 395 | */ |
309 | timer_arm(timer, kvm_timer_earliest_exp(vcpu)); | 396 | soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); |
397 | } | ||
398 | |||
399 | static void vtimer_restore_state(struct kvm_vcpu *vcpu) | ||
400 | { | ||
401 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
402 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
403 | unsigned long flags; | ||
404 | |||
405 | local_irq_save(flags); | ||
406 | |||
407 | if (vtimer->loaded) | ||
408 | goto out; | ||
409 | |||
410 | if (timer->enabled) { | ||
411 | write_sysreg_el0(vtimer->cnt_cval, cntv_cval); | ||
412 | isb(); | ||
413 | write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl); | ||
414 | } | ||
415 | |||
416 | vtimer->loaded = true; | ||
417 | out: | ||
418 | local_irq_restore(flags); | ||
310 | } | 419 | } |
311 | 420 | ||
312 | void kvm_timer_unschedule(struct kvm_vcpu *vcpu) | 421 | void kvm_timer_unschedule(struct kvm_vcpu *vcpu) |
313 | { | 422 | { |
314 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 423 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
315 | timer_disarm(timer); | 424 | |
425 | vtimer_restore_state(vcpu); | ||
426 | |||
427 | soft_timer_cancel(&timer->bg_timer, &timer->expired); | ||
428 | } | ||
429 | |||
430 | static void set_cntvoff(u64 cntvoff) | ||
431 | { | ||
432 | u32 low = lower_32_bits(cntvoff); | ||
433 | u32 high = upper_32_bits(cntvoff); | ||
434 | |||
435 | /* | ||
436 | * Since kvm_call_hyp doesn't fully support the ARM PCS especially on | ||
437 | * 32-bit systems, but rather passes register by register shifted one | ||
438 | * place (we put the function address in r0/x0), we cannot simply pass | ||
439 | * a 64-bit value as an argument, but have to split the value into two | ||
440 | * 32-bit halves. | ||
441 | */ | ||
442 | kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); | ||
316 | } | 443 | } |
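The split described in the comment above is undone on the EL2 side by __kvm_timer_set_cntvoff() (see the timer-sr.c hunk later in this diff), which shifts the high half back up and ORs in the low half. A small self-contained round-trip check, assuming the usual lower_32_bits()/upper_32_bits() helpers from linux/kernel.h:

    /* Round-trip check for the 32-bit split/reassembly used above. */
    static void check_cntvoff_split(void)
    {
        u64 cntvoff = 0x0123456789abcdefULL;
        u32 low  = lower_32_bits(cntvoff);    /* 0x89abcdef */
        u32 high = upper_32_bits(cntvoff);    /* 0x01234567 */
        u64 rebuilt = (u64)high << 32 | low;  /* mirrors __kvm_timer_set_cntvoff() */

        WARN_ON(rebuilt != cntvoff);
    }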
317 | 444 | ||
318 | static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu) | 445 | static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu) |
319 | { | 446 | { |
320 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 447 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
321 | bool phys_active; | 448 | bool phys_active; |
322 | int ret; | 449 | int ret; |
323 | 450 | ||
324 | /* | ||
325 | * If we enter the guest with the virtual input level to the VGIC | ||
326 | * asserted, then we have already told the VGIC what we need to, and | ||
327 | * we don't need to exit from the guest until the guest deactivates | ||
328 | * the already injected interrupt, so therefore we should set the | ||
329 | * hardware active state to prevent unnecessary exits from the guest. | ||
330 | * | ||
331 | * Also, if we enter the guest with the virtual timer interrupt active, | ||
332 | * then it must be active on the physical distributor, because we set | ||
333 | * the HW bit and the guest must be able to deactivate the virtual and | ||
334 | * physical interrupt at the same time. | ||
335 | * | ||
336 | * Conversely, if the virtual input level is deasserted and the virtual | ||
337 | * interrupt is not active, then always clear the hardware active state | ||
338 | * to ensure that hardware interrupts from the timer triggers a guest | ||
339 | * exit. | ||
340 | */ | ||
341 | phys_active = vtimer->irq.level || | 451 | phys_active = vtimer->irq.level || |
342 | kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); | 452 | kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); |
343 | |||
344 | /* | ||
345 | * We want to avoid hitting the (re)distributor as much as | ||
346 | * possible, as this is a potentially expensive MMIO access | ||
347 | * (not to mention locks in the irq layer), and a solution for | ||
348 | * this is to cache the "active" state in memory. | ||
349 | * | ||
350 | * Things to consider: we cannot cache an "active set" state, | ||
351 | * because the HW can change this behind our back (it becomes | ||
352 | * "clear" in the HW). We must then restrict the caching to | ||
353 | * the "clear" state. | ||
354 | * | ||
355 | * The cache is invalidated on: | ||
356 | * - vcpu put, indicating that the HW cannot be trusted to be | ||
357 | * in a sane state on the next vcpu load, | ||
358 | * - any change in the interrupt state | ||
359 | * | ||
360 | * Usage conditions: | ||
361 | * - cached value is "active clear" | ||
362 | * - value to be programmed is "active clear" | ||
363 | */ | ||
364 | if (vtimer->active_cleared_last && !phys_active) | ||
365 | return; | ||
366 | 453 | ||
367 | ret = irq_set_irqchip_state(host_vtimer_irq, | 454 | ret = irq_set_irqchip_state(host_vtimer_irq, |
368 | IRQCHIP_STATE_ACTIVE, | 455 | IRQCHIP_STATE_ACTIVE, |
369 | phys_active); | 456 | phys_active); |
370 | WARN_ON(ret); | 457 | WARN_ON(ret); |
458 | } | ||
371 | 459 | ||
372 | vtimer->active_cleared_last = !phys_active; | 460 | static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu) |
461 | { | ||
462 | kvm_vtimer_update_mask_user(vcpu); | ||
463 | } | ||
464 | |||
465 | void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) | ||
466 | { | ||
467 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
468 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
469 | |||
470 | if (unlikely(!timer->enabled)) | ||
471 | return; | ||
472 | |||
473 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | ||
474 | kvm_timer_vcpu_load_user(vcpu); | ||
475 | else | ||
476 | kvm_timer_vcpu_load_vgic(vcpu); | ||
477 | |||
478 | set_cntvoff(vtimer->cntvoff); | ||
479 | |||
480 | vtimer_restore_state(vcpu); | ||
481 | |||
482 | if (has_vhe()) | ||
483 | disable_el1_phys_timer_access(); | ||
484 | |||
485 | /* Set the background timer for the physical timer emulation. */ | ||
486 | phys_timer_emulate(vcpu); | ||
373 | } | 487 | } |
374 | 488 | ||
375 | bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) | 489 | bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) |
@@ -389,48 +503,60 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) | |||
389 | ptimer->irq.level != plevel; | 503 | ptimer->irq.level != plevel; |
390 | } | 504 | } |
391 | 505 | ||
392 | static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu) | 506 | void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) |
393 | { | 507 | { |
394 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 508 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
509 | |||
510 | if (unlikely(!timer->enabled)) | ||
511 | return; | ||
512 | |||
513 | if (has_vhe()) | ||
514 | enable_el1_phys_timer_access(); | ||
515 | |||
516 | vtimer_save_state(vcpu); | ||
395 | 517 | ||
396 | /* | 518 | /* |
397 | * To prevent continuously exiting from the guest, we mask the | 519 | * Cancel the physical timer emulation, because the only case where we |
398 | * physical interrupt such that the guest can make forward progress. | 520 | * need it after a vcpu_put is in the context of a sleeping VCPU, and |
399 | * Once we detect the output level being deasserted, we unmask the | 521 | * in that case we already factor in the deadline for the physical |
400 | * interrupt again so that we exit from the guest when the timer | 522 | * timer when scheduling the bg_timer. |
401 | * fires. | 523 | * |
402 | */ | 524 | * In any case, we re-schedule the hrtimer for the physical timer when |
403 | if (vtimer->irq.level) | 525 | * coming back to the VCPU thread in kvm_timer_vcpu_load(). |
404 | disable_percpu_irq(host_vtimer_irq); | 526 | */ |
405 | else | 527 | soft_timer_cancel(&timer->phys_timer, NULL); |
406 | enable_percpu_irq(host_vtimer_irq, 0); | 528 | |
529 | /* | ||
530 | * The kernel may decide to run userspace after calling vcpu_put, so | ||
531 | * we reset cntvoff to 0 to ensure a consistent read between user | ||
532 | * accesses to the virtual counter and kernel access to the physical | ||
533 | * counter. | ||
534 | */ | ||
535 | set_cntvoff(0); | ||
407 | } | 536 | } |
408 | 537 | ||
409 | /** | 538 | static void unmask_vtimer_irq(struct kvm_vcpu *vcpu) |
410 | * kvm_timer_flush_hwstate - prepare timers before running the vcpu | ||
411 | * @vcpu: The vcpu pointer | ||
412 | * | ||
413 | * Check if the virtual timer has expired while we were running in the host, | ||
414 | * and inject an interrupt if that was the case, making sure the timer is | ||
415 | * masked or disabled on the host so that we keep executing. Also schedule a | ||
416 | * software timer for the physical timer if it is enabled. | ||
417 | */ | ||
418 | void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) | ||
419 | { | 539 | { |
420 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 540 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
421 | 541 | ||
422 | if (unlikely(!timer->enabled)) | 542 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { |
543 | kvm_vtimer_update_mask_user(vcpu); | ||
423 | return; | 544 | return; |
545 | } | ||
424 | 546 | ||
425 | kvm_timer_update_state(vcpu); | 547 | /* |
426 | 548 | * If the guest disabled the timer without acking the interrupt, then | |
427 | /* Set the background timer for the physical timer emulation. */ | 549 | * we must make sure the physical and virtual active states are in |
428 | kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu)); | 550 | * sync by deactivating the physical interrupt, because otherwise we |
429 | 551 | * wouldn't see the next timer interrupt in the host. | |
430 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | 552 | */ |
431 | kvm_timer_flush_hwstate_user(vcpu); | 553 | if (!kvm_vgic_map_is_active(vcpu, vtimer->irq.irq)) { |
432 | else | 554 | int ret; |
433 | kvm_timer_flush_hwstate_vgic(vcpu); | 555 | ret = irq_set_irqchip_state(host_vtimer_irq, |
556 | IRQCHIP_STATE_ACTIVE, | ||
557 | false); | ||
558 | WARN_ON(ret); | ||
559 | } | ||
434 | } | 560 | } |
435 | 561 | ||
436 | /** | 562 | /** |
@@ -442,19 +568,21 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) | |||
442 | */ | 568 | */ |
443 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) | 569 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) |
444 | { | 570 | { |
445 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 571 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
446 | |||
447 | /* | ||
448 | * This is to cancel the background timer for the physical timer | ||
449 | * emulation if it is set. | ||
450 | */ | ||
451 | timer_disarm(timer); | ||
452 | 572 | ||
453 | /* | 573 | /* |
454 | * The guest could have modified the timer registers or the timer | 574 | * If we entered the guest with the vtimer output asserted we have to |
455 | * could have expired, update the timer state. | 575 | * check if the guest has modified the timer so that we should lower |
576 | * the line at this point. | ||
456 | */ | 577 | */ |
457 | kvm_timer_update_state(vcpu); | 578 | if (vtimer->irq.level) { |
579 | vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); | ||
580 | vtimer->cnt_cval = read_sysreg_el0(cntv_cval); | ||
581 | if (!kvm_timer_should_fire(vtimer)) { | ||
582 | kvm_timer_update_irq(vcpu, false, vtimer); | ||
583 | unmask_vtimer_irq(vcpu); | ||
584 | } | ||
585 | } | ||
458 | } | 586 | } |
459 | 587 | ||
460 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) | 588 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) |
@@ -505,8 +633,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) | |||
505 | vcpu_ptimer(vcpu)->cntvoff = 0; | 633 | vcpu_ptimer(vcpu)->cntvoff = 0; |
506 | 634 | ||
507 | INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); | 635 | INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); |
508 | hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 636 | hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
509 | timer->timer.function = kvm_timer_expire; | 637 | timer->bg_timer.function = kvm_bg_timer_expire; |
638 | |||
639 | hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
640 | timer->phys_timer.function = kvm_phys_timer_expire; | ||
510 | 641 | ||
511 | vtimer->irq.irq = default_vtimer_irq.irq; | 642 | vtimer->irq.irq = default_vtimer_irq.irq; |
512 | ptimer->irq.irq = default_ptimer_irq.irq; | 643 | ptimer->irq.irq = default_ptimer_irq.irq; |
@@ -520,10 +651,11 @@ static void kvm_timer_init_interrupt(void *info) | |||
520 | int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) | 651 | int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) |
521 | { | 652 | { |
522 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 653 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
654 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | ||
523 | 655 | ||
524 | switch (regid) { | 656 | switch (regid) { |
525 | case KVM_REG_ARM_TIMER_CTL: | 657 | case KVM_REG_ARM_TIMER_CTL: |
526 | vtimer->cnt_ctl = value; | 658 | vtimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT; |
527 | break; | 659 | break; |
528 | case KVM_REG_ARM_TIMER_CNT: | 660 | case KVM_REG_ARM_TIMER_CNT: |
529 | update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value); | 661 | update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value); |
@@ -531,6 +663,13 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) | |||
531 | case KVM_REG_ARM_TIMER_CVAL: | 663 | case KVM_REG_ARM_TIMER_CVAL: |
532 | vtimer->cnt_cval = value; | 664 | vtimer->cnt_cval = value; |
533 | break; | 665 | break; |
666 | case KVM_REG_ARM_PTIMER_CTL: | ||
667 | ptimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT; | ||
668 | break; | ||
669 | case KVM_REG_ARM_PTIMER_CVAL: | ||
670 | ptimer->cnt_cval = value; | ||
671 | break; | ||
672 | |||
534 | default: | 673 | default: |
535 | return -1; | 674 | return -1; |
536 | } | 675 | } |
@@ -539,17 +678,38 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) | |||
539 | return 0; | 678 | return 0; |
540 | } | 679 | } |
541 | 680 | ||
681 | static u64 read_timer_ctl(struct arch_timer_context *timer) | ||
682 | { | ||
683 | /* | ||
684 | * Set ISTATUS bit if it's expired. | ||
685 | * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is | ||
686 | * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit | ||
687 | * regardless of ENABLE bit for our implementation convenience. | ||
688 | */ | ||
689 | if (!kvm_timer_compute_delta(timer)) | ||
690 | return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT; | ||
691 | else | ||
692 | return timer->cnt_ctl; | ||
693 | } | ||
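One observable effect of read_timer_ctl(): when userspace saves KVM_REG_ARM_TIMER_CTL through the ONE_REG interface after the compare value has passed, the value it reads back now has ISTATUS set, and the matching set_reg path above strips the bit again on restore. A minimal userspace sketch, with error handling omitted and vcpu_fd assumed to be an open VCPU file descriptor:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Save the virtual timer CTL; ISTATUS reflects whether CVAL has passed. */
    static __u64 save_vtimer_ctl(int vcpu_fd)
    {
        __u64 val = 0;
        struct kvm_one_reg reg = {
            .id   = KVM_REG_ARM_TIMER_CTL,
            .addr = (__u64)(unsigned long)&val,
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        return val;
    }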
694 | |||
542 | u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) | 695 | u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) |
543 | { | 696 | { |
697 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | ||
544 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 698 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
545 | 699 | ||
546 | switch (regid) { | 700 | switch (regid) { |
547 | case KVM_REG_ARM_TIMER_CTL: | 701 | case KVM_REG_ARM_TIMER_CTL: |
548 | return vtimer->cnt_ctl; | 702 | return read_timer_ctl(vtimer); |
549 | case KVM_REG_ARM_TIMER_CNT: | 703 | case KVM_REG_ARM_TIMER_CNT: |
550 | return kvm_phys_timer_read() - vtimer->cntvoff; | 704 | return kvm_phys_timer_read() - vtimer->cntvoff; |
551 | case KVM_REG_ARM_TIMER_CVAL: | 705 | case KVM_REG_ARM_TIMER_CVAL: |
552 | return vtimer->cnt_cval; | 706 | return vtimer->cnt_cval; |
707 | case KVM_REG_ARM_PTIMER_CTL: | ||
708 | return read_timer_ctl(ptimer); | ||
709 | case KVM_REG_ARM_PTIMER_CVAL: | ||
710 | return ptimer->cnt_cval; | ||
711 | case KVM_REG_ARM_PTIMER_CNT: | ||
712 | return kvm_phys_timer_read(); | ||
553 | } | 713 | } |
554 | return (u64)-1; | 714 | return (u64)-1; |
555 | } | 715 | } |
@@ -602,11 +762,20 @@ int kvm_timer_hyp_init(void) | |||
602 | return err; | 762 | return err; |
603 | } | 763 | } |
604 | 764 | ||
765 | err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus()); | ||
766 | if (err) { | ||
767 | kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); | ||
768 | goto out_free_irq; | ||
769 | } | ||
770 | |||
605 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); | 771 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); |
606 | 772 | ||
607 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, | 773 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, |
608 | "kvm/arm/timer:starting", kvm_timer_starting_cpu, | 774 | "kvm/arm/timer:starting", kvm_timer_starting_cpu, |
609 | kvm_timer_dying_cpu); | 775 | kvm_timer_dying_cpu); |
776 | return 0; | ||
777 | out_free_irq: | ||
778 | free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); | ||
610 | return err; | 779 | return err; |
611 | } | 780 | } |
612 | 781 | ||
@@ -615,7 +784,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) | |||
615 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 784 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
616 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 785 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
617 | 786 | ||
618 | timer_disarm(timer); | 787 | soft_timer_cancel(&timer->bg_timer, &timer->expired); |
788 | soft_timer_cancel(&timer->phys_timer, NULL); | ||
619 | kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq); | 789 | kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq); |
620 | } | 790 | } |
621 | 791 | ||
@@ -691,7 +861,11 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) | |||
691 | return ret; | 861 | return ret; |
692 | 862 | ||
693 | no_vgic: | 863 | no_vgic: |
864 | preempt_disable(); | ||
694 | timer->enabled = 1; | 865 | timer->enabled = 1; |
866 | kvm_timer_vcpu_load_vgic(vcpu); | ||
867 | preempt_enable(); | ||
868 | |||
695 | return 0; | 869 | return 0; |
696 | } | 870 | } |
697 | 871 | ||
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index b9f68e4add71..bc126fb99a3d 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -307,8 +307,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
307 | 307 | ||
308 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 308 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
309 | { | 309 | { |
310 | return kvm_timer_should_fire(vcpu_vtimer(vcpu)) || | 310 | return kvm_timer_is_pending(vcpu); |
311 | kvm_timer_should_fire(vcpu_ptimer(vcpu)); | ||
312 | } | 311 | } |
313 | 312 | ||
314 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) | 313 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) |
@@ -354,18 +353,18 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
354 | vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); | 353 | vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); |
355 | 354 | ||
356 | kvm_arm_set_running_vcpu(vcpu); | 355 | kvm_arm_set_running_vcpu(vcpu); |
357 | |||
358 | kvm_vgic_load(vcpu); | 356 | kvm_vgic_load(vcpu); |
357 | kvm_timer_vcpu_load(vcpu); | ||
359 | } | 358 | } |
360 | 359 | ||
361 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 360 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
362 | { | 361 | { |
362 | kvm_timer_vcpu_put(vcpu); | ||
363 | kvm_vgic_put(vcpu); | 363 | kvm_vgic_put(vcpu); |
364 | 364 | ||
365 | vcpu->cpu = -1; | 365 | vcpu->cpu = -1; |
366 | 366 | ||
367 | kvm_arm_set_running_vcpu(NULL); | 367 | kvm_arm_set_running_vcpu(NULL); |
368 | kvm_timer_vcpu_put(vcpu); | ||
369 | } | 368 | } |
370 | 369 | ||
371 | static void vcpu_power_off(struct kvm_vcpu *vcpu) | 370 | static void vcpu_power_off(struct kvm_vcpu *vcpu) |
@@ -654,11 +653,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
654 | 653 | ||
655 | kvm_pmu_flush_hwstate(vcpu); | 654 | kvm_pmu_flush_hwstate(vcpu); |
656 | 655 | ||
657 | kvm_timer_flush_hwstate(vcpu); | ||
658 | kvm_vgic_flush_hwstate(vcpu); | ||
659 | |||
660 | local_irq_disable(); | 656 | local_irq_disable(); |
661 | 657 | ||
658 | kvm_vgic_flush_hwstate(vcpu); | ||
659 | |||
662 | /* | 660 | /* |
663 | * If we have a signal pending, or need to notify a userspace | 661 | * If we have a signal pending, or need to notify a userspace |
664 | * irqchip about timer or PMU level changes, then we exit (and | 662 | * irqchip about timer or PMU level changes, then we exit (and |
@@ -683,10 +681,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
683 | if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || | 681 | if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || |
684 | kvm_request_pending(vcpu)) { | 682 | kvm_request_pending(vcpu)) { |
685 | vcpu->mode = OUTSIDE_GUEST_MODE; | 683 | vcpu->mode = OUTSIDE_GUEST_MODE; |
686 | local_irq_enable(); | ||
687 | kvm_pmu_sync_hwstate(vcpu); | 684 | kvm_pmu_sync_hwstate(vcpu); |
688 | kvm_timer_sync_hwstate(vcpu); | 685 | kvm_timer_sync_hwstate(vcpu); |
689 | kvm_vgic_sync_hwstate(vcpu); | 686 | kvm_vgic_sync_hwstate(vcpu); |
687 | local_irq_enable(); | ||
690 | preempt_enable(); | 688 | preempt_enable(); |
691 | continue; | 689 | continue; |
692 | } | 690 | } |
@@ -710,6 +708,27 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
710 | kvm_arm_clear_debug(vcpu); | 708 | kvm_arm_clear_debug(vcpu); |
711 | 709 | ||
712 | /* | 710 | /* |
711 | * We must sync the PMU state before the vgic state so | ||
712 | * that the vgic can properly sample the updated state of the | ||
713 | * interrupt line. | ||
714 | */ | ||
715 | kvm_pmu_sync_hwstate(vcpu); | ||
716 | |||
717 | /* | ||
718 | * Sync the vgic state before syncing the timer state because | ||
719 | * the timer code needs to know if the virtual timer | ||
720 | * interrupts are active. | ||
721 | */ | ||
722 | kvm_vgic_sync_hwstate(vcpu); | ||
723 | |||
724 | /* | ||
725 | * Sync the timer hardware state before enabling interrupts as | ||
726 | * we don't want vtimer interrupts to race with syncing the | ||
727 | * timer virtual interrupt state. | ||
728 | */ | ||
729 | kvm_timer_sync_hwstate(vcpu); | ||
730 | |||
731 | /* | ||
713 | * We may have taken a host interrupt in HYP mode (ie | 732 | * We may have taken a host interrupt in HYP mode (ie |
714 | * while executing the guest). This interrupt is still | 733 | * while executing the guest). This interrupt is still |
715 | * pending, as we haven't serviced it yet! | 734 | * pending, as we haven't serviced it yet! |
@@ -732,16 +751,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
732 | guest_exit(); | 751 | guest_exit(); |
733 | trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); | 752 | trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); |
734 | 753 | ||
735 | /* | ||
736 | * We must sync the PMU and timer state before the vgic state so | ||
737 | * that the vgic can properly sample the updated state of the | ||
738 | * interrupt line. | ||
739 | */ | ||
740 | kvm_pmu_sync_hwstate(vcpu); | ||
741 | kvm_timer_sync_hwstate(vcpu); | ||
742 | |||
743 | kvm_vgic_sync_hwstate(vcpu); | ||
744 | |||
745 | preempt_enable(); | 754 | preempt_enable(); |
746 | 755 | ||
747 | ret = handle_exit(vcpu, run, ret); | 756 | ret = handle_exit(vcpu, run, ret); |
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c index 4734915ab71f..f39861639f08 100644 --- a/virt/kvm/arm/hyp/timer-sr.c +++ b/virt/kvm/arm/hyp/timer-sr.c | |||
@@ -21,58 +21,48 @@ | |||
21 | 21 | ||
22 | #include <asm/kvm_hyp.h> | 22 | #include <asm/kvm_hyp.h> |
23 | 23 | ||
24 | /* vcpu is already in the HYP VA space */ | 24 | void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high) |
25 | void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) | 25 | { |
26 | u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low; | ||
27 | write_sysreg(cntvoff, cntvoff_el2); | ||
28 | } | ||
29 | |||
30 | void __hyp_text enable_el1_phys_timer_access(void) | ||
26 | { | 31 | { |
27 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
28 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
29 | u64 val; | 32 | u64 val; |
30 | 33 | ||
31 | if (timer->enabled) { | 34 | /* Allow physical timer/counter access for the host */ |
32 | vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); | 35 | val = read_sysreg(cnthctl_el2); |
33 | vtimer->cnt_cval = read_sysreg_el0(cntv_cval); | 36 | val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; |
34 | } | 37 | write_sysreg(val, cnthctl_el2); |
38 | } | ||
35 | 39 | ||
36 | /* Disable the virtual timer */ | 40 | void __hyp_text disable_el1_phys_timer_access(void) |
37 | write_sysreg_el0(0, cntv_ctl); | 41 | { |
42 | u64 val; | ||
38 | 43 | ||
39 | /* | 44 | /* |
45 | * Disallow physical timer access for the guest | ||
46 | * Physical counter access is allowed | ||
47 | */ | ||
48 | val = read_sysreg(cnthctl_el2); | ||
49 | val &= ~CNTHCTL_EL1PCEN; | ||
50 | val |= CNTHCTL_EL1PCTEN; | ||
51 | write_sysreg(val, cnthctl_el2); | ||
52 | } | ||
53 | |||
54 | void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu) | ||
55 | { | ||
56 | /* | ||
40 | * We don't need to do this for VHE since the host kernel runs in EL2 | 57 | * We don't need to do this for VHE since the host kernel runs in EL2 |
41 | * with HCR_EL2.TGE ==1, which makes those bits have no impact. | 58 | * with HCR_EL2.TGE ==1, which makes those bits have no impact. |
42 | */ | 59 | */ |
43 | if (!has_vhe()) { | 60 | if (!has_vhe()) |
44 | /* Allow physical timer/counter access for the host */ | 61 | enable_el1_phys_timer_access(); |
45 | val = read_sysreg(cnthctl_el2); | ||
46 | val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; | ||
47 | write_sysreg(val, cnthctl_el2); | ||
48 | } | ||
49 | |||
50 | /* Clear cntvoff for the host */ | ||
51 | write_sysreg(0, cntvoff_el2); | ||
52 | } | 62 | } |
53 | 63 | ||
54 | void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) | 64 | void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu) |
55 | { | 65 | { |
56 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 66 | if (!has_vhe()) |
57 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 67 | disable_el1_phys_timer_access(); |
58 | u64 val; | ||
59 | |||
60 | /* Those bits are already configured at boot on VHE-system */ | ||
61 | if (!has_vhe()) { | ||
62 | /* | ||
63 | * Disallow physical timer access for the guest | ||
64 | * Physical counter access is allowed | ||
65 | */ | ||
66 | val = read_sysreg(cnthctl_el2); | ||
67 | val &= ~CNTHCTL_EL1PCEN; | ||
68 | val |= CNTHCTL_EL1PCTEN; | ||
69 | write_sysreg(val, cnthctl_el2); | ||
70 | } | ||
71 | |||
72 | if (timer->enabled) { | ||
73 | write_sysreg(vtimer->cntvoff, cntvoff_el2); | ||
74 | write_sysreg_el0(vtimer->cnt_cval, cntv_cval); | ||
75 | isb(); | ||
76 | write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl); | ||
77 | } | ||
78 | } | 68 | } |
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index f51c1e1b3f70..40791c121710 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
@@ -278,6 +278,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
278 | u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser); | 278 | u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser); |
279 | u8 prop; | 279 | u8 prop; |
280 | int ret; | 280 | int ret; |
281 | unsigned long flags; | ||
281 | 282 | ||
282 | ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET, | 283 | ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET, |
283 | &prop, 1); | 284 | &prop, 1); |
@@ -285,15 +286,15 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
285 | if (ret) | 286 | if (ret) |
286 | return ret; | 287 | return ret; |
287 | 288 | ||
288 | spin_lock(&irq->irq_lock); | 289 | spin_lock_irqsave(&irq->irq_lock, flags); |
289 | 290 | ||
290 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { | 291 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { |
291 | irq->priority = LPI_PROP_PRIORITY(prop); | 292 | irq->priority = LPI_PROP_PRIORITY(prop); |
292 | irq->enabled = LPI_PROP_ENABLE_BIT(prop); | 293 | irq->enabled = LPI_PROP_ENABLE_BIT(prop); |
293 | 294 | ||
294 | vgic_queue_irq_unlock(kvm, irq); | 295 | vgic_queue_irq_unlock(kvm, irq, flags); |
295 | } else { | 296 | } else { |
296 | spin_unlock(&irq->irq_lock); | 297 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
297 | } | 298 | } |
298 | 299 | ||
299 | return 0; | 300 | return 0; |
@@ -393,6 +394,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
393 | int ret = 0; | 394 | int ret = 0; |
394 | u32 *intids; | 395 | u32 *intids; |
395 | int nr_irqs, i; | 396 | int nr_irqs, i; |
397 | unsigned long flags; | ||
396 | 398 | ||
397 | nr_irqs = vgic_copy_lpi_list(vcpu, &intids); | 399 | nr_irqs = vgic_copy_lpi_list(vcpu, &intids); |
398 | if (nr_irqs < 0) | 400 | if (nr_irqs < 0) |
@@ -420,9 +422,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
420 | } | 422 | } |
421 | 423 | ||
422 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); | 424 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); |
423 | spin_lock(&irq->irq_lock); | 425 | spin_lock_irqsave(&irq->irq_lock, flags); |
424 | irq->pending_latch = pendmask & (1U << bit_nr); | 426 | irq->pending_latch = pendmask & (1U << bit_nr); |
425 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 427 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
426 | vgic_put_irq(vcpu->kvm, irq); | 428 | vgic_put_irq(vcpu->kvm, irq); |
427 | } | 429 | } |
428 | 430 | ||
@@ -515,6 +517,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, | |||
515 | { | 517 | { |
516 | struct kvm_vcpu *vcpu; | 518 | struct kvm_vcpu *vcpu; |
517 | struct its_ite *ite; | 519 | struct its_ite *ite; |
520 | unsigned long flags; | ||
518 | 521 | ||
519 | if (!its->enabled) | 522 | if (!its->enabled) |
520 | return -EBUSY; | 523 | return -EBUSY; |
@@ -530,9 +533,9 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, | |||
530 | if (!vcpu->arch.vgic_cpu.lpis_enabled) | 533 | if (!vcpu->arch.vgic_cpu.lpis_enabled) |
531 | return -EBUSY; | 534 | return -EBUSY; |
532 | 535 | ||
533 | spin_lock(&ite->irq->irq_lock); | 536 | spin_lock_irqsave(&ite->irq->irq_lock, flags); |
534 | ite->irq->pending_latch = true; | 537 | ite->irq->pending_latch = true; |
535 | vgic_queue_irq_unlock(kvm, ite->irq); | 538 | vgic_queue_irq_unlock(kvm, ite->irq, flags); |
536 | 539 | ||
537 | return 0; | 540 | return 0; |
538 | } | 541 | } |
@@ -894,7 +897,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, | |||
894 | } | 897 | } |
895 | 898 | ||
896 | /* Requires the its_lock to be held. */ | 899 | /* Requires the its_lock to be held. */ |
897 | static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device) | 900 | static void vgic_its_free_device(struct kvm *kvm, struct its_device *device) |
898 | { | 901 | { |
899 | struct its_ite *ite, *temp; | 902 | struct its_ite *ite, *temp; |
900 | 903 | ||
@@ -910,6 +913,24 @@ static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device) | |||
910 | kfree(device); | 913 | kfree(device); |
911 | } | 914 | } |
912 | 915 | ||
916 | /* its lock must be held */ | ||
917 | static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its) | ||
918 | { | ||
919 | struct its_device *cur, *temp; | ||
920 | |||
921 | list_for_each_entry_safe(cur, temp, &its->device_list, dev_list) | ||
922 | vgic_its_free_device(kvm, cur); | ||
923 | } | ||
924 | |||
925 | /* its lock must be held */ | ||
926 | static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its) | ||
927 | { | ||
928 | struct its_collection *cur, *temp; | ||
929 | |||
930 | list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list) | ||
931 | vgic_its_free_collection(its, cur->collection_id); | ||
932 | } | ||
933 | |||
913 | /* Must be called with its_lock mutex held */ | 934 | /* Must be called with its_lock mutex held */ |
914 | static struct its_device *vgic_its_alloc_device(struct vgic_its *its, | 935 | static struct its_device *vgic_its_alloc_device(struct vgic_its *its, |
915 | u32 device_id, gpa_t itt_addr, | 936 | u32 device_id, gpa_t itt_addr, |
@@ -957,7 +978,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its, | |||
957 | * by removing the mapping and re-establishing it. | 978 | * by removing the mapping and re-establishing it. |
958 | */ | 979 | */ |
959 | if (device) | 980 | if (device) |
960 | vgic_its_unmap_device(kvm, device); | 981 | vgic_its_free_device(kvm, device); |
961 | 982 | ||
962 | /* | 983 | /* |
963 | * The spec does not say whether unmapping a not-mapped device | 984 | * The spec does not say whether unmapping a not-mapped device |
@@ -1410,7 +1431,7 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, | |||
1410 | unsigned long val) | 1431 | unsigned long val) |
1411 | { | 1432 | { |
1412 | const struct vgic_its_abi *abi = vgic_its_get_abi(its); | 1433 | const struct vgic_its_abi *abi = vgic_its_get_abi(its); |
1413 | u64 entry_size, device_type; | 1434 | u64 entry_size, table_type; |
1414 | u64 reg, *regptr, clearbits = 0; | 1435 | u64 reg, *regptr, clearbits = 0; |
1415 | 1436 | ||
1416 | /* When GITS_CTLR.Enable is 1, we ignore write accesses. */ | 1437 | /* When GITS_CTLR.Enable is 1, we ignore write accesses. */ |
@@ -1421,12 +1442,12 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, | |||
1421 | case 0: | 1442 | case 0: |
1422 | regptr = &its->baser_device_table; | 1443 | regptr = &its->baser_device_table; |
1423 | entry_size = abi->dte_esz; | 1444 | entry_size = abi->dte_esz; |
1424 | device_type = GITS_BASER_TYPE_DEVICE; | 1445 | table_type = GITS_BASER_TYPE_DEVICE; |
1425 | break; | 1446 | break; |
1426 | case 1: | 1447 | case 1: |
1427 | regptr = &its->baser_coll_table; | 1448 | regptr = &its->baser_coll_table; |
1428 | entry_size = abi->cte_esz; | 1449 | entry_size = abi->cte_esz; |
1429 | device_type = GITS_BASER_TYPE_COLLECTION; | 1450 | table_type = GITS_BASER_TYPE_COLLECTION; |
1430 | clearbits = GITS_BASER_INDIRECT; | 1451 | clearbits = GITS_BASER_INDIRECT; |
1431 | break; | 1452 | break; |
1432 | default: | 1453 | default: |
@@ -1438,10 +1459,24 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, | |||
1438 | reg &= ~clearbits; | 1459 | reg &= ~clearbits; |
1439 | 1460 | ||
1440 | reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT; | 1461 | reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT; |
1441 | reg |= device_type << GITS_BASER_TYPE_SHIFT; | 1462 | reg |= table_type << GITS_BASER_TYPE_SHIFT; |
1442 | reg = vgic_sanitise_its_baser(reg); | 1463 | reg = vgic_sanitise_its_baser(reg); |
1443 | 1464 | ||
1444 | *regptr = reg; | 1465 | *regptr = reg; |
1466 | |||
1467 | if (!(reg & GITS_BASER_VALID)) { | ||
1468 | /* Take the its_lock to prevent a race with a save/restore */ | ||
1469 | mutex_lock(&its->its_lock); | ||
1470 | switch (table_type) { | ||
1471 | case GITS_BASER_TYPE_DEVICE: | ||
1472 | vgic_its_free_device_list(kvm, its); | ||
1473 | break; | ||
1474 | case GITS_BASER_TYPE_COLLECTION: | ||
1475 | vgic_its_free_collection_list(kvm, its); | ||
1476 | break; | ||
1477 | } | ||
1478 | mutex_unlock(&its->its_lock); | ||
1479 | } | ||
1445 | } | 1480 | } |
1446 | 1481 | ||
1447 | static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, | 1482 | static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, |
@@ -1612,46 +1647,17 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) | |||
1612 | return vgic_its_set_abi(its, NR_ITS_ABIS - 1); | 1647 | return vgic_its_set_abi(its, NR_ITS_ABIS - 1); |
1613 | } | 1648 | } |
1614 | 1649 | ||
1615 | static void vgic_its_free_device(struct kvm *kvm, struct its_device *dev) | ||
1616 | { | ||
1617 | struct its_ite *ite, *tmp; | ||
1618 | |||
1619 | list_for_each_entry_safe(ite, tmp, &dev->itt_head, ite_list) | ||
1620 | its_free_ite(kvm, ite); | ||
1621 | list_del(&dev->dev_list); | ||
1622 | kfree(dev); | ||
1623 | } | ||
1624 | |||
1625 | static void vgic_its_destroy(struct kvm_device *kvm_dev) | 1650 | static void vgic_its_destroy(struct kvm_device *kvm_dev) |
1626 | { | 1651 | { |
1627 | struct kvm *kvm = kvm_dev->kvm; | 1652 | struct kvm *kvm = kvm_dev->kvm; |
1628 | struct vgic_its *its = kvm_dev->private; | 1653 | struct vgic_its *its = kvm_dev->private; |
1629 | struct list_head *cur, *temp; | ||
1630 | |||
1631 | /* | ||
1632 | * We may end up here without the lists ever having been initialized. | ||
1633 | * Check this and bail out early to avoid dereferencing a NULL pointer. | ||
1634 | */ | ||
1635 | if (!its->device_list.next) | ||
1636 | return; | ||
1637 | 1654 | ||
1638 | mutex_lock(&its->its_lock); | 1655 | mutex_lock(&its->its_lock); |
1639 | list_for_each_safe(cur, temp, &its->device_list) { | ||
1640 | struct its_device *dev; | ||
1641 | 1656 | ||
1642 | dev = list_entry(cur, struct its_device, dev_list); | 1657 | vgic_its_free_device_list(kvm, its); |
1643 | vgic_its_free_device(kvm, dev); | 1658 | vgic_its_free_collection_list(kvm, its); |
1644 | } | ||
1645 | 1659 | ||
1646 | list_for_each_safe(cur, temp, &its->collection_list) { | ||
1647 | struct its_collection *coll; | ||
1648 | |||
1649 | coll = list_entry(cur, struct its_collection, coll_list); | ||
1650 | list_del(cur); | ||
1651 | kfree(coll); | ||
1652 | } | ||
1653 | mutex_unlock(&its->its_lock); | 1660 | mutex_unlock(&its->its_lock); |
1654 | |||
1655 | kfree(its); | 1661 | kfree(its); |
1656 | } | 1662 | } |
1657 | 1663 | ||
@@ -2267,29 +2273,13 @@ static int vgic_its_restore_collection_table(struct vgic_its *its) | |||
2267 | */ | 2273 | */ |
2268 | static int vgic_its_save_tables_v0(struct vgic_its *its) | 2274 | static int vgic_its_save_tables_v0(struct vgic_its *its) |
2269 | { | 2275 | { |
2270 | struct kvm *kvm = its->dev->kvm; | ||
2271 | int ret; | 2276 | int ret; |
2272 | 2277 | ||
2273 | mutex_lock(&kvm->lock); | ||
2274 | mutex_lock(&its->its_lock); | ||
2275 | |||
2276 | if (!lock_all_vcpus(kvm)) { | ||
2277 | mutex_unlock(&its->its_lock); | ||
2278 | mutex_unlock(&kvm->lock); | ||
2279 | return -EBUSY; | ||
2280 | } | ||
2281 | |||
2282 | ret = vgic_its_save_device_tables(its); | 2278 | ret = vgic_its_save_device_tables(its); |
2283 | if (ret) | 2279 | if (ret) |
2284 | goto out; | 2280 | return ret; |
2285 | |||
2286 | ret = vgic_its_save_collection_table(its); | ||
2287 | 2281 | ||
2288 | out: | 2282 | return vgic_its_save_collection_table(its); |
2289 | unlock_all_vcpus(kvm); | ||
2290 | mutex_unlock(&its->its_lock); | ||
2291 | mutex_unlock(&kvm->lock); | ||
2292 | return ret; | ||
2293 | } | 2283 | } |
2294 | 2284 | ||
2295 | /** | 2285 | /** |
@@ -2299,29 +2289,13 @@ out: | |||
2299 | */ | 2289 | */ |
2300 | static int vgic_its_restore_tables_v0(struct vgic_its *its) | 2290 | static int vgic_its_restore_tables_v0(struct vgic_its *its) |
2301 | { | 2291 | { |
2302 | struct kvm *kvm = its->dev->kvm; | ||
2303 | int ret; | 2292 | int ret; |
2304 | 2293 | ||
2305 | mutex_lock(&kvm->lock); | ||
2306 | mutex_lock(&its->its_lock); | ||
2307 | |||
2308 | if (!lock_all_vcpus(kvm)) { | ||
2309 | mutex_unlock(&its->its_lock); | ||
2310 | mutex_unlock(&kvm->lock); | ||
2311 | return -EBUSY; | ||
2312 | } | ||
2313 | |||
2314 | ret = vgic_its_restore_collection_table(its); | 2294 | ret = vgic_its_restore_collection_table(its); |
2315 | if (ret) | 2295 | if (ret) |
2316 | goto out; | 2296 | return ret; |
2317 | |||
2318 | ret = vgic_its_restore_device_tables(its); | ||
2319 | out: | ||
2320 | unlock_all_vcpus(kvm); | ||
2321 | mutex_unlock(&its->its_lock); | ||
2322 | mutex_unlock(&kvm->lock); | ||
2323 | 2297 | ||
2324 | return ret; | 2298 | return vgic_its_restore_device_tables(its); |
2325 | } | 2299 | } |
2326 | 2300 | ||
2327 | static int vgic_its_commit_v0(struct vgic_its *its) | 2301 | static int vgic_its_commit_v0(struct vgic_its *its) |
@@ -2340,6 +2314,19 @@ static int vgic_its_commit_v0(struct vgic_its *its) | |||
2340 | return 0; | 2314 | return 0; |
2341 | } | 2315 | } |
2342 | 2316 | ||
2317 | static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its) | ||
2318 | { | ||
2319 | /* We need to keep the ABI specific field values */ | ||
2320 | its->baser_coll_table &= ~GITS_BASER_VALID; | ||
2321 | its->baser_device_table &= ~GITS_BASER_VALID; | ||
2322 | its->cbaser = 0; | ||
2323 | its->creadr = 0; | ||
2324 | its->cwriter = 0; | ||
2325 | its->enabled = 0; | ||
2326 | vgic_its_free_device_list(kvm, its); | ||
2327 | vgic_its_free_collection_list(kvm, its); | ||
2328 | } | ||
2329 | |||
2343 | static int vgic_its_has_attr(struct kvm_device *dev, | 2330 | static int vgic_its_has_attr(struct kvm_device *dev, |
2344 | struct kvm_device_attr *attr) | 2331 | struct kvm_device_attr *attr) |
2345 | { | 2332 | { |
@@ -2354,6 +2341,8 @@ static int vgic_its_has_attr(struct kvm_device *dev, | |||
2354 | switch (attr->attr) { | 2341 | switch (attr->attr) { |
2355 | case KVM_DEV_ARM_VGIC_CTRL_INIT: | 2342 | case KVM_DEV_ARM_VGIC_CTRL_INIT: |
2356 | return 0; | 2343 | return 0; |
2344 | case KVM_DEV_ARM_ITS_CTRL_RESET: | ||
2345 | return 0; | ||
2357 | case KVM_DEV_ARM_ITS_SAVE_TABLES: | 2346 | case KVM_DEV_ARM_ITS_SAVE_TABLES: |
2358 | return 0; | 2347 | return 0; |
2359 | case KVM_DEV_ARM_ITS_RESTORE_TABLES: | 2348 | case KVM_DEV_ARM_ITS_RESTORE_TABLES: |
@@ -2366,6 +2355,41 @@ static int vgic_its_has_attr(struct kvm_device *dev, | |||
2366 | return -ENXIO; | 2355 | return -ENXIO; |
2367 | } | 2356 | } |
2368 | 2357 | ||
2358 | static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) | ||
2359 | { | ||
2360 | const struct vgic_its_abi *abi = vgic_its_get_abi(its); | ||
2361 | int ret = 0; | ||
2362 | |||
2363 | if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */ | ||
2364 | return 0; | ||
2365 | |||
2366 | mutex_lock(&kvm->lock); | ||
2367 | mutex_lock(&its->its_lock); | ||
2368 | |||
2369 | if (!lock_all_vcpus(kvm)) { | ||
2370 | mutex_unlock(&its->its_lock); | ||
2371 | mutex_unlock(&kvm->lock); | ||
2372 | return -EBUSY; | ||
2373 | } | ||
2374 | |||
2375 | switch (attr) { | ||
2376 | case KVM_DEV_ARM_ITS_CTRL_RESET: | ||
2377 | vgic_its_reset(kvm, its); | ||
2378 | break; | ||
2379 | case KVM_DEV_ARM_ITS_SAVE_TABLES: | ||
2380 | ret = abi->save_tables(its); | ||
2381 | break; | ||
2382 | case KVM_DEV_ARM_ITS_RESTORE_TABLES: | ||
2383 | ret = abi->restore_tables(its); | ||
2384 | break; | ||
2385 | } | ||
2386 | |||
2387 | unlock_all_vcpus(kvm); | ||
2388 | mutex_unlock(&its->its_lock); | ||
2389 | mutex_unlock(&kvm->lock); | ||
2390 | return ret; | ||
2391 | } | ||
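The new KVM_DEV_ARM_ITS_CTRL_RESET attribute handled above is reached through the generic device-attribute ioctl on the ITS device fd. A hedged userspace sketch, assuming its_fd is the file descriptor returned by KVM_CREATE_DEVICE for the ITS:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Ask KVM to return the ITS to its freshly-created state. */
    static int reset_its(int its_fd)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
            .attr  = KVM_DEV_ARM_ITS_CTRL_RESET,
        };

        return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
    }

Because vgic_its_ctrl() takes kvm->lock and its_lock and then locks all vcpus before acting, the ioctl fails with -EBUSY if any vcpu is currently in KVM_RUN, so a VMM would typically pause its vcpu threads first.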
2392 | |||
2369 | static int vgic_its_set_attr(struct kvm_device *dev, | 2393 | static int vgic_its_set_attr(struct kvm_device *dev, |
2370 | struct kvm_device_attr *attr) | 2394 | struct kvm_device_attr *attr) |
2371 | { | 2395 | { |
@@ -2391,19 +2415,8 @@ static int vgic_its_set_attr(struct kvm_device *dev, | |||
2391 | 2415 | ||
2392 | return vgic_register_its_iodev(dev->kvm, its, addr); | 2416 | return vgic_register_its_iodev(dev->kvm, its, addr); |
2393 | } | 2417 | } |
2394 | case KVM_DEV_ARM_VGIC_GRP_CTRL: { | 2418 | case KVM_DEV_ARM_VGIC_GRP_CTRL: |
2395 | const struct vgic_its_abi *abi = vgic_its_get_abi(its); | 2419 | return vgic_its_ctrl(dev->kvm, its, attr->attr); |
2396 | |||
2397 | switch (attr->attr) { | ||
2398 | case KVM_DEV_ARM_VGIC_CTRL_INIT: | ||
2399 | /* Nothing to do */ | ||
2400 | return 0; | ||
2401 | case KVM_DEV_ARM_ITS_SAVE_TABLES: | ||
2402 | return abi->save_tables(its); | ||
2403 | case KVM_DEV_ARM_ITS_RESTORE_TABLES: | ||
2404 | return abi->restore_tables(its); | ||
2405 | } | ||
2406 | } | ||
2407 | case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: { | 2420 | case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: { |
2408 | u64 __user *uaddr = (u64 __user *)(long)attr->addr; | 2421 | u64 __user *uaddr = (u64 __user *)(long)attr->addr; |
2409 | u64 reg; | 2422 | u64 reg; |
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index b3d4a10f09a1..e21e2f49b005 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c | |||
@@ -74,6 +74,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, | |||
74 | int mode = (val >> 24) & 0x03; | 74 | int mode = (val >> 24) & 0x03; |
75 | int c; | 75 | int c; |
76 | struct kvm_vcpu *vcpu; | 76 | struct kvm_vcpu *vcpu; |
77 | unsigned long flags; | ||
77 | 78 | ||
78 | switch (mode) { | 79 | switch (mode) { |
79 | case 0x0: /* as specified by targets */ | 80 | case 0x0: /* as specified by targets */ |
@@ -97,11 +98,11 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, | |||
97 | 98 | ||
98 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); | 99 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); |
99 | 100 | ||
100 | spin_lock(&irq->irq_lock); | 101 | spin_lock_irqsave(&irq->irq_lock, flags); |
101 | irq->pending_latch = true; | 102 | irq->pending_latch = true; |
102 | irq->source |= 1U << source_vcpu->vcpu_id; | 103 | irq->source |= 1U << source_vcpu->vcpu_id; |
103 | 104 | ||
104 | vgic_queue_irq_unlock(source_vcpu->kvm, irq); | 105 | vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags); |
105 | vgic_put_irq(source_vcpu->kvm, irq); | 106 | vgic_put_irq(source_vcpu->kvm, irq); |
106 | } | 107 | } |
107 | } | 108 | } |
@@ -131,6 +132,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, | |||
131 | u32 intid = VGIC_ADDR_TO_INTID(addr, 8); | 132 | u32 intid = VGIC_ADDR_TO_INTID(addr, 8); |
132 | u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0); | 133 | u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0); |
133 | int i; | 134 | int i; |
135 | unsigned long flags; | ||
134 | 136 | ||
135 | /* GICD_ITARGETSR[0-7] are read-only */ | 137 | /* GICD_ITARGETSR[0-7] are read-only */ |
136 | if (intid < VGIC_NR_PRIVATE_IRQS) | 138 | if (intid < VGIC_NR_PRIVATE_IRQS) |
@@ -140,13 +142,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, | |||
140 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); | 142 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); |
141 | int target; | 143 | int target; |
142 | 144 | ||
143 | spin_lock(&irq->irq_lock); | 145 | spin_lock_irqsave(&irq->irq_lock, flags); |
144 | 146 | ||
145 | irq->targets = (val >> (i * 8)) & cpu_mask; | 147 | irq->targets = (val >> (i * 8)) & cpu_mask; |
146 | target = irq->targets ? __ffs(irq->targets) : 0; | 148 | target = irq->targets ? __ffs(irq->targets) : 0; |
147 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); | 149 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); |
148 | 150 | ||
149 | spin_unlock(&irq->irq_lock); | 151 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
150 | vgic_put_irq(vcpu->kvm, irq); | 152 | vgic_put_irq(vcpu->kvm, irq); |
151 | } | 153 | } |
152 | } | 154 | } |
@@ -174,17 +176,18 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu, | |||
174 | { | 176 | { |
175 | u32 intid = addr & 0x0f; | 177 | u32 intid = addr & 0x0f; |
176 | int i; | 178 | int i; |
179 | unsigned long flags; | ||
177 | 180 | ||
178 | for (i = 0; i < len; i++) { | 181 | for (i = 0; i < len; i++) { |
179 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 182 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
180 | 183 | ||
181 | spin_lock(&irq->irq_lock); | 184 | spin_lock_irqsave(&irq->irq_lock, flags); |
182 | 185 | ||
183 | irq->source &= ~((val >> (i * 8)) & 0xff); | 186 | irq->source &= ~((val >> (i * 8)) & 0xff); |
184 | if (!irq->source) | 187 | if (!irq->source) |
185 | irq->pending_latch = false; | 188 | irq->pending_latch = false; |
186 | 189 | ||
187 | spin_unlock(&irq->irq_lock); | 190 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
188 | vgic_put_irq(vcpu->kvm, irq); | 191 | vgic_put_irq(vcpu->kvm, irq); |
189 | } | 192 | } |
190 | } | 193 | } |
@@ -195,19 +198,20 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, | |||
195 | { | 198 | { |
196 | u32 intid = addr & 0x0f; | 199 | u32 intid = addr & 0x0f; |
197 | int i; | 200 | int i; |
201 | unsigned long flags; | ||
198 | 202 | ||
199 | for (i = 0; i < len; i++) { | 203 | for (i = 0; i < len; i++) { |
200 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 204 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
201 | 205 | ||
202 | spin_lock(&irq->irq_lock); | 206 | spin_lock_irqsave(&irq->irq_lock, flags); |
203 | 207 | ||
204 | irq->source |= (val >> (i * 8)) & 0xff; | 208 | irq->source |= (val >> (i * 8)) & 0xff; |
205 | 209 | ||
206 | if (irq->source) { | 210 | if (irq->source) { |
207 | irq->pending_latch = true; | 211 | irq->pending_latch = true; |
208 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 212 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
209 | } else { | 213 | } else { |
210 | spin_unlock(&irq->irq_lock); | 214 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
211 | } | 215 | } |
212 | vgic_put_irq(vcpu->kvm, irq); | 216 | vgic_put_irq(vcpu->kvm, irq); |
213 | } | 217 | } |
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 408ef06638fc..83786108829e 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c | |||
@@ -129,6 +129,7 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, | |||
129 | { | 129 | { |
130 | int intid = VGIC_ADDR_TO_INTID(addr, 64); | 130 | int intid = VGIC_ADDR_TO_INTID(addr, 64); |
131 | struct vgic_irq *irq; | 131 | struct vgic_irq *irq; |
132 | unsigned long flags; | ||
132 | 133 | ||
133 | /* The upper word is WI for us since we don't implement Aff3. */ | 134 | /* The upper word is WI for us since we don't implement Aff3. */ |
134 | if (addr & 4) | 135 | if (addr & 4) |
@@ -139,13 +140,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, | |||
139 | if (!irq) | 140 | if (!irq) |
140 | return; | 141 | return; |
141 | 142 | ||
142 | spin_lock(&irq->irq_lock); | 143 | spin_lock_irqsave(&irq->irq_lock, flags); |
143 | 144 | ||
144 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ | 145 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ |
145 | irq->mpidr = val & GENMASK(23, 0); | 146 | irq->mpidr = val & GENMASK(23, 0); |
146 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); | 147 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); |
147 | 148 | ||
148 | spin_unlock(&irq->irq_lock); | 149 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
149 | vgic_put_irq(vcpu->kvm, irq); | 150 | vgic_put_irq(vcpu->kvm, irq); |
150 | } | 151 | } |
151 | 152 | ||
@@ -241,11 +242,12 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
241 | { | 242 | { |
242 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); | 243 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); |
243 | int i; | 244 | int i; |
245 | unsigned long flags; | ||
244 | 246 | ||
245 | for (i = 0; i < len * 8; i++) { | 247 | for (i = 0; i < len * 8; i++) { |
246 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 248 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
247 | 249 | ||
248 | spin_lock(&irq->irq_lock); | 250 | spin_lock_irqsave(&irq->irq_lock, flags); |
249 | if (test_bit(i, &val)) { | 251 | if (test_bit(i, &val)) { |
250 | /* | 252 | /* |
251 | * pending_latch is set irrespective of irq type | 253 | * pending_latch is set irrespective of irq type |
@@ -253,10 +255,10 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
253 | * restore irq config before pending info. | 255 | * restore irq config before pending info. |
254 | */ | 256 | */ |
255 | irq->pending_latch = true; | 257 | irq->pending_latch = true; |
256 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 258 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
257 | } else { | 259 | } else { |
258 | irq->pending_latch = false; | 260 | irq->pending_latch = false; |
259 | spin_unlock(&irq->irq_lock); | 261 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
260 | } | 262 | } |
261 | 263 | ||
262 | vgic_put_irq(vcpu->kvm, irq); | 264 | vgic_put_irq(vcpu->kvm, irq); |
@@ -799,6 +801,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) | |||
799 | int sgi, c; | 801 | int sgi, c; |
800 | int vcpu_id = vcpu->vcpu_id; | 802 | int vcpu_id = vcpu->vcpu_id; |
801 | bool broadcast; | 803 | bool broadcast; |
804 | unsigned long flags; | ||
802 | 805 | ||
803 | sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; | 806 | sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; |
804 | broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); | 807 | broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); |
@@ -837,10 +840,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) | |||
837 | 840 | ||
838 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); | 841 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); |
839 | 842 | ||
840 | spin_lock(&irq->irq_lock); | 843 | spin_lock_irqsave(&irq->irq_lock, flags); |
841 | irq->pending_latch = true; | 844 | irq->pending_latch = true; |
842 | 845 | ||
843 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 846 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
844 | vgic_put_irq(vcpu->kvm, irq); | 847 | vgic_put_irq(vcpu->kvm, irq); |
845 | } | 848 | } |
846 | } | 849 | } |
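As the GICv3 hunks show, callers that may have to queue the interrupt now hand the saved flags to vgic_queue_irq_unlock(), which releases irq_lock with irqrestore on every exit path. A condensed sketch of the new calling convention (hypothetical caller, assuming the types from this patch):

    static void sketch_make_pending(struct kvm *kvm, struct vgic_irq *irq)
    {
            unsigned long flags;

            spin_lock_irqsave(&irq->irq_lock, flags);
            irq->pending_latch = true;
            /* returns with irq_lock dropped and interrupts restored */
            vgic_queue_irq_unlock(kvm, irq, flags);
    }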
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index c1e4bdd66131..deb51ee16a3d 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -69,13 +69,14 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, | |||
69 | { | 69 | { |
70 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); | 70 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); |
71 | int i; | 71 | int i; |
72 | unsigned long flags; | ||
72 | 73 | ||
73 | for_each_set_bit(i, &val, len * 8) { | 74 | for_each_set_bit(i, &val, len * 8) { |
74 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 75 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
75 | 76 | ||
76 | spin_lock(&irq->irq_lock); | 77 | spin_lock_irqsave(&irq->irq_lock, flags); |
77 | irq->enabled = true; | 78 | irq->enabled = true; |
78 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 79 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
79 | 80 | ||
80 | vgic_put_irq(vcpu->kvm, irq); | 81 | vgic_put_irq(vcpu->kvm, irq); |
81 | } | 82 | } |
@@ -87,15 +88,16 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, | |||
87 | { | 88 | { |
88 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); | 89 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); |
89 | int i; | 90 | int i; |
91 | unsigned long flags; | ||
90 | 92 | ||
91 | for_each_set_bit(i, &val, len * 8) { | 93 | for_each_set_bit(i, &val, len * 8) { |
92 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 94 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
93 | 95 | ||
94 | spin_lock(&irq->irq_lock); | 96 | spin_lock_irqsave(&irq->irq_lock, flags); |
95 | 97 | ||
96 | irq->enabled = false; | 98 | irq->enabled = false; |
97 | 99 | ||
98 | spin_unlock(&irq->irq_lock); | 100 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
99 | vgic_put_irq(vcpu->kvm, irq); | 101 | vgic_put_irq(vcpu->kvm, irq); |
100 | } | 102 | } |
101 | } | 103 | } |
@@ -126,14 +128,15 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, | |||
126 | { | 128 | { |
127 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); | 129 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); |
128 | int i; | 130 | int i; |
131 | unsigned long flags; | ||
129 | 132 | ||
130 | for_each_set_bit(i, &val, len * 8) { | 133 | for_each_set_bit(i, &val, len * 8) { |
131 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 134 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
132 | 135 | ||
133 | spin_lock(&irq->irq_lock); | 136 | spin_lock_irqsave(&irq->irq_lock, flags); |
134 | irq->pending_latch = true; | 137 | irq->pending_latch = true; |
135 | 138 | ||
136 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 139 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
137 | vgic_put_irq(vcpu->kvm, irq); | 140 | vgic_put_irq(vcpu->kvm, irq); |
138 | } | 141 | } |
139 | } | 142 | } |
@@ -144,15 +147,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, | |||
144 | { | 147 | { |
145 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); | 148 | u32 intid = VGIC_ADDR_TO_INTID(addr, 1); |
146 | int i; | 149 | int i; |
150 | unsigned long flags; | ||
147 | 151 | ||
148 | for_each_set_bit(i, &val, len * 8) { | 152 | for_each_set_bit(i, &val, len * 8) { |
149 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 153 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
150 | 154 | ||
151 | spin_lock(&irq->irq_lock); | 155 | spin_lock_irqsave(&irq->irq_lock, flags); |
152 | 156 | ||
153 | irq->pending_latch = false; | 157 | irq->pending_latch = false; |
154 | 158 | ||
155 | spin_unlock(&irq->irq_lock); | 159 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
156 | vgic_put_irq(vcpu->kvm, irq); | 160 | vgic_put_irq(vcpu->kvm, irq); |
157 | } | 161 | } |
158 | } | 162 | } |
@@ -181,7 +185,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
181 | bool new_active_state) | 185 | bool new_active_state) |
182 | { | 186 | { |
183 | struct kvm_vcpu *requester_vcpu; | 187 | struct kvm_vcpu *requester_vcpu; |
184 | spin_lock(&irq->irq_lock); | 188 | unsigned long flags; |
189 | spin_lock_irqsave(&irq->irq_lock, flags); | ||
185 | 190 | ||
186 | /* | 191 | /* |
187 | * The vcpu parameter here can mean multiple things depending on how | 192 | * The vcpu parameter here can mean multiple things depending on how |
@@ -216,9 +221,9 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
216 | 221 | ||
217 | irq->active = new_active_state; | 222 | irq->active = new_active_state; |
218 | if (new_active_state) | 223 | if (new_active_state) |
219 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 224 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
220 | else | 225 | else |
221 | spin_unlock(&irq->irq_lock); | 226 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
222 | } | 227 | } |
223 | 228 | ||
224 | /* | 229 | /* |
@@ -352,14 +357,15 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, | |||
352 | { | 357 | { |
353 | u32 intid = VGIC_ADDR_TO_INTID(addr, 8); | 358 | u32 intid = VGIC_ADDR_TO_INTID(addr, 8); |
354 | int i; | 359 | int i; |
360 | unsigned long flags; | ||
355 | 361 | ||
356 | for (i = 0; i < len; i++) { | 362 | for (i = 0; i < len; i++) { |
357 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 363 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
358 | 364 | ||
359 | spin_lock(&irq->irq_lock); | 365 | spin_lock_irqsave(&irq->irq_lock, flags); |
360 | /* Narrow the priority range to what we actually support */ | 366 | /* Narrow the priority range to what we actually support */ |
361 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); | 367 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); |
362 | spin_unlock(&irq->irq_lock); | 368 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
363 | 369 | ||
364 | vgic_put_irq(vcpu->kvm, irq); | 370 | vgic_put_irq(vcpu->kvm, irq); |
365 | } | 371 | } |
@@ -390,6 +396,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, | |||
390 | { | 396 | { |
391 | u32 intid = VGIC_ADDR_TO_INTID(addr, 2); | 397 | u32 intid = VGIC_ADDR_TO_INTID(addr, 2); |
392 | int i; | 398 | int i; |
399 | unsigned long flags; | ||
393 | 400 | ||
394 | for (i = 0; i < len * 4; i++) { | 401 | for (i = 0; i < len * 4; i++) { |
395 | struct vgic_irq *irq; | 402 | struct vgic_irq *irq; |
@@ -404,14 +411,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, | |||
404 | continue; | 411 | continue; |
405 | 412 | ||
406 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 413 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
407 | spin_lock(&irq->irq_lock); | 414 | spin_lock_irqsave(&irq->irq_lock, flags); |
408 | 415 | ||
409 | if (test_bit(i * 2 + 1, &val)) | 416 | if (test_bit(i * 2 + 1, &val)) |
410 | irq->config = VGIC_CONFIG_EDGE; | 417 | irq->config = VGIC_CONFIG_EDGE; |
411 | else | 418 | else |
412 | irq->config = VGIC_CONFIG_LEVEL; | 419 | irq->config = VGIC_CONFIG_LEVEL; |
413 | 420 | ||
414 | spin_unlock(&irq->irq_lock); | 421 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
415 | vgic_put_irq(vcpu->kvm, irq); | 422 | vgic_put_irq(vcpu->kvm, irq); |
416 | } | 423 | } |
417 | } | 424 | } |
@@ -443,6 +450,7 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, | |||
443 | { | 450 | { |
444 | int i; | 451 | int i; |
445 | int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; | 452 | int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; |
453 | unsigned long flags; | ||
446 | 454 | ||
447 | for (i = 0; i < 32; i++) { | 455 | for (i = 0; i < 32; i++) { |
448 | struct vgic_irq *irq; | 456 | struct vgic_irq *irq; |
@@ -459,12 +467,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, | |||
459 | * restore irq config before line level. | 467 | * restore irq config before line level. |
460 | */ | 468 | */ |
461 | new_level = !!(val & (1U << i)); | 469 | new_level = !!(val & (1U << i)); |
462 | spin_lock(&irq->irq_lock); | 470 | spin_lock_irqsave(&irq->irq_lock, flags); |
463 | irq->line_level = new_level; | 471 | irq->line_level = new_level; |
464 | if (new_level) | 472 | if (new_level) |
465 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 473 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
466 | else | 474 | else |
467 | spin_unlock(&irq->irq_lock); | 475 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
468 | 476 | ||
469 | vgic_put_irq(vcpu->kvm, irq); | 477 | vgic_put_irq(vcpu->kvm, irq); |
470 | } | 478 | } |
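The common MMIO handlers above all end in one of two ways: either the write may make the interrupt deliverable and the saved flags are consumed by vgic_queue_irq_unlock(), or nothing further is needed and the lock is dropped directly. A sketch of that shape (hypothetical helper, loosely modelled on vgic_write_irq_line_level_info):

    static void sketch_write_level(struct kvm *kvm, struct vgic_irq *irq, bool level)
    {
            unsigned long flags;

            spin_lock_irqsave(&irq->irq_lock, flags);
            irq->line_level = level;
            if (level)
                    vgic_queue_irq_unlock(kvm, irq, flags); /* may queue, then unlocks */
            else
                    spin_unlock_irqrestore(&irq->irq_lock, flags);
    }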
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index e4187e52bb26..80897102da26 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -62,6 +62,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
62 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 62 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
63 | struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2; | 63 | struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2; |
64 | int lr; | 64 | int lr; |
65 | unsigned long flags; | ||
65 | 66 | ||
66 | cpuif->vgic_hcr &= ~GICH_HCR_UIE; | 67 | cpuif->vgic_hcr &= ~GICH_HCR_UIE; |
67 | 68 | ||
@@ -77,7 +78,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
77 | 78 | ||
78 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); | 79 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); |
79 | 80 | ||
80 | spin_lock(&irq->irq_lock); | 81 | spin_lock_irqsave(&irq->irq_lock, flags); |
81 | 82 | ||
82 | /* Always preserve the active bit */ | 83 | /* Always preserve the active bit */ |
83 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); | 84 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); |
@@ -104,7 +105,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
104 | irq->pending_latch = false; | 105 | irq->pending_latch = false; |
105 | } | 106 | } |
106 | 107 | ||
107 | spin_unlock(&irq->irq_lock); | 108 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
108 | vgic_put_irq(vcpu->kvm, irq); | 109 | vgic_put_irq(vcpu->kvm, irq); |
109 | } | 110 | } |
110 | 111 | ||
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 96ea597db0e7..863351c090d8 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -44,6 +44,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
44 | struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3; | 44 | struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3; |
45 | u32 model = vcpu->kvm->arch.vgic.vgic_model; | 45 | u32 model = vcpu->kvm->arch.vgic.vgic_model; |
46 | int lr; | 46 | int lr; |
47 | unsigned long flags; | ||
47 | 48 | ||
48 | cpuif->vgic_hcr &= ~ICH_HCR_UIE; | 49 | cpuif->vgic_hcr &= ~ICH_HCR_UIE; |
49 | 50 | ||
@@ -66,7 +67,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
66 | if (!irq) /* An LPI could have been unmapped. */ | 67 | if (!irq) /* An LPI could have been unmapped. */ |
67 | continue; | 68 | continue; |
68 | 69 | ||
69 | spin_lock(&irq->irq_lock); | 70 | spin_lock_irqsave(&irq->irq_lock, flags); |
70 | 71 | ||
71 | /* Always preserve the active bit */ | 72 | /* Always preserve the active bit */ |
72 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); | 73 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); |
@@ -94,7 +95,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
94 | irq->pending_latch = false; | 95 | irq->pending_latch = false; |
95 | } | 96 | } |
96 | 97 | ||
97 | spin_unlock(&irq->irq_lock); | 98 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
98 | vgic_put_irq(vcpu->kvm, irq); | 99 | vgic_put_irq(vcpu->kvm, irq); |
99 | } | 100 | } |
100 | 101 | ||
@@ -278,6 +279,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq) | |||
278 | bool status; | 279 | bool status; |
279 | u8 val; | 280 | u8 val; |
280 | int ret; | 281 | int ret; |
282 | unsigned long flags; | ||
281 | 283 | ||
282 | retry: | 284 | retry: |
283 | vcpu = irq->target_vcpu; | 285 | vcpu = irq->target_vcpu; |
@@ -296,13 +298,13 @@ retry: | |||
296 | 298 | ||
297 | status = val & (1 << bit_nr); | 299 | status = val & (1 << bit_nr); |
298 | 300 | ||
299 | spin_lock(&irq->irq_lock); | 301 | spin_lock_irqsave(&irq->irq_lock, flags); |
300 | if (irq->target_vcpu != vcpu) { | 302 | if (irq->target_vcpu != vcpu) { |
301 | spin_unlock(&irq->irq_lock); | 303 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
302 | goto retry; | 304 | goto retry; |
303 | } | 305 | } |
304 | irq->pending_latch = status; | 306 | irq->pending_latch = status; |
305 | vgic_queue_irq_unlock(vcpu->kvm, irq); | 307 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
306 | 308 | ||
307 | if (status) { | 309 | if (status) { |
308 | /* clear consumed data */ | 310 | /* clear consumed data */ |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index fed717e07938..e54ef2fdf73d 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -53,6 +53,10 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { | |||
53 | * vcpuX->vcpu_id < vcpuY->vcpu_id: | 53 | * vcpuX->vcpu_id < vcpuY->vcpu_id: |
54 | * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); | 54 | * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); |
55 | * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); | 55 | * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); |
56 | * | ||
57 | * Since the VGIC must support injecting virtual interrupts from ISRs, we have | ||
58 | * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer | ||
59 | * spinlocks for any lock that may be taken while injecting an interrupt. | ||
56 | */ | 60 | */ |
57 | 61 | ||
58 | /* | 62 | /* |
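The updated locking-order comment is the key rule of the series: only the outermost lock in the ap_list_lock -> irq_lock chain needs the irqsave variant, since interrupts remain disabled while the inner lock is held. A sketch of the documented nesting (illustrative only, not actual code from the patch):

    static void sketch_nested_locking(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
    {
            unsigned long flags;

            /* outermost lock disables local interrupts and saves the flag word */
            spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
            /* inner lock: interrupts are already off, a plain spin_lock suffices */
            spin_lock(&irq->irq_lock);

            /* ... ap_list and per-IRQ state are manipulated here ... */

            spin_unlock(&irq->irq_lock);
            spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
    }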
@@ -261,7 +265,8 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne | |||
261 | * Needs to be entered with the IRQ lock already held, but will return | 265 | * Needs to be entered with the IRQ lock already held, but will return |
262 | * with all locks dropped. | 266 | * with all locks dropped. |
263 | */ | 267 | */ |
264 | bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) | 268 | bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, |
269 | unsigned long flags) | ||
265 | { | 270 | { |
266 | struct kvm_vcpu *vcpu; | 271 | struct kvm_vcpu *vcpu; |
267 | 272 | ||
@@ -279,7 +284,7 @@ retry: | |||
279 | * not need to be inserted into an ap_list and there is also | 284 | * not need to be inserted into an ap_list and there is also |
280 | * no more work for us to do. | 285 | * no more work for us to do. |
281 | */ | 286 | */ |
282 | spin_unlock(&irq->irq_lock); | 287 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
283 | 288 | ||
284 | /* | 289 | /* |
285 | * We have to kick the VCPU here, because we could be | 290 | * We have to kick the VCPU here, because we could be |
@@ -301,11 +306,11 @@ retry: | |||
301 | * We must unlock the irq lock to take the ap_list_lock where | 306 | * We must unlock the irq lock to take the ap_list_lock where |
302 | * we are going to insert this new pending interrupt. | 307 | * we are going to insert this new pending interrupt. |
303 | */ | 308 | */ |
304 | spin_unlock(&irq->irq_lock); | 309 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
305 | 310 | ||
306 | /* someone can do stuff here, which we re-check below */ | 311 | /* someone can do stuff here, which we re-check below */ |
307 | 312 | ||
308 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); | 313 | spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
309 | spin_lock(&irq->irq_lock); | 314 | spin_lock(&irq->irq_lock); |
310 | 315 | ||
311 | /* | 316 | /* |
@@ -322,9 +327,9 @@ retry: | |||
322 | 327 | ||
323 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { | 328 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { |
324 | spin_unlock(&irq->irq_lock); | 329 | spin_unlock(&irq->irq_lock); |
325 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 330 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
326 | 331 | ||
327 | spin_lock(&irq->irq_lock); | 332 | spin_lock_irqsave(&irq->irq_lock, flags); |
328 | goto retry; | 333 | goto retry; |
329 | } | 334 | } |
330 | 335 | ||
@@ -337,7 +342,7 @@ retry: | |||
337 | irq->vcpu = vcpu; | 342 | irq->vcpu = vcpu; |
338 | 343 | ||
339 | spin_unlock(&irq->irq_lock); | 344 | spin_unlock(&irq->irq_lock); |
340 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 345 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
341 | 346 | ||
342 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); | 347 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); |
343 | kvm_vcpu_kick(vcpu); | 348 | kvm_vcpu_kick(vcpu); |
@@ -367,6 +372,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, | |||
367 | { | 372 | { |
368 | struct kvm_vcpu *vcpu; | 373 | struct kvm_vcpu *vcpu; |
369 | struct vgic_irq *irq; | 374 | struct vgic_irq *irq; |
375 | unsigned long flags; | ||
370 | int ret; | 376 | int ret; |
371 | 377 | ||
372 | trace_vgic_update_irq_pending(cpuid, intid, level); | 378 | trace_vgic_update_irq_pending(cpuid, intid, level); |
@@ -383,11 +389,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, | |||
383 | if (!irq) | 389 | if (!irq) |
384 | return -EINVAL; | 390 | return -EINVAL; |
385 | 391 | ||
386 | spin_lock(&irq->irq_lock); | 392 | spin_lock_irqsave(&irq->irq_lock, flags); |
387 | 393 | ||
388 | if (!vgic_validate_injection(irq, level, owner)) { | 394 | if (!vgic_validate_injection(irq, level, owner)) { |
389 | /* Nothing to see here, move along... */ | 395 | /* Nothing to see here, move along... */ |
390 | spin_unlock(&irq->irq_lock); | 396 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
391 | vgic_put_irq(kvm, irq); | 397 | vgic_put_irq(kvm, irq); |
392 | return 0; | 398 | return 0; |
393 | } | 399 | } |
@@ -397,7 +403,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, | |||
397 | else | 403 | else |
398 | irq->pending_latch = true; | 404 | irq->pending_latch = true; |
399 | 405 | ||
400 | vgic_queue_irq_unlock(kvm, irq); | 406 | vgic_queue_irq_unlock(kvm, irq, flags); |
401 | vgic_put_irq(kvm, irq); | 407 | vgic_put_irq(kvm, irq); |
402 | 408 | ||
403 | return 0; | 409 | return 0; |
@@ -406,15 +412,16 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, | |||
406 | int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) | 412 | int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) |
407 | { | 413 | { |
408 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); | 414 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); |
415 | unsigned long flags; | ||
409 | 416 | ||
410 | BUG_ON(!irq); | 417 | BUG_ON(!irq); |
411 | 418 | ||
412 | spin_lock(&irq->irq_lock); | 419 | spin_lock_irqsave(&irq->irq_lock, flags); |
413 | 420 | ||
414 | irq->hw = true; | 421 | irq->hw = true; |
415 | irq->hwintid = phys_irq; | 422 | irq->hwintid = phys_irq; |
416 | 423 | ||
417 | spin_unlock(&irq->irq_lock); | 424 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
418 | vgic_put_irq(vcpu->kvm, irq); | 425 | vgic_put_irq(vcpu->kvm, irq); |
419 | 426 | ||
420 | return 0; | 427 | return 0; |
@@ -423,6 +430,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) | |||
423 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) | 430 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) |
424 | { | 431 | { |
425 | struct vgic_irq *irq; | 432 | struct vgic_irq *irq; |
433 | unsigned long flags; | ||
426 | 434 | ||
427 | if (!vgic_initialized(vcpu->kvm)) | 435 | if (!vgic_initialized(vcpu->kvm)) |
428 | return -EAGAIN; | 436 | return -EAGAIN; |
@@ -430,12 +438,12 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) | |||
430 | irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); | 438 | irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); |
431 | BUG_ON(!irq); | 439 | BUG_ON(!irq); |
432 | 440 | ||
433 | spin_lock(&irq->irq_lock); | 441 | spin_lock_irqsave(&irq->irq_lock, flags); |
434 | 442 | ||
435 | irq->hw = false; | 443 | irq->hw = false; |
436 | irq->hwintid = 0; | 444 | irq->hwintid = 0; |
437 | 445 | ||
438 | spin_unlock(&irq->irq_lock); | 446 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
439 | vgic_put_irq(vcpu->kvm, irq); | 447 | vgic_put_irq(vcpu->kvm, irq); |
440 | 448 | ||
441 | return 0; | 449 | return 0; |
@@ -486,9 +494,10 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) | |||
486 | { | 494 | { |
487 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 495 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
488 | struct vgic_irq *irq, *tmp; | 496 | struct vgic_irq *irq, *tmp; |
497 | unsigned long flags; | ||
489 | 498 | ||
490 | retry: | 499 | retry: |
491 | spin_lock(&vgic_cpu->ap_list_lock); | 500 | spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); |
492 | 501 | ||
493 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { | 502 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { |
494 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; | 503 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; |
@@ -528,7 +537,7 @@ retry: | |||
528 | /* This interrupt looks like it has to be migrated. */ | 537 | /* This interrupt looks like it has to be migrated. */ |
529 | 538 | ||
530 | spin_unlock(&irq->irq_lock); | 539 | spin_unlock(&irq->irq_lock); |
531 | spin_unlock(&vgic_cpu->ap_list_lock); | 540 | spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); |
532 | 541 | ||
533 | /* | 542 | /* |
534 | * Ensure locking order by always locking the smallest | 543 | * Ensure locking order by always locking the smallest |
@@ -542,7 +551,7 @@ retry: | |||
542 | vcpuB = vcpu; | 551 | vcpuB = vcpu; |
543 | } | 552 | } |
544 | 553 | ||
545 | spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 554 | spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); |
546 | spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, | 555 | spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, |
547 | SINGLE_DEPTH_NESTING); | 556 | SINGLE_DEPTH_NESTING); |
548 | spin_lock(&irq->irq_lock); | 557 | spin_lock(&irq->irq_lock); |
@@ -566,11 +575,11 @@ retry: | |||
566 | 575 | ||
567 | spin_unlock(&irq->irq_lock); | 576 | spin_unlock(&irq->irq_lock); |
568 | spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); | 577 | spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); |
569 | spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 578 | spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); |
570 | goto retry; | 579 | goto retry; |
571 | } | 580 | } |
572 | 581 | ||
573 | spin_unlock(&vgic_cpu->ap_list_lock); | 582 | spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); |
574 | } | 583 | } |
575 | 584 | ||
576 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) | 585 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) |
@@ -703,6 +712,8 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
703 | if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) | 712 | if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) |
704 | return; | 713 | return; |
705 | 714 | ||
715 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | ||
716 | |||
706 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); | 717 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); |
707 | vgic_flush_lr_state(vcpu); | 718 | vgic_flush_lr_state(vcpu); |
708 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 719 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); |
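kvm_vgic_flush_hwstate() keeps the plain spin_lock() calls and instead asserts its precondition: the flush path is expected to run with interrupts already disabled by the VCPU entry code, so an irqsave there would be redundant. A sketch of that convention (hypothetical helper; assumes DEBUG_SPINLOCK_BUG_ON compiles away when spinlock debugging is disabled):

    static void sketch_flush_with_irqs_off(struct kvm_vcpu *vcpu)
    {
            /* document and check the precondition instead of paying for irqsave */
            DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

            spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
            /* ... fold pending interrupts into the list registers ... */
            spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
    }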
@@ -735,11 +746,12 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
735 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 746 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
736 | struct vgic_irq *irq; | 747 | struct vgic_irq *irq; |
737 | bool pending = false; | 748 | bool pending = false; |
749 | unsigned long flags; | ||
738 | 750 | ||
739 | if (!vcpu->kvm->arch.vgic.enabled) | 751 | if (!vcpu->kvm->arch.vgic.enabled) |
740 | return false; | 752 | return false; |
741 | 753 | ||
742 | spin_lock(&vgic_cpu->ap_list_lock); | 754 | spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); |
743 | 755 | ||
744 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 756 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
745 | spin_lock(&irq->irq_lock); | 757 | spin_lock(&irq->irq_lock); |
@@ -750,7 +762,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
750 | break; | 762 | break; |
751 | } | 763 | } |
752 | 764 | ||
753 | spin_unlock(&vgic_cpu->ap_list_lock); | 765 | spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); |
754 | 766 | ||
755 | return pending; | 767 | return pending; |
756 | } | 768 | } |
@@ -776,10 +788,14 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq) | |||
776 | { | 788 | { |
777 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); | 789 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); |
778 | bool map_is_active; | 790 | bool map_is_active; |
791 | unsigned long flags; | ||
779 | 792 | ||
780 | spin_lock(&irq->irq_lock); | 793 | if (!vgic_initialized(vcpu->kvm)) |
794 | return false; | ||
795 | |||
796 | spin_lock_irqsave(&irq->irq_lock, flags); | ||
781 | map_is_active = irq->hw && irq->active; | 797 | map_is_active = irq->hw && irq->active; |
782 | spin_unlock(&irq->irq_lock); | 798 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
783 | vgic_put_irq(vcpu->kvm, irq); | 799 | vgic_put_irq(vcpu->kvm, irq); |
784 | 800 | ||
785 | return map_is_active; | 801 | return map_is_active; |
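kvm_vgic_map_is_active() also gains an early return when the distributor is not yet initialized, presumably because the query can now be reached from interrupt context before VGIC setup has completed. A standalone sketch of the guarded query (hypothetical form, same fields as in the hunk above):

    static bool sketch_map_is_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
    {
            unsigned long flags;
            bool active;

            if (!vgic_initialized(vcpu->kvm))
                    return false;

            spin_lock_irqsave(&irq->irq_lock, flags);
            active = irq->hw && irq->active;
            spin_unlock_irqrestore(&irq->irq_lock, flags);

            return active;
    }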
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index bf9ceab67c77..4f8aecb07ae6 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h | |||
@@ -140,7 +140,8 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, | |||
140 | struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, | 140 | struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, |
141 | u32 intid); | 141 | u32 intid); |
142 | void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq); | 142 | void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq); |
143 | bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq); | 143 | bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, |
144 | unsigned long flags); | ||
144 | void vgic_kick_vcpus(struct kvm *kvm); | 145 | void vgic_kick_vcpus(struct kvm *kvm); |
145 | 146 | ||
146 | int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, | 147 | int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, |