-rw-r--r--  arch/arm/include/asm/kvm_coproc.h |   3
-rw-r--r--  arch/arm/kvm/coproc.c             | 106
-rw-r--r--  arch/arm/kvm/handle_exit.c        |   4
-rw-r--r--  arch/arm/kvm/hyp/Makefile         |   2
-rw-r--r--  arch/arm/kvm/hyp/switch.c         |   4
-rw-r--r--  arch/arm/kvm/trace.h              |   8
-rw-r--r--  arch/arm64/kvm/hyp/Makefile       |   2
-rw-r--r--  include/kvm/arm_vgic.h            |   5
-rw-r--r--  virt/kvm/arm/hyp/vgic-v3-sr.c     |  18
-rw-r--r--  virt/kvm/arm/mmu.c                |  33
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c     |   5
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v3.c  |  12
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c       |   7
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c       |   7
14 files changed, 150 insertions, 66 deletions
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index 4917c2f7e459..e74ab0fbab79 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 2c14b69511e9..6d1d2e26dfe5 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -32,6 +32,7 @@
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
 
+#define CREATE_TRACE_POINTS
 #include "trace.h"
 #include "coproc.h"
 
@@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return 1;
 }
 
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-        kvm_inject_undefined(vcpu);
-        return 1;
-}
-
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
         /*
@@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
  * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
  * all PM registers, which doesn't crash the guest kernel at least.
  */
-static bool pm_fake(struct kvm_vcpu *vcpu,
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                     const struct coproc_params *p,
                     const struct coproc_reg *r)
 {
@@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
         return read_zero(vcpu, p);
 }
 
-#define access_pmcr pm_fake
-#define access_pmcntenset pm_fake
-#define access_pmcntenclr pm_fake
-#define access_pmovsr pm_fake
-#define access_pmselr pm_fake
-#define access_pmceid0 pm_fake
-#define access_pmceid1 pm_fake
-#define access_pmccntr pm_fake
-#define access_pmxevtyper pm_fake
-#define access_pmxevcntr pm_fake
-#define access_pmuserenr pm_fake
-#define access_pmintenset pm_fake
-#define access_pmintenclr pm_fake
+#define access_pmcr trap_raz_wi
+#define access_pmcntenset trap_raz_wi
+#define access_pmcntenclr trap_raz_wi
+#define access_pmovsr trap_raz_wi
+#define access_pmselr trap_raz_wi
+#define access_pmceid0 trap_raz_wi
+#define access_pmceid1 trap_raz_wi
+#define access_pmccntr trap_raz_wi
+#define access_pmxevtyper trap_raz_wi
+#define access_pmxevcntr trap_raz_wi
+#define access_pmuserenr trap_raz_wi
+#define access_pmintenset trap_raz_wi
+#define access_pmintenclr trap_raz_wi
 
 /* Architected CP15 registers.
  * CRn denotes the primary register number, but is copied to the CRm in the
@@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
         return 1;
 }
 
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
         struct coproc_params params;
 
@@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
         params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
         params.CRm = 0;
 
+        return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+        struct coproc_params params = decode_64bit_hsr(vcpu);
+
         return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+        struct coproc_params params = decode_64bit_hsr(vcpu);
+
+        /* raz_wi cp14 */
+        trap_raz_wi(vcpu, &params, NULL);
+
+        /* handled */
+        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+        return 1;
+}
+
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                               const struct coproc_reg *table, size_t num)
 {
@@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                 table[i].reset(vcpu, &table[i]);
 }
 
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
         struct coproc_params params;
 
@@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
         params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
         params.Rt2 = 0;
 
+        return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+        struct coproc_params params = decode_32bit_hsr(vcpu);
         return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+        struct coproc_params params = decode_32bit_hsr(vcpu);
+
+        /* raz_wi cp14 */
+        trap_raz_wi(vcpu, &params, NULL);
+
+        /* handled */
+        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+        return 1;
+}
+
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
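
Note: with the handlers above, guest cp14 accesses are no longer answered with an undefined exception; both new entry points decode the HSR, run the access through trap_raz_wi(), and skip the trapped instruction. RAZ/WI means Read-As-Zero, Write-Ignore. A minimal standalone sketch of that semantic (the struct and helper here are illustrative stand-ins, not the kernel's types):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the trapped-access descriptor. */
    struct access {
        bool is_write;
        uint32_t *rt;    /* guest register backing the transfer */
    };

    /* Read-As-Zero / Write-Ignore: reads return 0, writes are dropped. */
    static bool raz_wi(struct access *a)
    {
        if (!a->is_write)
            *a->rt = 0;
        return true;    /* access fully handled; caller skips the insn */
    }

    int main(void)
    {
        uint32_t rt = 0xdeadbeef;
        struct access rd = { .is_write = false, .rt = &rt };

        raz_wi(&rd);
        printf("mrc from a RAZ/WI register -> %#x\n", rt);  /* 0 */
        return 0;
    }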
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 5fd7968cdae9..f86a9aaef462 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -95,9 +95,9 @@ static exit_handle_fn arm_exit_handlers[] = {
         [HSR_EC_WFI] = kvm_handle_wfx,
         [HSR_EC_CP15_32] = kvm_handle_cp15_32,
         [HSR_EC_CP15_64] = kvm_handle_cp15_64,
-        [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
+        [HSR_EC_CP14_MR] = kvm_handle_cp14_32,
         [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
-        [HSR_EC_CP14_64] = kvm_handle_cp14_access,
+        [HSR_EC_CP14_64] = kvm_handle_cp14_64,
         [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
         [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
         [HSR_EC_HVC] = handle_hvc,
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 3023bb530edf..8679405b0b2b 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 92678b7bd046..624a510d31df 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
         write_sysreg(HSTR_T(15), HSTR);
         write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
         val = read_sysreg(HDCR);
-        write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+        val |= HDCR_TPM | HDCR_TPMCR;   /* trap performance monitors */
+        val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA;  /* trap debug regs */
+        write_sysreg(val, HDCR);
 }
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index fc0943776db2..b0d10648c486 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -1,5 +1,5 @@
-#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_H
+#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_KVM_H
 
 #include <linux/tracepoint.h>
 
5 5
@@ -74,10 +74,10 @@ TRACE_EVENT(kvm_hvc,
                   __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
-#endif /* _TRACE_KVM_H */
+#endif /* _TRACE_ARM_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/arm/kvm
+#define TRACE_INCLUDE_PATH .
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
 
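
Note: the guard rename and TRACE_INCLUDE_PATH change follow the standard tracepoint-header convention: exactly one .c file defines CREATE_TRACE_POINTS before including the header (done in coproc.c above) so the tracepoint bodies are emitted once, and the more specific guard presumably avoids clashing with another header's generic _TRACE_KVM_H once it is re-included through define_trace.h. A skeleton of the convention (the event is a placeholder, and the "." path typically pairs with an extra -I flag in the Makefile):

    /* trace.h -- tracepoint header skeleton (placeholder event) */
    #if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_ARM_KVM_H

    #include <linux/tracepoint.h>

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM kvm

    TRACE_EVENT(example_event,
            TP_PROTO(unsigned long pc),
            TP_ARGS(pc),
            TP_STRUCT__entry(__field(unsigned long, pc)),
            TP_fast_assign(__entry->pc = pc;),
            TP_printk("pc=0x%lx", __entry->pc)
    );

    #endif /* _TRACE_ARM_KVM_H */

    /* re-include this header by its own location */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_FILE trace

    /* this part must be outside the guard */
    #include <trace/define_trace.h>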
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae8d8c3..14c4e3b14bcb 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 97b8d3728b31..ef718586321c 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -195,7 +195,10 @@ struct vgic_dist {
                 /* either a GICv2 CPU interface */
                 gpa_t vgic_cpu_base;
                 /* or a number of GICv3 redistributor regions */
-                gpa_t vgic_redist_base;
+                struct {
+                        gpa_t vgic_redist_base;
+                        gpa_t vgic_redist_free_offset;
+                };
         };
 
         /* distributor enabled */
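
Note: the anonymous struct keeps the GICv3 redistributor base and the new allocation cursor together inside the existing union, so GICv2's single vgic_cpu_base still overlays them and existing field accesses keep their names. A compressed, self-contained view of the layout (gpa_t shown as a plain 64-bit address type; anonymous struct members need C11 or the kernel's GNU dialect):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gpa_t;

    union vgic_io_base {
        /* either a GICv2 CPU interface... */
        gpa_t vgic_cpu_base;
        /* ...or a GICv3 redistributor region plus a free-slot cursor */
        struct {
            gpa_t vgic_redist_base;
            gpa_t vgic_redist_free_offset;
        };
    };

    int main(void)
    {
        /* both GICv3 fields are addressable without an extra name */
        union vgic_io_base b = { .vgic_redist_base = 0x8f100000 };

        b.vgic_redist_free_offset = 0;
        printf("sizeof(union) = %zu\n", sizeof(b));  /* 16 */
        return 0;
    }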
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index bce6037cf01d..32c3295929b0 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v) ((v) & 0xf)
-#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
+#define vtr_to_nr_pre_bits(v) (((u32)(v) >> 26) + 1)
 
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
@@ -135,13 +135,13 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
         if (used_lrs) {
                 int i;
-                u32 nr_pri_bits;
+                u32 nr_pre_bits;
 
                 cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 
                 write_gicreg(0, ICH_HCR_EL2);
                 val = read_gicreg(ICH_VTR_EL2);
-                nr_pri_bits = vtr_to_nr_pri_bits(val);
+                nr_pre_bits = vtr_to_nr_pre_bits(val);
 
                 for (i = 0; i < used_lrs; i++) {
                         if (cpu_if->vgic_elrsr & (1 << i))
@@ -152,7 +152,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                                 __gic_v3_set_lr(0, i);
                 }
 
-                switch (nr_pri_bits) {
+                switch (nr_pre_bits) {
                 case 7:
                         cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
                         cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
@@ -162,7 +162,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                         cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
                 }
 
-                switch (nr_pri_bits) {
+                switch (nr_pre_bits) {
                 case 7:
                         cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
                         cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
@@ -198,7 +198,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
         u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
         u64 val;
-        u32 nr_pri_bits;
+        u32 nr_pre_bits;
         int i;
 
         /*
@@ -217,12 +217,12 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         }
 
         val = read_gicreg(ICH_VTR_EL2);
-        nr_pri_bits = vtr_to_nr_pri_bits(val);
+        nr_pre_bits = vtr_to_nr_pre_bits(val);
 
         if (used_lrs) {
                 write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
-                switch (nr_pri_bits) {
+                switch (nr_pre_bits) {
                 case 7:
                         write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
                         write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
@@ -232,7 +232,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
                         write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
                 }
 
-                switch (nr_pri_bits) {
+                switch (nr_pre_bits) {
                 case 7:
                         write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
                         write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
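
Note: the rename matters because the number of ICH_AP0Rn/ICH_AP1Rn active-priority registers to save and restore is determined by ICH_VTR_EL2.PREbits (preemption bits, bits [28:26]), not by PRIbits (bits [31:29]). One observation: as written, the new macro shifts without masking, so on hardware with non-zero PRIbits those bits leak into the result; a masked variant (an editorial sketch, not part of this diff) behaves as intended:

    #include <stdint.h>
    #include <stdio.h>

    /* ICH_VTR_EL2.PREbits is bits [28:26]; register count = field + 1. */
    #define vtr_to_nr_pre_bits_masked(v)    ((((uint32_t)(v) >> 26) & 7) + 1)
    #define vtr_to_nr_pre_bits_unmasked(v)  (((uint32_t)(v) >> 26) + 1)

    int main(void)
    {
        /* PRIbits field = 4 (5 priority bits), PREbits field = 4 (5 bits) */
        uint64_t vtr = (4ULL << 29) | (4ULL << 26);

        printf("masked:   %u\n", vtr_to_nr_pre_bits_masked(vtr));   /* 5  */
        printf("unmasked: %u\n", vtr_to_nr_pre_bits_unmasked(vtr)); /* 37 */
        return 0;
    }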
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 313ee646480f..a2d63247d1bb 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
         assert_spin_locked(&kvm->mmu_lock);
         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
         do {
+                /*
+                 * Make sure the page table is still active, as another thread
+                 * could have possibly freed the page table, while we released
+                 * the lock.
+                 */
+                if (!READ_ONCE(kvm->arch.pgd))
+                        break;
                 next = stage2_pgd_addr_end(addr, end);
                 if (!stage2_pgd_none(*pgd))
                         unmap_stage2_puds(kvm, pgd, addr, next);
@@ -829,22 +836,22 @@ void stage2_unmap_vm(struct kvm *kvm)
  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
  * underlying level-2 and level-3 tables before freeing the actual level-1 table
  * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-        if (kvm->arch.pgd == NULL)
-                return;
+        void *pgd = NULL;
 
         spin_lock(&kvm->mmu_lock);
-        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+        if (kvm->arch.pgd) {
+                unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+                pgd = READ_ONCE(kvm->arch.pgd);
+                kvm->arch.pgd = NULL;
+        }
         spin_unlock(&kvm->mmu_lock);
 
         /* Free the HW pgd, one page at a time */
-        free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
-        kvm->arch.pgd = NULL;
+        if (pgd)
+                free_pages_exact(pgd, S2_PGD_SIZE);
 }
 
 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1170,11 +1177,13 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
          * large. Otherwise, we may see kernel panics with
          * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
          * CONFIG_LOCKDEP. Additionally, holding the lock too long
-         * will also starve other vCPUs.
+         * will also starve other vCPUs. We have to also make sure
+         * that the page tables are not freed while we released
+         * the lock.
          */
-        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-                cond_resched_lock(&kvm->mmu_lock);
-
+        cond_resched_lock(&kvm->mmu_lock);
+        if (!READ_ONCE(kvm->arch.pgd))
+                break;
         next = stage2_pgd_addr_end(addr, end);
         if (stage2_pgd_present(*pgd))
                 stage2_wp_puds(pgd, addr, next);
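
Note: all three mmu.c hunks close the same race. Once a stage-2 walker may drop mmu_lock (via cond_resched_lock()), kvm_free_stage2_pgd() can slip in, tear down the tables, and clear kvm->arch.pgd; so every walker rechecks the pgd pointer after it could have slept, and the free path snapshots the pointer under the lock and frees only outside it. A minimal pthread sketch of the pattern (all names are stand-ins for the kernel's):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *pgd;    /* stands in for kvm->arch.pgd */

    static void walk_range(int entries)
    {
        pthread_mutex_lock(&mmu_lock);
        for (int i = 0; i < entries; i++) {
            /* like cond_resched_lock(): the lock may be dropped here */
            pthread_mutex_unlock(&mmu_lock);
            pthread_mutex_lock(&mmu_lock);
            if (!pgd)        /* freed while we were away: bail out */
                break;
            pgd[i] = 0;      /* safe: tables still live */
        }
        pthread_mutex_unlock(&mmu_lock);
    }

    static void free_pgd(void)
    {
        int *snapshot = NULL;

        pthread_mutex_lock(&mmu_lock);
        if (pgd) {
            snapshot = pgd;  /* capture under the lock */
            pgd = NULL;      /* walkers now observe NULL and stop */
        }
        pthread_mutex_unlock(&mmu_lock);

        free(snapshot);      /* the actual free happens unlocked */
    }

    int main(void)
    {
        pgd = calloc(16, sizeof(*pgd));
        walk_range(16);
        free_pgd();
        free_pgd();          /* a second call is now harmless */
        return 0;
    }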
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index dc68e2e424ab..3a0b8999f011 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -242,8 +242,11 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
          * If we are creating a VCPU with a GICv3 we must also register the
          * KVM io device for the redistributor that belongs to this VCPU.
          */
-        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+                mutex_lock(&vcpu->kvm->lock);
                 ret = vgic_register_redist_iodev(vcpu);
+                mutex_unlock(&vcpu->kvm->lock);
+        }
         return ret;
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 99da1a207c19..201d5e2e973d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -586,7 +586,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
         if (!vgic_v3_check_base(kvm))
                 return -EINVAL;
 
-        rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2;
+        rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
         sgi_base = rd_base + SZ_64K;
 
         kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
@@ -614,11 +614,15 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
         mutex_lock(&kvm->slots_lock);
         ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
                                       SZ_64K, &sgi_dev->dev);
-        mutex_unlock(&kvm->slots_lock);
-        if (ret)
+        if (ret) {
                 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
                                           &rd_dev->dev);
+                goto out;
+        }
 
+        vgic->vgic_redist_free_offset += 2 * SZ_64K;
+out:
+        mutex_unlock(&kvm->slots_lock);
         return ret;
 }
624 628
@@ -644,10 +648,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 
         if (ret) {
                 /* The current c failed, so we start with the previous one. */
+                mutex_lock(&kvm->slots_lock);
                 for (c--; c >= 0; c--) {
                         vcpu = kvm_get_vcpu(kvm, c);
                         vgic_unregister_redist_iodev(vcpu);
                 }
+                mutex_unlock(&kvm->slots_lock);
         }
 
         return ret;
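
Note: each redistributor occupies two consecutive 64K frames (the RD frame, then the SGI frame), and registration now hands out slots from a cursor that only advances once both MMIO devices registered successfully, instead of deriving the slot from the VCPU index. A small arithmetic sketch of the allocation (the base address is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_64K 0x10000ULL

    static uint64_t redist_base = 0x080a0000ULL;   /* hypothetical GPA */
    static uint64_t redist_free_offset;

    /* Hand out the next redistributor slot: RD frame + SGI frame. */
    static void register_redist(int vcpu)
    {
        uint64_t rd_base  = redist_base + redist_free_offset;
        uint64_t sgi_base = rd_base + SZ_64K;

        printf("vcpu%d: RD at %#llx, SGI at %#llx\n", vcpu,
               (unsigned long long)rd_base,
               (unsigned long long)sgi_base);

        /* advance only once registration is known to have succeeded */
        redist_free_offset += 2 * SZ_64K;
    }

    int main(void)
    {
        for (int c = 0; c < 3; c++)
            register_redist(c);
        return 0;
    }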
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index a65757aab6d3..504b4bd0d651 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -149,6 +149,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
         if (irq->hw) {
                 val |= GICH_LR_HW;
                 val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+                /*
+                 * Never set pending+active on a HW interrupt, as the
+                 * pending state is kept at the physical distributor
+                 * level.
+                 */
+                if (irq->active && irq_is_pending(irq))
+                        val &= ~GICH_LR_PENDING_BIT;
         } else {
                 if (irq->config == VGIC_CONFIG_LEVEL)
                         val |= GICH_LR_EOI;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 8fa737edde6f..6fe3f003636a 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -127,6 +127,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
         if (irq->hw) {
                 val |= ICH_LR_HW;
                 val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
+                /*
+                 * Never set pending+active on a HW interrupt, as the
+                 * pending state is kept at the physical distributor
+                 * level.
+                 */
+                if (irq->active && irq_is_pending(irq))
+                        val &= ~ICH_LR_PENDING_BIT;
         } else {
                 if (irq->config == VGIC_CONFIG_LEVEL)
                         val |= ICH_LR_EOI;
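
Note: both list-register populate paths get the same guard. For a hardware-mapped interrupt the pending state is owned by the physical distributor, so a list register must never advertise pending+active at once; when the IRQ is active, the pending bit is stripped and the pending state is resampled from the physical side later. A compact sketch using GICv3-style state bits (bit positions as in ICH_LR_EL2; the helper itself is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LR_HW       (1ULL << 61)
    #define LR_PENDING  (1ULL << 62)
    #define LR_ACTIVE   (1ULL << 63)

    static uint64_t populate_lr(bool hw, bool pending, bool active)
    {
        uint64_t val = 0;

        if (pending)
            val |= LR_PENDING;
        if (active)
            val |= LR_ACTIVE;
        if (hw) {
            val |= LR_HW;
            /* pending lives at the physical distributor: never
             * present pending+active for a HW interrupt */
            if (active && pending)
                val &= ~LR_PENDING;
        }
        return val;
    }

    int main(void)
    {
        printf("sw pend+act: %#llx\n",
               (unsigned long long)populate_lr(false, true, true));
        printf("hw pend+act: %#llx\n",
               (unsigned long long)populate_lr(true, true, true));
        return 0;
    }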