author    Paolo Bonzini <pbonzini@redhat.com>  2018-08-22 08:07:56 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2018-08-22 08:07:56 -0400
commit    631989303b06b8fdb15ec3b88aee2d25e80d4cec (patch)
tree      25c00dc9392cb7b25e5c9355a6176f6f4be36924 /arch/arm64/kvm
parent    ad1d69735878a6bf797705b5d2a20316d35e1113 (diff)
parent    976d34e2dab10ece5ea8fe7090b7692913f89084 (diff)
Merge tag 'kvmarm-for-v4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm updates for 4.19

- Support for Group0 interrupts in guests
- Cache management optimizations for ARMv8.4 systems
- Userspace interface for RAS, allowing error retrieval and injection
- Fault path optimization
- Emulated physical timer fixes
- Random cleanups
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/guest.c          33
-rw-r--r--  arch/arm64/kvm/hyp-init.S        6
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c   5
-rw-r--r--  arch/arm64/kvm/inject_fault.c    6
-rw-r--r--  arch/arm64/kvm/reset.c           4
-rw-r--r--  arch/arm64/kvm/sys_regs.c       54
6 files changed, 92 insertions, 16 deletions
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index cdd4d9d6d575..07256b08226c 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -289,6 +289,39 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         return -EINVAL;
 }
 
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                              struct kvm_vcpu_events *events)
+{
+        events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
+        events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+
+        if (events->exception.serror_pending && events->exception.serror_has_esr)
+                events->exception.serror_esr = vcpu_get_vsesr(vcpu);
+
+        return 0;
+}
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                              struct kvm_vcpu_events *events)
+{
+        bool serror_pending = events->exception.serror_pending;
+        bool has_esr = events->exception.serror_has_esr;
+
+        if (serror_pending && has_esr) {
+                if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+                        return -EINVAL;
+
+                if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
+                        kvm_set_sei_esr(vcpu, events->exception.serror_esr);
+                else
+                        return -EINVAL;
+        } else if (serror_pending) {
+                kvm_inject_vabt(vcpu);
+        }
+
+        return 0;
+}
+
 int __attribute_const__ kvm_target_cpu(void)
 {
         unsigned long implementor = read_cpuid_implementor();
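
The two helpers above are what the arm64 KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS ioctls bottom out in. As a minimal sketch of the userspace side (assuming a vcpu fd obtained through the usual KVM_CREATE_VM / KVM_CREATE_VCPU sequence; error handling elided), pending an SError with a caller-chosen syndrome looks like this:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: pend an SError with an explicit ESR on one vcpu. Only usable
 * when the host reported KVM_CAP_ARM_INJECT_SERROR_ESR. */
static int inject_serror_esr(int vcpu_fd, unsigned long long iss)
{
        struct kvm_vcpu_events events;

        memset(&events, 0, sizeof(events));
        events.exception.serror_pending = 1;
        events.exception.serror_has_esr = 1;
        /* Bits outside the ISS field make the ioctl fail with -EINVAL. */
        events.exception.serror_esr = iss;

        return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}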
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 6fd91b31a131..ea9225160786 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -57,6 +57,7 @@ __invalid:
  * x0: HYP pgd
  * x1: HYP stack
  * x2: HYP vectors
+ * x3: per-CPU offset
  */
 __do_hyp_init:
         /* Check for a stub HVC call */
@@ -119,9 +120,8 @@ CPU_BE( orr     x4, x4, #SCTLR_ELx_EE)
         mov     sp, x1
         msr     vbar_el2, x2
 
-        /* copy tpidr_el1 into tpidr_el2 for use by HYP */
-        mrs     x1, tpidr_el1
-        msr     tpidr_el2, x1
+        /* Set tpidr_el2 for use by HYP */
+        msr     tpidr_el2, x3
 
         /* Hello, World! */
         eret
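
The new x3 argument carries the host's per-CPU offset, which __do_hyp_init now writes straight into tpidr_el2; EL2 code no longer has to copy the host's tpidr_el1. A sketch of the pattern this enables, with illustrative names rather than the kernel's actual helpers:

/* Hypothetical sketch: with the per-CPU offset held in tpidr_el2, EL2
 * code can locate its per-CPU data directly. Names are illustrative. */
static inline unsigned long hyp_cpu_offset(void)
{
        unsigned long off;

        asm("mrs %0, tpidr_el2" : "=r" (off));
        return off;
}

#define hyp_this_cpu_ptr(sym) \
        ((typeof(&(sym)))((char *)&(sym) + hyp_cpu_offset()))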
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 35bc16832efe..9ce223944983 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -288,8 +288,3 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
 
         vcpu->arch.sysregs_loaded_on_cpu = false;
 }
-
-void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
-{
-        asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
-}
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index d8e71659ba7e..a55e91dfcf8f 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -164,9 +164,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
         inject_undef64(vcpu);
 }
 
-static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
 {
-        vcpu_set_vsesr(vcpu, esr);
+        vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
         *vcpu_hcr(vcpu) |= HCR_VSE;
 }
 
@@ -184,5 +184,5 @@ static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
  */
 void kvm_inject_vabt(struct kvm_vcpu *vcpu)
 {
-        pend_guest_serror(vcpu, ESR_ELx_ISV);
+        kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
 }
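
Note the mask now applied in kvm_set_sei_esr(): ESR_ELx_ISS_MASK covers the ISS field, bits [24:0], so only the syndrome portion of a caller-supplied value can reach VSESR_EL2; the EC and IL bits above it are stripped. A quick illustration (the mask value matches the kernel's GENMASK(24, 0) definition):

#include <stdio.h>
#include <stdint.h>

#define ESR_ELx_ISS_MASK        0x01ffffffUL    /* ISS = bits [24:0] */

int main(void)
{
        uint64_t esr = 0xdeadbeef;

        /* Prints 0x00adbeef: bits [31:25] are dropped. */
        printf("0x%08llx\n", (unsigned long long)(esr & ESR_ELx_ISS_MASK));
        return 0;
}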
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 4e4aedaf7ab7..e37c78bbe1ca 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -77,8 +77,12 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_ARM_PMU_V3:
                 r = kvm_arm_support_pmu_v3();
                 break;
+        case KVM_CAP_ARM_INJECT_SERROR_ESR:
+                r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+                break;
         case KVM_CAP_SET_GUEST_DEBUG:
         case KVM_CAP_VCPU_ATTRIBUTES:
+        case KVM_CAP_VCPU_EVENTS:
                 r = 1;
                 break;
         default:
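
Userspace is expected to probe these capabilities before relying on the events API. A minimal sketch (vm_fd from KVM_CREATE_VM; the same check also works on the /dev/kvm fd):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns nonzero when SError injection with a specific ESR is usable. */
static int supports_serror_esr(int vm_fd)
{
        if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VCPU_EVENTS) <= 0)
                return 0;       /* no vcpu events API at all */
        /* Nonzero only when the host implements the RAS extensions. */
        return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_INJECT_SERROR_ESR) > 0;
}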
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a4363735d3f8..22fbbdbece3c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -194,7 +194,16 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
         if (!p->is_write)
                 return read_from_write_only(vcpu, p, r);
 
-        kvm_set_way_flush(vcpu);
+        /*
+         * Only track S/W ops if we don't have FWB. It still indicates
+         * that the guest is a bit broken (S/W operations should only
+         * be done by firmware, knowing that there is only a single
+         * CPU left in the system, and certainly not from non-secure
+         * software).
+         */
+        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+                kvm_set_way_flush(vcpu);
+
         return true;
 }
 
@@ -243,10 +252,43 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
 {
+        bool g1;
+
         if (!p->is_write)
                 return read_from_write_only(vcpu, p, r);
 
-        vgic_v3_dispatch_sgi(vcpu, p->regval);
+        /*
+         * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
+         * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
+         * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
+         * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
+         * group.
+         */
+        if (p->is_aarch32) {
+                switch (p->Op1) {
+                default:                /* Keep GCC quiet */
+                case 0:                 /* ICC_SGI1R */
+                        g1 = true;
+                        break;
+                case 1:                 /* ICC_ASGI1R */
+                case 2:                 /* ICC_SGI0R */
+                        g1 = false;
+                        break;
+                }
+        } else {
+                switch (p->Op2) {
+                default:                /* Keep GCC quiet */
+                case 5:                 /* ICC_SGI1R_EL1 */
+                        g1 = true;
+                        break;
+                case 6:                 /* ICC_ASGI1R_EL1 */
+                case 7:                 /* ICC_SGI0R_EL1 */
+                        g1 = false;
+                        break;
+                }
+        }
+
+        vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
 
         return true;
 }
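
With the group decoded from the opcode, vgic_v3_dispatch_sgi() gains a group argument. For context, a hedged guest-side sketch of the access that would land in the new ICC_SGI0R_EL1 leg; S3_0_C12_C11_7 is the raw encoding of ICC_SGI0R_EL1, used so the snippet assembles even without a GICv3-aware assembler:

/* Guest-side sketch (illustrative): raise a Group0 SGI. The register
 * value packs target list, affinity fields and SGI INTID as defined by
 * the GICv3 architecture; building it is left out here. */
static inline void guest_send_sgi0(unsigned long sgi_val)
{
        asm volatile("msr S3_0_C12_C11_7, %0" : : "r" (sgi_val)); /* ICC_SGI0R_EL1 */
        asm volatile("isb" : : : "memory");
}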
@@ -1303,6 +1345,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
         { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
         { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
         { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
+        { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
+        { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
         { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
         { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
         { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
@@ -1613,8 +1657,6 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
-
         { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
         { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
         { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -1737,8 +1779,10 @@ static const struct sys_reg_desc cp15_regs[] = {
 static const struct sys_reg_desc cp15_64_regs[] = {
         { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
         { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
-        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
+        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
         { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+        { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
+        { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
         { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
 };
 