author    Marc Zyngier <marc.zyngier@arm.com>    2014-12-19 11:05:31 -0500
committer Christoffer Dall <christoffer.dall@linaro.org>    2015-01-29 17:24:56 -0500
commit    3c1e716508335eb132c9349cb1a1716c8f7e3d2e (patch)
tree      39aa2fe691caa61a34cef1ac7d09f3348249ddb5 /arch/arm64
parent    f3747379accba8e95d70cec0eae0582c8c182050 (diff)
arm/arm64: KVM: Use set/way op trapping to track the state of the caches
Trying to emulate the behaviour of set/way cache ops is fairly pointless, as there are too many ways we can end up missing stuff. Also, there are some system caches out there that simply ignore set/way operations.

So instead of trying to implement them, let's convert them to VA ops, and use them as a way to re-enable the trapping of VM ops. That way, we can detect the point when the MMU/caches are turned off, and do a full VM flush (which is what the guest was trying to do anyway).

This allows a 32bit zImage to boot on the APM thingy, and will probably help bootloaders in general.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 10
-rw-r--r--  arch/arm64/include/asm/kvm_host.h    |  3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h     |  3
-rw-r--r--  arch/arm64/kvm/sys_regs.c            | 75
4 files changed, 22 insertions(+), 69 deletions(-)
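
Note: the two helpers the diff below starts calling, kvm_set_way_flush() and kvm_toggle_cache(), are only declared in the arch/arm64 headers here; their bodies live in the common ARM code, outside this diffstat. As a rough, non-authoritative sketch of the mechanism the commit message describes (assuming the helpers sit next to stage2_flush_vm() and can use the vcpu_get_hcr()/vcpu_set_hcr() accessors added below), the flow could look like this:

void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
        unsigned long hcr = vcpu_get_hcr(vcpu);

        /*
         * First trapped set/way op: flush the whole VM and start trapping
         * the VM registers (HCR_TVM) so we notice when the guest turns
         * its MMU/caches off or back on.
         */
        if (!(hcr & HCR_TVM)) {
                stage2_flush_vm(vcpu->kvm);
                vcpu_set_hcr(vcpu, hcr | HCR_TVM);
        }
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
        bool now_enabled = vcpu_has_cache_enabled(vcpu);

        /* Any MMU/cache transition gets a full clean+invalidate. */
        if (now_enabled != was_enabled)
                stage2_flush_vm(vcpu->kvm);

        /* Caches are on again: stop trapping VM ops until the next S/W op. */
        if (now_enabled)
                vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
}

The idea is that a single full stage-2 flush covers both directions (cleaning before the caches go off, invalidating before they come back on), which is the "full VM flush" the commit message refers to.
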
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 865a7e28ea2d..3cb4c856b10d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr_el2 = hcr;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..acd101a9014d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest */
 	bool pause;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..92d22e94a79b 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -260,7 +260,8 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 
 #define kvm_virt_to_phys(x)	__virt_to_phys((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..f31e8bb2bc5b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
 	return ccsidr;
 }
 
-static void do_dc_cisw(u32 val)
-{
-	asm volatile("dc cisw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
-	asm volatile("dc csw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		do_dc_cisw(val);
-		break;
-
-	case 10:		/* DCCSW */
-		do_dc_csw(val);
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
 	unsigned long val;
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
 
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
 	}
 
-	return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
-			 const struct sys_reg_desc *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr_el2 &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  NULL, reset_mpidr, MPIDR_EL1 },
 	/* SCTLR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
 	/* CPACR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
 	  NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },