Diffstat (limited to 'arch/arm/kvm/coproc.c')
-rw-r--r--	arch/arm/kvm/coproc.c	70
1 file changed, 14 insertions(+), 56 deletions(-)
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7928dbdf2102..f3d88dc388bc 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt1);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-		break;
-
-	case 10:		/* DCCSW */
-		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct coproc_params *p,
-			  const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+		   const struct coproc_params *p,
+		   const struct coproc_reg *r)
 {
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
 	BUG_ON(!p->is_write);
 
 	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
 		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-	return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
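Note: the two helpers this diff now calls, kvm_set_way_flush() and kvm_toggle_cache(), are introduced outside this file in the same series. The following is a minimal C sketch of the scheme the diff implies (flush stage-2 once on the first trapped set/way op, keep HCR_TVM set until the guest re-enables its caches, flush on any cache on/off transition). The bodies are an illustration under that assumption, not the exact upstream implementation; they reuse only identifiers visible in this diff and assume the usual KVM/ARM kernel context (struct kvm_vcpu, HCR_TVM, stage2_flush_vm()).

	/* Assumed sketch: on the first trapped set/way op, flush the
	 * whole stage-2 address space once, then set HCR_TVM so that
	 * subsequent VM register writes keep trapping and the cache
	 * state can be tracked. */
	void kvm_set_way_flush(struct kvm_vcpu *vcpu)
	{
		if (!(vcpu->arch.hcr & HCR_TVM)) {
			stage2_flush_vm(vcpu->kvm);
			vcpu->arch.hcr |= HCR_TVM;
		}
	}

	/* Assumed sketch: after a trapped VM register write, flush on
	 * any cache enable/disable transition, and stop trapping once
	 * the caches are on again, handing cache control back to the
	 * guest (per the comment above access_vm_reg()). */
	void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
	{
		bool now_enabled = vcpu_has_cache_enabled(vcpu);

		if (now_enabled != was_enabled)
			stage2_flush_vm(vcpu->kvm);

		if (now_enabled)
			vcpu->arch.hcr &= ~HCR_TVM;
	}

This is why access_vm_reg() samples vcpu_has_cache_enabled() before the write and passes the old value afterwards: the transition, not the absolute state, decides whether a flush is needed.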