| author | Alexander Graf <agraf@suse.de> | 2014-06-18 08:53:49 -0400 |
|---|---|---|
| committer | Alexander Graf <agraf@suse.de> | 2014-07-28 12:30:10 -0400 |
| commit | d69614a295aef72f8fb22da8e3ccf1a8f19a7ffc (patch) | |
| tree | b8ce894e8738e6711b5593a28a116db5567fe31f /arch/powerpc/kvm/emulate.c | |
| parent | c12fb43c2f6d6a57a4e21afe74ff56485d699ee7 (diff) | |
KVM: PPC: Separate loadstore emulation from priv emulation
Today the instruction emulator can get called via two separate code paths: either from the MMIO emulation detection code or from privileged instruction traps.
This is bad, because the two code paths prepare the environment differently. For MMIO emulation we already know the virtual address we faulted on, so the emulation code there does not have to fetch that information again.
Split the two use cases out into separate files.
Signed-off-by: Alexander Graf <agraf@suse.de>
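To make the intent of the split concrete, here is a minimal, illustrative sketch of what a dedicated load/store entry point can look like once MMIO emulation no longer goes through kvmppc_emulate_instruction(). The function name kvmppc_emulate_loadstore_sketch and the reduced opcode list are assumptions for this example only, not the contents of the file introduced by this commit; the point is that such a path can rely on vcpu->arch.vaddr_accessed having been filled in by the MMIO fault handler instead of recomputing the effective address.

```c
/* Illustrative sketch only -- not the code added by this commit. */
#include <linux/kvm_host.h>	/* struct kvm_run, struct kvm_vcpu */
#include <asm/kvm_ppc.h>	/* kvmppc_handle_load/store, kvmppc_{get,set}_gpr */
#include <asm/disassemble.h>	/* get_op, get_ra, get_rs, get_rt */
#include <asm/ppc-opcode.h>	/* OP_LWZ, OP_LWZU, OP_STW */

static int kvmppc_emulate_loadstore_sketch(struct kvm_run *run,
					    struct kvm_vcpu *vcpu, u32 inst)
{
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	enum emulation_result emulated = EMULATE_DONE;

	/*
	 * Reached from the MMIO path only: vcpu->arch.vaddr_accessed already
	 * holds the faulting virtual address, so update forms can write it
	 * straight back into rA without decoding the address again.
	 */
	switch (get_op(inst)) {
	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;
	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
```

The privileged-instruction path, by contrast, keeps kvmppc_emulate_instruction() for SPR, TLB and trap handling, which is exactly what survives in the diff below.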
Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
-rw-r--r--  arch/powerpc/kvm/emulate.c | 192
1 file changed, 1 insertion(+), 191 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index c5c64b6e7eb2..e96b50d0bdab 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -207,25 +207,12 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	return emulated;
 }
 
-/* XXX to do:
- * lhax
- * lhaux
- * lswx
- * lswi
- * stswx
- * stswi
- * lha
- * lhau
- * lmw
- * stmw
- *
- */
 /* XXX Should probably auto-generate instruction decoding for a particular core
  * from opcode tables in the future. */
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	u32 inst;
-	int ra, rs, rt, sprn;
+	int rs, rt, sprn;
 	enum emulation_result emulated;
 	int advance = 1;
 
@@ -238,7 +225,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
-	ra = get_ra(inst);
 	rs = get_rs(inst);
 	rt = get_rt(inst);
 	sprn = get_sprn(inst);
@@ -270,200 +256,24 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 #endif
 			advance = 0;
 			break;
-		case OP_31_XOP_LWZX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-			break;
-
-		case OP_31_XOP_LBZX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-			break;
-
-		case OP_31_XOP_LBZUX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-			break;
-
-		case OP_31_XOP_STWX:
-			emulated = kvmppc_handle_store(run, vcpu,
-						       kvmppc_get_gpr(vcpu, rs),
-						       4, 1);
-			break;
-
-		case OP_31_XOP_STBX:
-			emulated = kvmppc_handle_store(run, vcpu,
-						       kvmppc_get_gpr(vcpu, rs),
-						       1, 1);
-			break;
-
-		case OP_31_XOP_STBUX:
-			emulated = kvmppc_handle_store(run, vcpu,
-						       kvmppc_get_gpr(vcpu, rs),
-						       1, 1);
-			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-			break;
-
-		case OP_31_XOP_LHAX:
-			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
-			break;
-
-		case OP_31_XOP_LHZX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-			break;
-
-		case OP_31_XOP_LHZUX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-			break;
 
 		case OP_31_XOP_MFSPR:
 			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
 			break;
 
-		case OP_31_XOP_STHX:
-			emulated = kvmppc_handle_store(run, vcpu,
-						       kvmppc_get_gpr(vcpu, rs),
-						       2, 1);
-			break;
-
-		case OP_31_XOP_STHUX:
-			emulated = kvmppc_handle_store(run, vcpu,
-						       kvmppc_get_gpr(vcpu, rs),
-						       2, 1);
-			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-			break;
-
 		case OP_31_XOP_MTSPR:
 			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
 			break;
 
-		case OP_31_XOP_DCBST:
-		case OP_31_XOP_DCBF:
-		case OP_31_XOP_DCBI:
-			/* Do nothing. The guest is performing dcbi because
-			 * hardware DMA is not snooped by the dcache, but
-			 * emulated DMA either goes through the dcache as
-			 * normal writes, or the host kernel has handled dcache
-			 * coherence. */
-			break;
-
-		case OP_31_XOP_LWBRX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
-			break;
-
 		case OP_31_XOP_TLBSYNC:
 			break;
 
-		case OP_31_XOP_STWBRX:
-			emulated = kvmppc_handle_store(run, vcpu,
-						       kvmppc_get_gpr(vcpu, rs),
-						       4, 0);
-			break;
-
-		case OP_31_XOP_LHBRX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
-			break;
-
-		case OP_31_XOP_STHBRX:
-			emulated = kvmppc_handle_store(run, vcpu,
-						       kvmppc_get_gpr(vcpu, rs),
-						       2, 0);
-			break;
-
 		default:
 			/* Attempt core-specific emulation below. */
 			emulated = EMULATE_FAIL;
 		}
 		break;
 
-	case OP_LWZ:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-		break;
-
-	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
-	case OP_LD:
-		rt = get_rt(inst);
-		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
-		break;
-
-	case OP_LWZU:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
-
-	case OP_LBZ:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-		break;
-
-	case OP_LBZU:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
-
-	case OP_STW:
-		emulated = kvmppc_handle_store(run, vcpu,
-					       kvmppc_get_gpr(vcpu, rs),
-					       4, 1);
-		break;
-
-	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
-	case OP_STD:
-		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu,
-					       kvmppc_get_gpr(vcpu, rs),
-					       8, 1);
-		break;
-
-	case OP_STWU:
-		emulated = kvmppc_handle_store(run, vcpu,
-					       kvmppc_get_gpr(vcpu, rs),
-					       4, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
-
-	case OP_STB:
-		emulated = kvmppc_handle_store(run, vcpu,
-					       kvmppc_get_gpr(vcpu, rs),
-					       1, 1);
-		break;
-
-	case OP_STBU:
-		emulated = kvmppc_handle_store(run, vcpu,
-					       kvmppc_get_gpr(vcpu, rs),
-					       1, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
-
-	case OP_LHZ:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-		break;
-
-	case OP_LHZU:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
-
-	case OP_LHA:
-		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
-		break;
-
-	case OP_LHAU:
-		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
-
-	case OP_STH:
-		emulated = kvmppc_handle_store(run, vcpu,
-					       kvmppc_get_gpr(vcpu, rs),
-					       2, 1);
-		break;
-
-	case OP_STHU:
-		emulated = kvmppc_handle_store(run, vcpu,
-					       kvmppc_get_gpr(vcpu, rs),
-					       2, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
-
 	default:
 		emulated = EMULATE_FAIL;
 	}