diff options
author | Paul Mackerras <paulus@samba.org> | 2011-06-28 20:21:34 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-07-12 06:16:54 -0400 |
commit | de56a948b9182fbcf92cb8212f114de096c2d574 (patch) | |
tree | 633ab73672aa2543b683686fc8fb023629c5f8f8 /arch/powerpc/kernel/asm-offsets.c | |
parent | 3c42bf8a717cb636e0ed2ed77194669e2ac3ed56 (diff) |
KVM: PPC: Add support for Book3S processors in hypervisor mode
This adds support for KVM running on 64-bit Book 3S processors,
specifically POWER7, in hypervisor mode. Using hypervisor mode means
that the guest can use the processor's supervisor mode. That means
that the guest can execute privileged instructions and access privileged
registers itself without trapping to the host. This gives excellent
performance, but does mean that KVM cannot emulate a processor
architecture other than the one that the hardware implements.
This code assumes that the guest is running paravirtualized using the
PAPR (Power Architecture Platform Requirements) interface, which is the
interface that IBM's PowerVM hypervisor uses. That means that existing
Linux distributions that run on IBM pSeries machines will also run
under KVM without modification. In order to communicate the PAPR
hypercalls to qemu, this adds a new KVM_EXIT_PAPR_HCALL exit code
to include/linux/kvm.h.
Currently the choice between book3s_hv support and book3s_pr support
(i.e. the existing code, which runs the guest in user mode) has to be
made at kernel configuration time, so a given kernel binary can only
do one or the other.
This new book3s_hv code doesn't support MMIO emulation at present.
Since we are running paravirtualized guests, this isn't a serious
restriction.
With the guest running in supervisor mode, most exceptions go straight
to the guest. We will never get data or instruction storage or segment
interrupts, alignment interrupts, decrementer interrupts, program
interrupts, single-step interrupts, etc., coming to the hypervisor from
the guest. Therefore this introduces a new KVMTEST_NONHV macro for the
exception entry path so that we don't have to do the KVM test on entry
to those exception handlers.
We do however get hypervisor decrementer, hypervisor data storage,
hypervisor instruction storage, and hypervisor emulation assist
interrupts, so we have to handle those.
In hypervisor mode, real-mode accesses can access all of RAM, not just
a limited amount. Therefore we put all the guest state in the vcpu.arch
and use the shadow_vcpu in the PACA only for temporary scratch space.
We allocate the vcpu with kzalloc rather than vzalloc, and we don't use
anything in the kvmppc_vcpu_book3s struct, so we don't allocate it.
We don't have a shared page with the guest, but we still need a
kvm_vcpu_arch_shared struct to store the values of various registers,
so we include one in the vcpu_arch struct.
The POWER7 processor has a restriction that all threads in a core have
to be in the same partition. MMU-on kernel code counts as a partition
(partition 0), so we have to do a partition switch on every entry to and
exit from the guest. At present we require the host and guest to run
in single-thread mode because of this hardware restriction.
This code allocates a hashed page table for the guest and initializes
it with HPTEs for the guest's Virtual Real Memory Area (VRMA). We
require that the guest memory is allocated using 16MB huge pages, in
order to simplify the low-level memory management. This also means that
we can get away without tracking paging activity in the host for now,
since huge pages can't be paged or swapped.
This also adds a few new exports needed by the book3s_hv code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kernel/asm-offsets.c')
-rw-r--r-- | arch/powerpc/kernel/asm-offsets.c | 79 |
1 file changed, 79 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index dabfb7346f36..936267462cae 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -187,6 +187,7 @@ int main(void) | |||
187 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); | 187 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); |
188 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); | 188 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); |
189 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); | 189 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); |
190 | DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); | ||
190 | DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); | 191 | DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); |
191 | DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); | 192 | DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); |
192 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 193 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
@@ -392,6 +393,29 @@ int main(void) | |||
392 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); | 393 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); |
393 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); | 394 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); |
394 | DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); | 395 | DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); |
396 | DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); | ||
397 | DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr)); | ||
398 | #ifdef CONFIG_ALTIVEC | ||
399 | DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr)); | ||
400 | DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr)); | ||
401 | #endif | ||
402 | #ifdef CONFIG_VSX | ||
403 | DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr)); | ||
404 | #endif | ||
405 | DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); | ||
406 | DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); | ||
407 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); | ||
408 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); | ||
409 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); | ||
410 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
411 | DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); | ||
412 | DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); | ||
413 | DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); | ||
414 | DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0)); | ||
415 | DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1)); | ||
416 | DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); | ||
417 | DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); | ||
418 | #endif | ||
395 | DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); | 419 | DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); |
396 | DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); | 420 | DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); |
397 | DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); | 421 | DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); |
@@ -403,17 +427,60 @@ int main(void) | |||
403 | DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); | 427 | DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); |
404 | 428 | ||
405 | /* book3s */ | 429 | /* book3s */ |
430 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
431 | DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); | ||
432 | DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); | ||
433 | DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); | ||
434 | DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); | ||
435 | DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); | ||
436 | DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock)); | ||
437 | DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter)); | ||
438 | DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu)); | ||
439 | DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); | ||
440 | DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); | ||
441 | #endif | ||
406 | #ifdef CONFIG_PPC_BOOK3S | 442 | #ifdef CONFIG_PPC_BOOK3S |
443 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); | ||
444 | DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); | ||
407 | DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); | 445 | DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); |
408 | DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); | 446 | DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); |
447 | DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); | ||
448 | DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); | ||
449 | DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); | ||
450 | DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); | ||
451 | DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); | ||
452 | DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); | ||
453 | DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); | ||
409 | DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); | 454 | DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); |
410 | DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); | 455 | DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); |
411 | DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); | 456 | DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); |
412 | DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); | 457 | DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); |
413 | DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); | 458 | DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); |
459 | DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); | ||
460 | DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); | ||
461 | DEFINE(VCPU_LPCR, offsetof(struct kvm_vcpu, arch.lpcr)); | ||
462 | DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); | ||
463 | DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); | ||
464 | DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); | ||
465 | DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); | ||
466 | DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); | ||
467 | DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu)); | ||
468 | DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); | ||
469 | DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); | ||
470 | DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); | ||
471 | DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); | ||
472 | DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) - | ||
473 | offsetof(struct kvmppc_vcpu_book3s, vcpu)); | ||
474 | DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); | ||
475 | DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); | ||
476 | DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); | ||
414 | 477 | ||
415 | #ifdef CONFIG_PPC_BOOK3S_64 | 478 | #ifdef CONFIG_PPC_BOOK3S_64 |
479 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
416 | # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) | 480 | # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) |
481 | #else | ||
482 | # define SVCPU_FIELD(x, f) | ||
483 | #endif | ||
417 | # define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f)) | 484 | # define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f)) |
418 | #else /* 32-bit */ | 485 | #else /* 32-bit */ |
419 | # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f)) | 486 | # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f)) |
@@ -453,11 +520,23 @@ int main(void) | |||
453 | 520 | ||
454 | HSTATE_FIELD(HSTATE_HOST_R1, host_r1); | 521 | HSTATE_FIELD(HSTATE_HOST_R1, host_r1); |
455 | HSTATE_FIELD(HSTATE_HOST_R2, host_r2); | 522 | HSTATE_FIELD(HSTATE_HOST_R2, host_r2); |
523 | HSTATE_FIELD(HSTATE_HOST_MSR, host_msr); | ||
456 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); | 524 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); |
457 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); | 525 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); |
458 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); | 526 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); |
459 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); | 527 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); |
460 | 528 | ||
529 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
530 | HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); | ||
531 | HSTATE_FIELD(HSTATE_MMCR, host_mmcr); | ||
532 | HSTATE_FIELD(HSTATE_PMC, host_pmc); | ||
533 | HSTATE_FIELD(HSTATE_PURR, host_purr); | ||
534 | HSTATE_FIELD(HSTATE_SPURR, host_spurr); | ||
535 | HSTATE_FIELD(HSTATE_DSCR, host_dscr); | ||
536 | HSTATE_FIELD(HSTATE_DABR, dabr); | ||
537 | HSTATE_FIELD(HSTATE_DECEXP, dec_expires); | ||
538 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | ||
539 | |||
461 | #else /* CONFIG_PPC_BOOK3S */ | 540 | #else /* CONFIG_PPC_BOOK3S */ |
462 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); | 541 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); |
463 | DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); | 542 | DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); |