author    Paul Mackerras <paulus@samba.org>  2014-01-08 05:25:20 -0500
committer Alexander Graf <agraf@suse.de>     2014-01-27 10:00:59 -0500
commit    e0b7ec058c0eb7ba8d5d937d81de2bd16db6970e
tree      32b266bf4e3c497ab0a306731c01761b2dde11ce /arch/powerpc/kernel/asm-offsets.c
parent    eee7ff9d2cc0eaaa00496bdf4193144104c7dc63
KVM: PPC: Book3S HV: Align physical and virtual CPU thread numbers
On a threaded processor such as POWER7, we group VCPUs into virtual cores and arrange that the VCPUs in a virtual core run on the same physical core. Currently we don't enforce any correspondence between virtual thread numbers within a virtual core and physical thread numbers. Physical threads are allocated starting at 0 on a first-come first-served basis to runnable virtual threads (VCPUs).

POWER8 implements a new "msgsndp" instruction which guest kernels can use to interrupt other threads in the same core or sub-core. Since the instruction takes the destination physical thread ID as a parameter, it becomes necessary to align the physical thread IDs with the virtual thread IDs, that is, to make sure virtual thread N within a virtual core always runs on physical thread N.

This means that it's possible that thread 0, which is where we call __kvmppc_vcore_entry, may end up running some other vcpu than the one whose task called kvmppc_run_core(), or it may end up running no vcpu at all, if for example thread 0 of the virtual core is currently executing in userspace. However, we do need thread 0 to be responsible for switching the MMU -- a previous version of this patch that had other threads switching the MMU was found to be responsible for occasional memory corruption and machine check interrupts in the guest on POWER7 machines. To accommodate this, we no longer pass the vcpu pointer to __kvmppc_vcore_entry, but instead let the assembly code load it from the PACA. Since the assembly code will need to know the kvm pointer and the thread ID for threads which don't have a vcpu, we move the thread ID into the PACA and we add a kvm pointer to the virtual core structure.

In the case where thread 0 has no vcpu to run, it still calls into kvmppc_hv_entry in order to do the MMU switch, and then naps until either its vcpu is ready to run in the guest, or some other thread needs to exit the guest. In the latter case, thread 0 jumps to the code that switches the MMU back to the host. This control flow means that now we switch the MMU before loading any guest vcpu state. Similarly, on guest exit we now save all the guest vcpu state before switching the MMU back to the host. This has required substantial code movement, making the diff rather large.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
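To make the data layout described above easier to follow, here is a heavily simplified, illustrative C sketch. These are not the real kernel definitions; the *_sketch names and field selection are invented for this note. The point is the data flow: each hardware thread's per-CPU host state records its physical thread ID (ptid) and, if it has one, the vcpu to run, while the shared virtual-core structure now carries a kvm pointer so that a thread with no vcpu can still perform the MMU switch.

	struct kvm;		/* opaque for the purposes of this sketch */
	struct kvm_vcpu;	/* opaque for the purposes of this sketch */

	/* Simplified stand-in for struct kvmppc_vcore */
	struct kvmppc_vcore_sketch {
		int nap_count;
		int in_guest;
		unsigned long napping_threads;
		struct kvm *kvm;	/* new field: lets a vcpu-less thread 0 find the guest whose MMU it must switch */
	};

	/* Simplified stand-in for the per-thread host state kept in the PACA */
	struct kvmppc_host_state_sketch {
		struct kvm_vcpu *kvm_vcpu;		/* vcpu to run; the asm entry code now loads this rather than taking it as an argument */
		struct kvmppc_vcore_sketch *kvm_vcore;	/* virtual core this physical thread belongs to */
		int ptid;				/* new field: physical thread ID, moved into the PACA */
	};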
Diffstat (limited to 'arch/powerpc/kernel/asm-offsets.c')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 5e64c3d2149f..332ae66883e4 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -506,7 +506,6 @@ int main(void)
 	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
-	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
 	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
 	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
@@ -514,6 +513,7 @@ int main(void)
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
+	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
 	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
 	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
 	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
@@ -583,6 +583,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
 	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
 	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
+	HSTATE_FIELD(HSTATE_PTID, ptid);
 	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
 	HSTATE_FIELD(HSTATE_PMC, host_pmc);
 	HSTATE_FIELD(HSTATE_PURR, host_purr);
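For context on what asm-offsets.c itself does, here is a minimal, self-contained sketch of the technique; the struct and symbol names are made up for illustration and the real kernel macros differ in detail. The file is compiled to assembly only (e.g. gcc -S), each DEFINE() emits its compile-time offsetof() value behind a "->" marker in the generated .s output, and a build-time script turns those markers into #define constants (such as VCORE_KVM and HSTATE_PTID above) that the .S entry code can use as load/store displacements.

	#include <stddef.h>

	/* Toy structure standing in for struct kvmppc_vcore */
	struct vcore_example {
		int nap_count;
		int in_guest;
		void *kvm;
	};

	/* Emit "->SYM <value> <expression>" into the compiler's assembly output */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	int main(void)
	{
		/* After 'gcc -S', the .s file contains a marker line such as
		 * "->VCORE_KVM_EXAMPLE 8 offsetof(struct vcore_example, kvm)" */
		DEFINE(VCORE_KVM_EXAMPLE, offsetof(struct vcore_example, kvm));
		return 0;
	}

The sketch is meant to be compiled, not linked or run: the marker lines are post-processed into a header, which is why adding a field like ptid or kvm only requires the one-line DEFINE()/HSTATE_FIELD() entries seen in the diff.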