diff options
author | Christian Borntraeger <borntraeger@de.ibm.com> | 2016-11-10 08:22:02 -0500 |
---|---|---|
committer | Christian Borntraeger <borntraeger@de.ibm.com> | 2016-11-22 13:32:35 -0500 |
commit | 31d8b8d41a7e3e8db081972a63ef1de276ef8ab4 (patch) | |
tree | 76fde32379d40edca2fabd5b609aff67eb73cb82 | |
parent | 813ae37e6aed72cc457094b6066aa38efd66c9e9 (diff) |
KVM: s390: handle access registers in the run ioctl not in vcpu_put/load
Right now we save the host access registers in kvm_arch_vcpu_load
and load them in kvm_arch_vcpu_put. Vice versa for the guest access
registers. On schedule this means that we load/save access registers
multiple times.
e.g. VCPU_RUN with just one reschedule and then return does
[from user space via VCPU_RUN]
- save the host registers in kvm_arch_vcpu_load (via ioctl)
- load the guest registers in kvm_arch_vcpu_load (via ioctl)
- do guest stuff
- decide to schedule/sleep
- save the guest registers in kvm_arch_vcpu_put (via sched)
- load the host registers in kvm_arch_vcpu_put (via sched)
- save the host registers in switch_to (via sched)
- schedule
- return
- load the host registers in switch_to (via sched)
- save the host registers in kvm_arch_vcpu_load (via sched)
- load the guest registers in kvm_arch_vcpu_load (via sched)
- do guest stuff
- decide to go to userspace
- save the guest registers in kvm_arch_vcpu_put (via ioctl)
- load the host registers in kvm_arch_vcpu_put (via ioctl)
[back to user space]
As the kernel does not use access registers, we can avoid
this reloading and simply piggy back on switch_to (let it save
the guest values instead of host values in thread.acrs) by
moving the host/guest switch into the VCPU_RUN ioctl function.
We now do
[from user space via VCPU_RUN]
- save the host registers in kvm_arch_vcpu_ioctl_run
- load the guest registers in kvm_arch_vcpu_ioctl_run
- do guest stuff
- decide to schedule/sleep
- save the guest registers in switch_to (via sched)
- schedule
- return
- load the guest registers in switch_to (via sched)
- do guest stuff
- decide to go to userspace
- save the guest registers in kvm_arch_vcpu_ioctl_run
- load the host registers in kvm_arch_vcpu_ioctl_run
This seems to save about 10% of the time spent in the
vcpu_put/load functions, according to perf.
As vcpu_load no longer switches the acrs, we can also avoid
loading the acrs in kvm_arch_vcpu_ioctl_set_sregs.
Suggested-by: Fan Zhang <zhangfan@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r-- | arch/s390/kvm/interrupt.c | 2 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 12 |
2 files changed, 6 insertions, 8 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index be4db07f70d3..af13f1a135b6 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -415,7 +415,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu, | |||
415 | int rc; | 415 | int rc; |
416 | 416 | ||
417 | mci.val = mchk->mcic; | 417 | mci.val = mchk->mcic; |
418 | /* take care of lazy register loading via vcpu load/put */ | 418 | /* take care of lazy register loading */ |
419 | save_fpu_regs(); | 419 | save_fpu_regs(); |
420 | save_access_regs(vcpu->run->s.regs.acrs); | 420 | save_access_regs(vcpu->run->s.regs.acrs); |
421 | 421 | ||
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 9c7a1ecfe6bd..4105e1ea8dda 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -1826,8 +1826,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1826 | /* User space provided an invalid FPC, let's clear it */ | 1826 | /* User space provided an invalid FPC, let's clear it */ |
1827 | current->thread.fpu.fpc = 0; | 1827 | current->thread.fpu.fpc = 0; |
1828 | 1828 | ||
1829 | save_access_regs(vcpu->arch.host_acrs); | ||
1830 | restore_access_regs(vcpu->run->s.regs.acrs); | ||
1831 | gmap_enable(vcpu->arch.enabled_gmap); | 1829 | gmap_enable(vcpu->arch.enabled_gmap); |
1832 | atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | 1830 | atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); |
1833 | if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) | 1831 | if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) |
@@ -1851,9 +1849,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
1851 | /* Restore host register state */ | 1849 | /* Restore host register state */ |
1852 | current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; | 1850 | current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; |
1853 | current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; | 1851 | current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; |
1854 | |||
1855 | save_access_regs(vcpu->run->s.regs.acrs); | ||
1856 | restore_access_regs(vcpu->arch.host_acrs); | ||
1857 | } | 1852 | } |
1858 | 1853 | ||
1859 | static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) | 1854 | static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) |
@@ -2243,7 +2238,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
2243 | { | 2238 | { |
2244 | memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); | 2239 | memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); |
2245 | memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); | 2240 | memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); |
2246 | restore_access_regs(vcpu->run->s.regs.acrs); | ||
2247 | return 0; | 2241 | return 0; |
2248 | } | 2242 | } |
2249 | 2243 | ||
@@ -2740,6 +2734,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2740 | if (riccb->valid) | 2734 | if (riccb->valid) |
2741 | vcpu->arch.sie_block->ecb3 |= 0x01; | 2735 | vcpu->arch.sie_block->ecb3 |= 0x01; |
2742 | } | 2736 | } |
2737 | save_access_regs(vcpu->arch.host_acrs); | ||
2738 | restore_access_regs(vcpu->run->s.regs.acrs); | ||
2743 | 2739 | ||
2744 | kvm_run->kvm_dirty_regs = 0; | 2740 | kvm_run->kvm_dirty_regs = 0; |
2745 | } | 2741 | } |
@@ -2758,6 +2754,8 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2758 | kvm_run->s.regs.pft = vcpu->arch.pfault_token; | 2754 | kvm_run->s.regs.pft = vcpu->arch.pfault_token; |
2759 | kvm_run->s.regs.pfs = vcpu->arch.pfault_select; | 2755 | kvm_run->s.regs.pfs = vcpu->arch.pfault_select; |
2760 | kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; | 2756 | kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; |
2757 | save_access_regs(vcpu->run->s.regs.acrs); | ||
2758 | restore_access_regs(vcpu->arch.host_acrs); | ||
2761 | } | 2759 | } |
2762 | 2760 | ||
2763 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2761 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
@@ -2874,7 +2872,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
2874 | { | 2872 | { |
2875 | /* | 2873 | /* |
2876 | * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy | 2874 | * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy |
2877 | * copying in vcpu load/put. Lets update our copies before we save | 2875 | * switch in the run ioctl. Let's update our copies before we save |
2878 | * it into the save area | 2876 | * it into the save area |
2879 | */ | 2877 | */ |
2880 | save_fpu_regs(); | 2878 | save_fpu_regs(); |