path: root/arch/powerpc/kvm
author     Alexander Graf <agraf@suse.de>    2010-04-15 18:11:52 -0400
committer  Avi Kivity <avi@redhat.com>       2010-05-17 05:18:45 -0400
commit     61db97cc1e7fce4fd16f72b1350e1728797fa26f (patch)
tree       ceae282bf55a46b8580ec8aad3c7eebe2f9a5f9e /arch/powerpc/kvm
parent     97e492558f423d99c51eb934506b7a3d7c64613b (diff)
KVM: PPC: Emulate segment fault
Book3S_32 doesn't know about segment faults; it only knows about page faults. So in order to know that we didn't map a segment, we need to fake segment faults. We do this by setting invalid segment registers to an invalid VSID and then checking for that VSID on normal page faults.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
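The invalidation side of this trick is not part of the hunks below. As a rough illustration only, a Book3S_32 segment-flush path would mark every shadow segment register with SR_INVALID so that the next instruction or data storage interrupt on that segment hits the check added by this patch. A minimal sketch, assuming the shadow vcpu exposes the same sr[] array used in the diff; the helper name is hypothetical and not taken from this commit:

	/* Hypothetical flush helper: mark all 16 segment registers of the
	 * 32-bit guest address space as unused.  A later ISI/DSI whose
	 * effective address falls into such a segment then matches the
	 * SR_INVALID check below and is handled by simply remapping the
	 * segment via kvmppc_mmu_map_segment(). */
	static void example_invalidate_segments(struct kvm_vcpu *vcpu)
	{
		int i;

		for (i = 0; i < 16; i++)
			to_svcpu(vcpu)->sr[i] = SR_INVALID;
	}

With segments marked this way, the exit handler can distinguish "segment never mapped" from a genuine missing shadow PTE and resume the guest after remapping, rather than walking the page fault path.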
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c  23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d0986968a611..f8ac26599f6d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -775,6 +775,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 		vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+		/* We set segments as unused segments when invalidating them. So
+		 * treat the respective fault as segment fault. */
+		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+				== SR_INVALID) {
+			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+			r = RESUME_GUEST;
+			break;
+		}
+#endif
+
 		/* only care about PTEG not found errors, but leave NX alone */
 		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
@@ -799,6 +811,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
 		vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+		/* We set segments as unused segments when invalidating them. So
+		 * treat the respective fault as segment fault. */
+		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+			kvmppc_mmu_map_segment(vcpu, dar);
+			r = RESUME_GUEST;
+			break;
+		}
+#endif
+
 		/* The only case we need to handle is missing shadow PTEs */
 		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);