author     Alexander Graf <agraf@suse.de>           2010-01-15 08:49:13 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>    2010-03-01 10:35:56 -0500
commit     f7adbba1e5d464b0d449adac1eb2519be6be9728 (patch)
tree       5a7580ac34b8abfb976525b9802d860e01cc3581 /arch/powerpc/kvm/book3s.c
parent     1c0006d8d131585095c4a27dbfcfb3970807a35e (diff)
KVM: PPC: Keep SRR1 flags around in shadow_msr
SRR1 stores more information than just the MSR value. It also stores valuable
information about the type of interrupt we received, for example whether the
storage interrupt we just got was because of a missing htab entry or not. We
use that information to speed up the exit path.

Now if we get preempted before we can interpret the shadow_msr values, we get
into vcpu_put, which then calls the MSR handler, which then sets all the SRR1
information bits in shadow_msr to 0. Great.

So let's preserve the SRR1-specific bits in shadow_msr whenever we set the MSR.
They don't hurt.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
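The idea behind the fix, as a minimal sketch rather than the actual patch (the
mask name SRR1_FLAGS_MASK and the helper update_shadow_msr() below are
hypothetical; only the 0x00000000f8000000ULL flag mask appears in the diff):
when the architectural MSR bits of the shadow value are rewritten, keep the
SRR1 interrupt-cause bits that were latched on guest exit instead of zeroing
them.

/* Hedged sketch, not the in-tree implementation: preserve the SRR1
 * interrupt-cause bits when the MSR-derived bits are updated.
 * SRR1_FLAGS_MASK and update_shadow_msr() are illustrative names. */
#define SRR1_FLAGS_MASK 0x00000000f8000000ULL

static inline unsigned long update_shadow_msr(unsigned long shadow_msr,
                                              unsigned long new_msr)
{
        /* keep the latched SRR1 flag bits, replace only the MSR bits */
        return (shadow_msr & SRR1_FLAGS_MASK) | (new_msr & ~SRR1_FLAGS_MASK);
}

In the fault paths changed below, the preserved cause bits (masked with
0x00000000f8000000ULL for storage interrupts) are then OR'd back into the
guest's vcpu->arch.msr before the interrupt is queued.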
Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
-rw-r--r--  arch/powerpc/kvm/book3s.c  |  13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 2cb181396f82..58f5200fc09b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -524,14 +524,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 /* Page not found in guest PTE entries */
                 vcpu->arch.dear = vcpu->arch.fault_dear;
                 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-                vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+                vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EPERM) {
                 /* Storage protection */
                 vcpu->arch.dear = vcpu->arch.fault_dear;
                 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-                vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+                vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EINVAL) {
                 /* Page not found in guest SLB */
@@ -693,7 +693,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         case BOOK3S_INTERRUPT_INST_STORAGE:
                 vcpu->stat.pf_instruc++;
                 /* only care about PTEG not found errors, but leave NX alone */
-                if (vcpu->arch.shadow_msr & 0x40000000) {
+                if (vcpu->arch.shadow_srr1 & 0x40000000) {
                         r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
                         vcpu->stat.sp_instruc++;
                 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -705,7 +705,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          */
                         kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
                 } else {
-                        vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+                        vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                         kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
                         r = RESUME_GUEST;
@@ -753,7 +753,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 enum emulation_result er;
                 ulong flags;
 
-                flags = (vcpu->arch.shadow_msr & 0x1f0000ull);
+                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
                 if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -808,7 +808,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 break;
         default:
                 /* Ugh - bork here! What did we get? */
-                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+                        exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
                 r = RESUME_HOST;
                 BUG();
                 break;