Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
-rw-r--r--	arch/powerpc/kvm/book3s.c	65
1 file changed, 35 insertions(+), 30 deletions(-)
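Every hunk below applies the same substitution: the guest-visible MSR moves out of vcpu->arch.msr into a page shared between host and guest, reached as vcpu->arch.shared->msr. A minimal sketch of the structure that dereference assumes — the real definition is introduced elsewhere in this series, so treat this as illustrative:

struct kvm_vcpu_arch_shared {
	/* Guest-visible register state kept in a page the host can map
	 * into the guest, so the guest reads its MSR with a plain load
	 * instead of a trap into the hypervisor. */
	__u64 msr;
};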
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b3385dd6f28d..2efe69240e1b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -115,31 +115,31 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.shadow_msr = vcpu->arch.msr;
+	ulong smsr = vcpu->arch.shared->msr;
+
 	/* Guest MSR values */
-	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
-				 MSR_BE | MSR_DE;
+	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
 	/* Process MSR values */
-	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
-				 MSR_EE;
+	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
-	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
 	/* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+	smsr |= MSR_ISF | MSR_HV;
 #endif
+	vcpu->arch.shadow_msr = smsr;
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-	ulong old_msr = vcpu->arch.msr;
+	ulong old_msr = vcpu->arch.shared->msr;
 
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
 	msr &= to_book3s(vcpu)->msr_mask;
-	vcpu->arch.msr = msr;
+	vcpu->arch.shared->msr = msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
 	if (msr & (MSR_WE|MSR_POW)) {
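The first hunk also restructures kvmppc_recalc_shadow_msr: the value is now built in a local smsr and stored once at the end, rather than mutating vcpu->arch.shadow_msr through several read-modify-write steps. Restated as a standalone sketch (masks copied from the hunk; the helper name and the omission of the 64-bit-only MSR_ISF | MSR_HV bits are mine):

/* The MSR the guest actually runs with: its own MSR filtered to bits it
 * may control, plus any facilities it currently owns, plus bits the host
 * always enforces while the guest runs. */
#define GUEST_CTRL_BITS		(MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE)
#define HOST_FORCED_BITS	(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE)

static ulong compute_shadow_msr(ulong guest_msr, ulong guest_owned_ext)
{
	return (guest_msr & (GUEST_CTRL_BITS | guest_owned_ext))
		| HOST_FORCED_BITS;
}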
@@ -149,21 +149,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		}
 	}
 
-	if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
 		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.msr & MSR_FP)
+	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
 	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-	vcpu->arch.srr1 = vcpu->arch.msr | flags;
+	vcpu->arch.srr1 = vcpu->arch.shared->msr | flags;
 	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
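kvmppc_inject_interrupt mirrors what the hardware does when an exception is taken: SRR0 receives the return address, SRR1 receives the old MSR plus fault flags, and execution vectors to hior + vec with the MSR reset by the MMU backend. A simplified standalone model of that sequence — reset_msr() is MMU-specific, so only the interrupt-disable effect is modeled here:

#include <stdint.h>

struct gstate {
	uint64_t pc, msr, srr0, srr1, hior;
};

#define MODEL_MSR_EE	0x8000ULL	/* external interrupt enable */

/* Illustrative model only, not kernel code. */
static void model_inject_interrupt(struct gstate *g, uint64_t vec,
				   uint64_t flags)
{
	g->srr0 = g->pc;		/* where the guest will resume */
	g->srr1 = g->msr | flags;	/* old MSR plus status bits */
	g->pc = g->hior + vec;		/* enter the guest's handler */
	g->msr &= ~MODEL_MSR_EE;	/* exception entry masks interrupts */
}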
@@ -254,11 +254,11 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 
 	switch (priority) {
 	case BOOK3S_IRQPRIO_DECREMENTER:
-		deliver = vcpu->arch.msr & MSR_EE;
+		deliver = vcpu->arch.shared->msr & MSR_EE;
 		vec = BOOK3S_INTERRUPT_DECREMENTER;
 		break;
 	case BOOK3S_IRQPRIO_EXTERNAL:
-		deliver = vcpu->arch.msr & MSR_EE;
+		deliver = vcpu->arch.shared->msr & MSR_EE;
 		vec = BOOK3S_INTERRUPT_EXTERNAL;
 		break;
 	case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -437,7 +437,7 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 			 struct kvmppc_pte *pte)
 {
-	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
@@ -545,8 +545,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
-	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
 	u64 vsid;
 
 	relocated = data ? dr : ir;
@@ -563,7 +563,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.vpage = eaddr >> 12;
 	}
 
-	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
 		break;
@@ -571,7 +571,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case MSR_IR:
 		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-		if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
 			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
 		else
 			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -596,14 +596,16 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* Page not found in guest PTE entries */
 		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
-		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.shared->msr |=
+			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
 		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
 		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
 		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.shared->msr |=
+			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
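Both fault branches above park status bits from shadow_srr1 in the guest's MSR before queueing the interrupt; the kvmppc_inject_interrupt hunk earlier shows why this works, since the guest's SRR1 is built as shared->msr | flags at delivery time, so the parked bits surface in SRR1 exactly where a real storage fault would report them. A worked reading of the 0x00000000f8000000ULL mask, under the IBM convention that bit 0 is the most-significant bit of a 64-bit register:

#include <stdint.h>
#include <stdio.h>

/* IBM bit numbering on a 64-bit register: bit 0 is the MSB. */
static uint64_t ibm_bit64(int b)
{
	return 1ULL << (63 - b);
}

int main(void)
{
	uint64_t mask = 0;
	int b;

	/* SRR1[32:36], where the ISI status bits are reported. */
	for (b = 32; b <= 36; b++)
		mask |= ibm_bit64(b);

	printf("0x%016llx\n", (unsigned long long)mask);
	/* prints 0x00000000f8000000, the literal used in the hunk */
	return 0;
}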
@@ -695,9 +697,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
 	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
 	if (ret == -ENOENT) {
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+		ulong msr = vcpu->arch.shared->msr;
+
+		msr = kvmppc_set_field(msr, 33, 33, 1);
+		msr = kvmppc_set_field(msr, 34, 36, 0);
+		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
 		return EMULATE_AGAIN;
 	}
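kvmppc_read_inst uses the same bit convention: when the guest's instruction can't be loaded (-ENOENT, no translation), it sets MSR bit 33 and clears bits 34:36 and 42:47, which land in SRR1 as instruction-storage-fault status once the interrupt is delivered. A hedged sketch of the helper's semantics — not the kernel's exact implementation, just what these call sites rely on:

#include <stdint.h>

/* Set bits msb..lsb of a 64-bit value, IBM numbering (bit 0 = MSB). */
static inline uint64_t set_field64(uint64_t val, int msb, int lsb,
				   uint64_t field)
{
	uint64_t mask = ((1ULL << (lsb - msb + 1)) - 1) << (63 - lsb);

	return (val & ~mask) | ((field << (63 - lsb)) & mask);
}

/* e.g. set_field64(msr, 33, 33, 1) ORs in 1ULL << 30 == 0x40000000,
 * the SRR1 bit an ISI uses to report a page fault. */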
@@ -736,7 +740,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
 		return RESUME_GUEST;
 
-	if (!(vcpu->arch.msr & msr)) {
+	if (!(vcpu->arch.shared->msr & msr)) {
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		return RESUME_GUEST;
 	}
@@ -804,7 +808,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if ((exit_nr != 0x900) && (exit_nr != 0x500))
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
 			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-			vcpu->arch.msr);
+			vcpu->arch.shared->msr);
 #endif
 	kvm_resched(vcpu);
 	switch (exit_nr) {
@@ -836,7 +840,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+			vcpu->arch.shared->msr |=
+				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
@@ -904,7 +909,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
 		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
-		if (vcpu->arch.msr & MSR_PR) {
+		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
 			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -1052,7 +1057,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->ctr = kvmppc_get_ctr(vcpu);
 	regs->lr = kvmppc_get_lr(vcpu);
 	regs->xer = kvmppc_get_xer(vcpu);
-	regs->msr = vcpu->arch.msr;
+	regs->msr = vcpu->arch.shared->msr;
 	regs->srr0 = vcpu->arch.srr0;
 	regs->srr1 = vcpu->arch.srr1;
 	regs->pid = vcpu->arch.pid;
@@ -1353,7 +1358,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	local_irq_enable();
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.msr & MSR_FP)
+	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
 	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);