Diffstat (limited to 'arch/powerpc/kvm/booke.c')
 arch/powerpc/kvm/booke.c | 74 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 73 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 91e7217db9d9..8ace6120ef9b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -168,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		if (!(current->thread.regs->msr & MSR_VEC)) {
+			enable_kernel_altivec();
+			load_vr_state(&vcpu->arch.vr);
+			current->thread.vr_save_area = &vcpu->arch.vr;
+			current->thread.regs->msr |= MSR_VEC;
+		}
+	}
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+		current->thread.vr_save_area = NULL;
+	}
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
 	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
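For context, a minimal usage sketch (not part of the patch) of how the two new helpers are meant to pair up around a guest run. The wrapper function and the enter_guest() placeholder are illustrative only; the real call sites are the kvmppc_vcpu_run() and resume-path hunks further down in this diff, and the preemption requirement is the one stated in the helpers' own comments.

	/*
	 * Illustrative only: both helpers are documented above as requiring
	 * preemption to be disabled, and together they hand the AltiVec unit
	 * back and forth between vcpu->arch.vr and the hardware registers.
	 */
	static int run_guest_with_altivec(struct kvm_vcpu *vcpu)	/* hypothetical wrapper */
	{
		int ret;

		preempt_disable();
		kvmppc_load_guest_altivec(vcpu);	/* vcpu->arch.vr -> vector registers */

		ret = enter_guest(vcpu);		/* placeholder for the real entry path */

		kvmppc_save_guest_altivec(vcpu);	/* live vector registers -> vcpu->arch.vr */
		preempt_enable();

		return ret;
	}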
@@ -375,9 +409,15 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	case BOOKE_IRQPRIO_ITLB_MISS:
 	case BOOKE_IRQPRIO_SYSCALL:
 	case BOOKE_IRQPRIO_FP_UNAVAIL:
+#ifdef CONFIG_SPE_POSSIBLE
 	case BOOKE_IRQPRIO_SPE_UNAVAIL:
 	case BOOKE_IRQPRIO_SPE_FP_DATA:
 	case BOOKE_IRQPRIO_SPE_FP_ROUND:
+#endif
+#ifdef CONFIG_ALTIVEC
+	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
+	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
+#endif
 	case BOOKE_IRQPRIO_AP_UNAVAIL:
 		allowed = 1;
 		msr_mask = MSR_CE | MSR_ME | MSR_DE;
@@ -697,6 +737,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	/* Save userspace AltiVec state in stack */
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		enable_kernel_altivec();
+	/*
+	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+	 * as always using the AltiVec.
+	 */
+	kvmppc_load_guest_altivec(vcpu);
+#endif
+
 	/* Switch to guest debug context */
 	debug = vcpu->arch.dbg_reg;
 	switch_booke_debug_regs(&debug);
@@ -719,6 +770,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_save_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	kvmppc_save_guest_altivec(vcpu);
+#endif
+
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
@@ -1025,7 +1080,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
 		r = RESUME_GUEST;
 		break;
-#else
+#elif defined(CONFIG_SPE_POSSIBLE)
 	case BOOKE_INTERRUPT_SPE_UNAVAIL:
 		/*
 		 * Guest wants SPE, but host kernel doesn't support it. Send
@@ -1046,6 +1101,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		run->hw.hardware_exit_reason = exit_nr;
 		r = RESUME_HOST;
 		break;
+#endif /* CONFIG_SPE_POSSIBLE */
+
+/*
+ * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
+ * see kvmppc_core_check_processor_compat().
+ */
+#ifdef CONFIG_ALTIVEC
+	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
+		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
+		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
+		r = RESUME_GUEST;
+		break;
 #endif
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
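For orientation (not part of the patch): the two new exit cases reuse the queue-then-deliver pattern already used for the FP and SPE exits above. A sketch of that flow follows; the body of the queueing step is an assumption about kvmppc_booke_queue_irqprio(), not a quote from booke.c, and the wrapper name is hypothetical.

	/* Assumed shape of the queueing step: mark the priority pending on the vcpu. */
	static void queue_altivec_unavail(struct kvm_vcpu *vcpu)	/* hypothetical helper */
	{
		set_bit(BOOKE_IRQPRIO_ALTIVEC_UNAVAIL, &vcpu->arch.pending_exceptions);
	}

	/*
	 * On the way back into the guest (RESUME_GUEST), kvmppc_booke_irqprio_deliver(),
	 * extended by the hunk at -375,9 above to allow the new priorities, sees the
	 * pending bit and injects an ordinary AltiVec unavailable/assist exception,
	 * so the guest can set MSR[VEC] for itself and retry the faulting instruction.
	 */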
@@ -1223,6 +1294,7 @@ out:
 		/* interrupts now hard-disabled */
 		kvmppc_fix_ee_before_entry();
 		kvmppc_load_guest_fp(vcpu);
+		kvmppc_load_guest_altivec(vcpu);
 	}
 }
 