Diffstat (limited to 'arch/powerpc/kvm/booke.c')
 arch/powerpc/kvm/booke.c | 132 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 130 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8462b3a1c1c7..ee45fa01220e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,60 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	enable_kernel_spe();
+	kvmppc_save_guest_spe(vcpu);
+	vcpu->arch.shadow_msr &= ~MSR_SPE;
+	preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	enable_kernel_spe();
+	kvmppc_load_guest_spe(vcpu);
+	vcpu->arch.shadow_msr |= MSR_SPE;
+	preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.shared->msr & MSR_SPE) {
+		if (!(vcpu->arch.shadow_msr & MSR_SPE))
+			kvmppc_vcpu_enable_spe(vcpu);
+	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
+		kvmppc_vcpu_disable_spe(vcpu);
+	}
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/*
+ * Helper function for "full" MSR writes.  No need to call this if only
+ * EE/CE/ME/DE/RI are changing.
+ */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+	u32 old_msr = vcpu->arch.shared->msr;
+
+	vcpu->arch.shared->msr = new_msr;
+
+	kvmppc_mmu_msr_notify(vcpu, old_msr);
+
+	if (vcpu->arch.shared->msr & MSR_WE) {
+		kvm_vcpu_block(vcpu);
+		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+	};
+
+	kvmppc_vcpu_sync_spe(vcpu);
+}
+
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
 {
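A brief usage sketch for kvmppc_set_msr() above (hypothetical caller; kvmppc_get_gpr() and EMULATE_DONE come from the surrounding KVM PPC code, and the real mtmsr emulation lives elsewhere in this tree): any emulated write that can flip MSR[SPE] or the MSR[IS]/MSR[DS] address-space bits should go through this helper so that kvmppc_mmu_msr_notify() and kvmppc_vcpu_sync_spe() observe the change.

	/* Hypothetical illustration, not part of this patch: a guest
	 * mtmsr routed through kvmppc_set_msr() so the shadow MSR and
	 * lazily switched SPE state stay consistent.
	 */
	static int emulate_mtmsr_sketch(struct kvm_vcpu *vcpu, int rs)
	{
		kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
		return EMULATE_DONE;
	}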
@@ -257,6 +312,19 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 	vcpu->arch.shared->int_pending = 0;
 }
 
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	local_irq_disable();
+	kvm_guest_enter();
+	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+	kvm_guest_exit();
+	local_irq_enable();
+
+	return ret;
+}
+
 /**
  * kvmppc_handle_exit
  *
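For orientation, the wrapper just added follows the common KVM guest-entry bracketing; an annotated restatement of the same five calls (assuming __kvmppc_vcpu_run is the low-level assembly guest-entry path):

	local_irq_disable();                     /* no host IRQs once entry is committed */
	kvm_guest_enter();                       /* switch CPU time accounting to guest */
	ret = __kvmppc_vcpu_run(kvm_run, vcpu);  /* load guest state, run until an exit */
	kvm_guest_exit();                        /* back to host-side accounting */
	local_irq_enable();                      /* pending host IRQs are serviced here */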
@@ -344,10 +412,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
-	case BOOKE_INTERRUPT_SPE_UNAVAIL:
-		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+		if (vcpu->arch.shared->msr & MSR_SPE)
+			kvmppc_vcpu_enable_spe(vcpu);
+		else
+			kvmppc_booke_queue_irqprio(vcpu,
+						   BOOKE_IRQPRIO_SPE_UNAVAIL);
 		r = RESUME_GUEST;
 		break;
+	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA:
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +432,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
 		r = RESUME_GUEST;
 		break;
+#else
+	case BOOKE_INTERRUPT_SPE_UNAVAIL:
+		/*
+		 * Guest wants SPE, but host kernel doesn't support it.  Send
+		 * an "unimplemented operation" program check to the guest.
+		 */
+		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+		r = RESUME_GUEST;
+		break;
+
+	/*
+	 * These really should never happen without CONFIG_SPE,
+	 * as we should never enable the real MSR[SPE] in the guest.
+	 */
+	case BOOKE_INTERRUPT_SPE_FP_DATA:
+	case BOOKE_INTERRUPT_SPE_FP_ROUND:
+		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+		       __func__, exit_nr, vcpu->arch.pc);
+		run->hw.hardware_exit_reason = exit_nr;
+		r = RESUME_HOST;
+		break;
+#endif
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
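For context on the CONFIG_SPE branch above: kvmppc_load_guest_spe()/kvmppc_save_guest_spe() are low-level helpers defined outside this file. A sketch of the state they move, assuming it matches the vcpu fields this patch series introduces (the struct name here is invented for illustration):

	/* Sketch only: the guest SPE context that is loaded lazily on the
	 * first SPE-unavailable exit and saved when the guest drops
	 * MSR[SPE] (see kvmppc_vcpu_sync_spe() earlier in this diff).
	 */
	struct guest_spe_state_sketch {
		unsigned long evr[32];   /* upper 32 bits of GPR0..GPR31 */
		unsigned long long acc;  /* 64-bit SPE accumulator */
		unsigned long spefscr;   /* SPE status/control register */
	};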
@@ -392,6 +488,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gpa_t gpaddr;
 		gfn_t gfn;
 
+#ifdef CONFIG_KVM_E500
+		if (!(vcpu->arch.shared->msr & MSR_PR) &&
+		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
+			kvmppc_map_magic(vcpu);
+			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+			r = RESUME_GUEST;
+
+			break;
+		}
+#endif
+
 		/* Check the guest TLB. */
 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
 		if (gtlb_index < 0) {
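The CONFIG_KVM_E500 block above short-circuits the guest TLB walk when a guest-kernel access faults on the paravirt magic page. The same predicate written out as a hypothetical helper (name invented for illustration):

	/* Equivalent to the inline test above: the magic page is only
	 * reachable with MSR[PR] clear (guest kernel mode), and matches
	 * on its effective page address.
	 */
	static bool dtlb_miss_hits_magic_page(struct kvm_vcpu *vcpu, gva_t eaddr)
	{
		return !(vcpu->arch.shared->msr & MSR_PR) &&
		       (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea;
	}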
@@ -514,6 +621,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.pc = 0;
 	vcpu->arch.shared->msr = 0;
+	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
@@ -770,6 +878,26 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	return -ENOTSUPP;
 }
 
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				      struct kvm_userspace_memory_region *mem)
+{
+	return 0;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+				      struct kvm_userspace_memory_region *mem)
+{
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+	return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
 int __init kvmppc_booke_init(void)
 {
 	unsigned long ivor[16];