commit 180a34d2d3fda0151154f9cead4aab9dddd3d0c1
tree   d55552acc176645c374496b988823071e50bca23 /arch/powerpc/kvm
parent d5e528136cda31a32ff7d1eaa8d06220eb443781

author    Alexander Graf <agraf@suse.de>            2010-01-15 08:49:11 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>     2010-03-01 10:35:52 -0500
KVM: PPC: Add support for FPU/Altivec/VSX
When our guest starts using either the FPU, Altivec or VSX, we need to make
sure Linux knows about it and hooks into its process switching code
accordingly.

This patch makes accesses to the above facilities work inside the VM.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
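
Editor's note: the mechanism throughout this patch is lazy facility switching.
The guest only receives a facility once it traps on first use, and has to give
it back whenever the host needs the registers; a guest_owned_ext bitmask tracks
who currently owns what. A minimal user-space sketch of that ownership
protocol, with hypothetical helpers (handle_ext/giveup_ext mirror the patch's
kvmppc_handle_ext/kvmppc_giveup_ext in spirit only):

    /* Hypothetical user-space model of lazy facility ownership. The MSR_*
     * values use the 64-bit PowerPC bit positions; everything else here is
     * illustrative and not kernel code. */
    #include <stdio.h>

    #define MSR_FP  (1UL << 13)   /* floating point available */
    #define MSR_VEC (1UL << 25)   /* Altivec available */
    #define MSR_VSX (1UL << 23)   /* VSX available */

    static unsigned long guest_owned_ext;  /* facilities the guest owns now */

    /* Guest trapped on first use of a facility: hand it over. */
    static void handle_ext(unsigned long msr)
    {
            if (guest_owned_ext & msr)
                    return;            /* already owned: no state shuffling */
            /* ...the real code loads guest registers into the facility... */
            guest_owned_ext |= msr;
            printf("guest now owns 0x%lx\n", msr);
    }

    /* Host wants the facility back: flush guest state, drop ownership. */
    static void giveup_ext(unsigned long msr)
    {
            if (!(guest_owned_ext & msr))
                    return;
            /* ...the real code saves the facility back into the vcpu... */
            guest_owned_ext &= ~msr;
            printf("guest gave up 0x%lx\n", msr);
    }

    int main(void)
    {
            handle_ext(MSR_FP);  /* first FP use in the guest: trap + load */
            handle_ext(MSR_FP);  /* subsequent uses run at full speed */
            giveup_ext(MSR_FP);  /* host schedules: guest FP state saved */
            giveup_ext(MSR_VEC); /* never owned: a no-op */
            return 0;
    }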
Diffstat (limited to 'arch/powerpc/kvm')
 arch/powerpc/kvm/book3s.c | 193 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 188 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 02861fda73da..2cb181396f82 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -33,6 +33,9 @@
 
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
+/* #define DEBUG_EXT */
+
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "exits", VCPU_STAT(sum_exits) },
@@ -77,6 +80,10 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
         memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
                sizeof(get_paca()->shadow_vcpu));
         to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+
+        kvmppc_giveup_ext(vcpu, MSR_FP);
+        kvmppc_giveup_ext(vcpu, MSR_VEC);
+        kvmppc_giveup_ext(vcpu, MSR_VSX);
 }
 
 #if defined(EXIT_DEBUG)
@@ -97,9 +104,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
         msr &= to_book3s(vcpu)->msr_mask;
         vcpu->arch.msr = msr;
         vcpu->arch.shadow_msr = msr | MSR_USER32;
-        vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
-                                  MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
-                                  MSR_FE1);
+        vcpu->arch.shadow_msr &= (MSR_FE0 | MSR_USER64 | MSR_SE | MSR_BE |
+                                  MSR_DE | MSR_FE1);
+        vcpu->arch.shadow_msr |= (msr & vcpu->arch.guest_owned_ext);
 
         if (msr & (MSR_WE|MSR_POW)) {
                 if (!vcpu->arch.pending_exceptions) {
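
Editor's note: this reworked masking is the heart of the laziness. MSR_FP,
MSR_VEC and MSR_VSX are dropped from the pass-through mask and re-added only
when the corresponding guest_owned_ext bit is set, so a guest that sets MSR_FP
without owning the FPU still traps on first use. A self-contained illustration
of that gating (MSR_FP uses the real bit position; the function is a
simplification, not kernel code):

    #include <assert.h>

    #define MSR_FP (1UL << 13)

    /* The FP bit reaches the shadow MSR only if the guest both requests
     * it (guest_msr) and currently owns the FPU (guest_owned_ext). */
    static unsigned long shadow_fp_bit(unsigned long guest_msr,
                                       unsigned long guest_owned_ext)
    {
            return guest_msr & guest_owned_ext & MSR_FP;
    }

    int main(void)
    {
            /* FP requested but not owned: stays off, so first use traps. */
            assert(shadow_fp_bit(MSR_FP, 0) == 0);
            /* Owned after kvmppc_handle_ext(): the bit passes through. */
            assert(shadow_fp_bit(MSR_FP, MSR_FP) == MSR_FP);
            /* Guest turned FP off: ownership alone must not leak it in. */
            assert(shadow_fp_bit(0, MSR_FP) == 0);
            return 0;
    }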
@@ -551,6 +558,117 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         return r;
 }
 
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+        i *= 2;
+#endif
+        return i;
+}
+
+/* Give up external provider (FPU, Altivec, VSX) */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+        struct thread_struct *t = &current->thread;
+        u64 *vcpu_fpr = vcpu->arch.fpr;
+        u64 *vcpu_vsx = vcpu->arch.vsr;
+        u64 *thread_fpr = (u64*)t->fpr;
+        int i;
+
+        if (!(vcpu->arch.guest_owned_ext & msr))
+                return;
+
+#ifdef DEBUG_EXT
+        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+        switch (msr) {
+        case MSR_FP:
+                giveup_fpu(current);
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+                vcpu->arch.fpscr = t->fpscr.val;
+                break;
+        case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+                giveup_altivec(current);
+                memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+                vcpu->arch.vscr = t->vscr;
+#endif
+                break;
+        case MSR_VSX:
+#ifdef CONFIG_VSX
+                __giveup_vsx(current);
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+                        vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+                break;
+        default:
+                BUG();
+        }
+
+        vcpu->arch.guest_owned_ext &= ~msr;
+        current->thread.regs->msr &= ~msr;
+        kvmppc_set_msr(vcpu, vcpu->arch.msr);
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+                             ulong msr)
+{
+        struct thread_struct *t = &current->thread;
+        u64 *vcpu_fpr = vcpu->arch.fpr;
+        u64 *vcpu_vsx = vcpu->arch.vsr;
+        u64 *thread_fpr = (u64*)t->fpr;
+        int i;
+
+        if (!(vcpu->arch.msr & msr)) {
+                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                return RESUME_GUEST;
+        }
+
+#ifdef DEBUG_EXT
+        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+        current->thread.regs->msr |= msr;
+
+        switch (msr) {
+        case MSR_FP:
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+                t->fpscr.val = vcpu->arch.fpscr;
+                t->fpexc_mode = 0;
+                kvmppc_load_up_fpu();
+                break;
+        case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+                memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+                t->vscr = vcpu->arch.vscr;
+                t->vrsave = -1;
+                kvmppc_load_up_altivec();
+#endif
+                break;
+        case MSR_VSX:
+#ifdef CONFIG_VSX
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+                kvmppc_load_up_vsx();
+#endif
+                break;
+        default:
+                BUG();
+        }
+
+        vcpu->arch.guest_owned_ext |= msr;
+
+        kvmppc_set_msr(vcpu, vcpu->arch.msr);
+
+        return RESUME_GUEST;
+}
+
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
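
Editor's note: worth spelling out is the get_fpr_index() trick both functions
rely on. With CONFIG_VSX, the thread's FP registers are stored as the first
half of each 128-bit VSR, so FPR i lives at u64 index 2*i and the guest's VSX
low half goes to 2*i + 1. A stand-alone sketch of that layout (array sizes and
register values are made up):

    #include <stdio.h>

    #define NUM_FPRS 32

    /* Each 128-bit VSR is stored as two u64s: FPR i is the first half
     * (index 2*i), the VSX extension is the second (index 2*i + 1). */
    static int get_fpr_index(int i)
    {
            return i * 2;   /* the CONFIG_VSX case */
    }

    int main(void)
    {
            unsigned long long thread_fpr[NUM_FPRS * 2];
            unsigned long long vcpu_fpr[NUM_FPRS], vcpu_vsx[NUM_FPRS];
            int i;

            /* Interleave guest state the way kvmppc_handle_ext() does. */
            for (i = 0; i < NUM_FPRS; i++) {
                    vcpu_fpr[i] = 0xF000 + i; /* pretend guest FPRs */
                    vcpu_vsx[i] = 0x5000 + i; /* pretend VSX low halves */
                    thread_fpr[get_fpr_index(i)]     = vcpu_fpr[i];
                    thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
            }

            /* FPR 3 and its VSX companion sit side by side. */
            printf("fpr3=0x%llx vsx3=0x%llx\n",
                   thread_fpr[6], thread_fpr[7]);
            return 0;
    }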
@@ -674,11 +792,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 r = RESUME_GUEST;
                 break;
-        case BOOK3S_INTERRUPT_MACHINE_CHECK:
         case BOOK3S_INTERRUPT_FP_UNAVAIL:
-        case BOOK3S_INTERRUPT_TRACE:
+                r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
+                break;
         case BOOK3S_INTERRUPT_ALTIVEC:
+                r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
+                break;
         case BOOK3S_INTERRUPT_VSX:
+                r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+                break;
+        case BOOK3S_INTERRUPT_MACHINE_CHECK:
+        case BOOK3S_INTERRUPT_TRACE:
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 r = RESUME_GUEST;
                 break;
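
Editor's note: this hunk turns three previously pass-through interrupts into
lazy load requests, each mapped to one MSR facility bit. A compact sketch of
that mapping; the vector numbers (0x800, 0xf20, 0xf40) follow the Book3S
facility-unavailable vectors as I understand them, so treat them as
assumptions rather than authoritative constants:

    #include <stdio.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)
    #define MSR_VSX (1UL << 23)

    enum {
            INT_FP_UNAVAIL = 0x800, /* cf. BOOK3S_INTERRUPT_FP_UNAVAIL */
            INT_ALTIVEC    = 0xf20, /* cf. BOOK3S_INTERRUPT_ALTIVEC */
            INT_VSX        = 0xf40, /* cf. BOOK3S_INTERRUPT_VSX */
    };

    /* Map a facility-unavailable exit to the MSR bit it should enable. */
    static unsigned long ext_msr_bit(unsigned int exit_nr)
    {
            switch (exit_nr) {
            case INT_FP_UNAVAIL: return MSR_FP;
            case INT_ALTIVEC:    return MSR_VEC;
            case INT_VSX:        return MSR_VSX;
            default:             return 0;  /* not a lazy-load exit */
            }
    }

    int main(void)
    {
            printf("exit 0x800 -> MSR bit 0x%lx\n",
                   ext_msr_bit(INT_FP_UNAVAIL));
            printf("exit 0xf20 -> MSR bit 0x%lx\n",
                   ext_msr_bit(INT_ALTIVEC));
            return 0;
    }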
@@ -959,6 +1083,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
         int ret;
+        struct thread_struct ext_bkp;
+        bool save_vec = current->thread.used_vr;
+        bool save_vsx = current->thread.used_vsr;
+        ulong ext_msr;
 
         /* No need to go into the guest when all we do is going out */
         if (signal_pending(current)) {
@@ -966,6 +1094,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                 return -EINTR;
         }
 
+        /* Save FPU state in stack */
+        if (current->thread.regs->msr & MSR_FP)
+                giveup_fpu(current);
+        memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
+        ext_bkp.fpscr = current->thread.fpscr;
+        ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+        /* Save Altivec state in stack */
+        if (save_vec) {
+                if (current->thread.regs->msr & MSR_VEC)
+                        giveup_altivec(current);
+                memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
+                ext_bkp.vscr = current->thread.vscr;
+                ext_bkp.vrsave = current->thread.vrsave;
+        }
+        ext_bkp.used_vr = current->thread.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+        /* Save VSX state in stack */
+        if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+                __giveup_vsx(current);
+        ext_bkp.used_vsr = current->thread.used_vsr;
+#endif
+
+        /* Remember the MSR with disabled extensions */
+        ext_msr = current->thread.regs->msr;
+
         /* XXX we get called with irq disabled - change that! */
         local_irq_enable();
 
@@ -973,6 +1130,32 @@
 
         local_irq_disable();
 
+        current->thread.regs->msr = ext_msr;
+
+        /* Make sure we save the guest FPU/Altivec/VSX state */
+        kvmppc_giveup_ext(vcpu, MSR_FP);
+        kvmppc_giveup_ext(vcpu, MSR_VEC);
+        kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+        /* Restore FPU state from stack */
+        memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
+        current->thread.fpscr = ext_bkp.fpscr;
+        current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+        /* Restore Altivec state from stack */
+        if (save_vec && current->thread.used_vr) {
+                memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
+                current->thread.vscr = ext_bkp.vscr;
+                current->thread.vrsave = ext_bkp.vrsave;
+        }
+        current->thread.used_vr = ext_bkp.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+        current->thread.used_vsr = ext_bkp.used_vsr;
+#endif
+
         return ret;
 }
 
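
Editor's note: taken together, the last three hunks bracket guest entry with a
host save/restore of all facility state: flush the host's FPU/Altivec/VSX into
the on-stack ext_bkp before entering, force the guest's state back out via
kvmppc_giveup_ext() after exiting, then restore the host copy. A user-space
model of that bracket (the struct layout and helpers are illustrative, not the
kernel's thread_struct or API):

    #include <stdio.h>
    #include <string.h>

    struct thread_state {
            double fpr[32];
            unsigned long fpscr;
    };

    static struct thread_state current_thread;

    /* Stand-in for __kvmppc_vcpu_entry(): the guest clobbers FP state. */
    static void run_guest(void)
    {
            memset(&current_thread, 0, sizeof(current_thread));
    }

    int main(void)
    {
            struct thread_state ext_bkp;   /* the on-stack backup */

            current_thread.fpr[0] = 1.5;   /* host-owned FP state */
            current_thread.fpscr = 0xa0;

            /* Save host state on the stack... */
            memcpy(&ext_bkp, &current_thread, sizeof(ext_bkp));

            /* ...run the guest, which may use the facilities freely... */
            run_guest();

            /* ...and restore the host copy before returning. */
            memcpy(&current_thread, &ext_bkp, sizeof(ext_bkp));
            printf("fpr0 restored to %g, fpscr to 0x%lx\n",
                   current_thread.fpr[0], current_thread.fpscr);
            return 0;
    }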