author		Anton Blanchard <anton@samba.org>	2014-01-08 05:25:30 -0500
committer	Alexander Graf <agraf@suse.de>		2014-01-27 10:01:16 -0500
commit		d682916a381ac7c8eb965c10ab64bc7cc2f18647
tree		d207502175f9b6b6c48988b25200efe859008399 /arch/powerpc
parent		8563bf52d509213e746295341ab52896b562ca5e
KVM: PPC: Book3S HV: Basic little-endian guest support
We create a guest MSR from scratch when delivering exceptions in
a few places. Instead of extracting LPCR[ILE] and inserting it
into MSR_LE each time, we simply create a new variable intr_msr which
contains the entire MSR to use. For a little-endian guest, userspace
needs to set the ILE (interrupt little-endian) bit in the LPCR for
each vcpu (or at least one vcpu in each virtual core).
[paulus@samba.org - removed H_SET_MODE implementation from original
version of the patch, and made kvmppc_set_lpcr update vcpu->arch.intr_msr.]
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
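
As context for "userspace needs to set the ILE bit in the LPCR for each vcpu": below is a hedged sketch of what a QEMU-style caller might do through KVM's one-reg interface. KVM_REG_PPC_LPCR and the KVM_GET/SET_ONE_REG ioctls are real; the LPCR_ILE constant's value and the set_guest_little_endian() helper are illustrative assumptions (the kernel's definition lives in arch/powerpc/include/asm/reg.h), and this only builds against powerpc kernel headers.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define LPCR_ILE 0x02000000UL	/* assumed ILE bit position */

/* Illustrative helper: set LPCR[ILE] on one vcpu fd so guest interrupts
 * are delivered little-endian. Error handling kept minimal. */
static int set_guest_little_endian(int vcpu_fd)
{
	uint64_t lpcr;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,
		.addr = (uintptr_t)&lpcr,
	};

	/* Read-modify-write so only the ILE bit changes. */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	lpcr |= LPCR_ILE;
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}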
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/kvm_host.h     |  1 +
 arch/powerpc/kernel/asm-offsets.c       |  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c     |  2 +-
 arch/powerpc/kvm/book3s_hv.c            | 22 ++++++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 15 +++++----------
 5 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d161bc09153b..7726a3bc8ff0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -636,6 +636,7 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
+	unsigned long intr_msr;
 #endif
 };
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 239a857f1141..a60a2fdae5bd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -480,6 +480,7 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
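
This asm-offsets entry is what lets the real-mode assembly further down refer to arch.intr_msr by a constant offset. A rough sketch of the mechanism, with stand-in structs (the kernel's DEFINE() macro is in include/linux/kbuild.h):

#include <stddef.h>

/* Simplified from include/linux/kbuild.h: emit "->SYM value" markers into
 * the compiler's assembly output; the build scans those markers to generate
 * asm-offsets.h, which handwritten .S files include. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct arch_demo { unsigned long intr_msr; };	/* stand-in for kvm_vcpu_arch */
struct vcpu_demo { struct arch_demo arch; };	/* stand-in for struct kvm_vcpu */

void emit_offsets(void)
{
	/* After processing, assembly can use VCPU_INTR_MSR as an immediate
	 * offset, e.g. "ld r11, VCPU_INTR_MSR(r9)" as in the hunks below. */
	DEFINE(VCPU_INTR_MSR, offsetof(struct vcpu_demo, arch.intr_msr));
}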
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index efb8aa544876..22cc895333e6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 /*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index eb4eed4a7173..3195e4f8a2ed 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -788,6 +788,27 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
 
 	spin_lock(&vc->lock);
 	/*
+	 * If ILE (interrupt little-endian) has changed, update the
+	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+	 */
+	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
+		struct kvm *kvm = vcpu->kvm;
+		struct kvm_vcpu *vcpu;
+		int i;
+
+		mutex_lock(&kvm->lock);
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			if (vcpu->arch.vcore != vc)
+				continue;
+			if (new_lpcr & LPCR_ILE)
+				vcpu->arch.intr_msr |= MSR_LE;
+			else
+				vcpu->arch.intr_msr &= ~MSR_LE;
+		}
+		mutex_unlock(&kvm->lock);
+	}
+
+	/*
 	 * Userspace can only modify DPFD (default prefetch depth),
 	 * ILE (interrupt little-endian) and TC (translation control).
 	 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
@@ -1155,6 +1176,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
+	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
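
The net effect of these two hunks, as a standalone sketch (not kernel code; the bit values are assumptions mirroring the usual arch/powerpc/include/asm/reg.h definitions):

#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>

#define MSR_SF   (1UL << 63)	/* 64-bit mode */
#define MSR_ME   (1UL << 12)	/* machine check enable */
#define MSR_LE   (1UL << 0)	/* little-endian */
#define LPCR_ILE (1UL << 25)	/* assumed ILE bit position */

int main(void)
{
	uint64_t intr_msr = MSR_SF | MSR_ME;	/* default set at vcpu create */
	uint64_t new_lpcr = LPCR_ILE;		/* userspace enables LE interrupts */

	/* Mirrors the kvmppc_set_lpcr() loop body for one vcpu. */
	if (new_lpcr & LPCR_ILE)
		intr_msr |= MSR_LE;
	else
		intr_msr &= ~MSR_LE;

	printf("intr_msr = 0x%016" PRIx64 "\n", intr_msr);	/* 0x8000000000001001 */
	return 0;
}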
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 56299349e94b..ecb76357c9d0 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -812,8 +812,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
 	mtspr	SPRN_SRR1, r11
-	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11,r11,63
+	ld	r11, VCPU_INTR_MSR(r4)
 5:
 
 	/*
@@ -1551,8 +1550,7 @@ kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1621,8 +1619,7 @@ kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1665,8 +1662,7 @@ sc_1_fast_return:
 	mtspr	SPRN_SRR0,r10
 	mtspr	SPRN_SRR1,r11
 	li	r10, BOOK3S_INTERRUPT_SYSCALL
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	mr	r4,r9
 	b	fast_guest_return
 
@@ -1994,8 +1990,7 @@ machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 /*
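
For reference, the removed li/rotldi pair existed because li can only load a 16-bit signed immediate, so MSR_SF (bit 63) cannot be loaded directly: the code loads (MSR_ME << 1) | 1 = 0x2001, then rotates it right by one bit so the low 1 becomes MSR_SF and 0x2000 becomes MSR_ME. A minimal C check of the trick, with the MSR bit positions assumed per the ISA:

#include <stdio.h>
#include <stdint.h>

#define MSR_SF (1UL << 63)	/* 64-bit mode */
#define MSR_ME (1UL << 12)	/* machine check enable */

/* rotldi rD,rS,63 rotates the 64-bit value left by 63, i.e. right by 1. */
static uint64_t rotldi63(uint64_t x)
{
	return (x << 63) | (x >> 1);
}

int main(void)
{
	uint64_t r11 = (MSR_ME << 1) | 1;	/* li r11,(MSR_ME << 1) | 1:
						   0x2001 fits li's immediate */
	r11 = rotldi63(r11);			/* low 1 rotates up to bit 63 */

	printf("%s\n", r11 == (MSR_SF | MSR_ME)
	       ? "matches MSR_SF | MSR_ME" : "mismatch");
	return 0;
}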