Diffstat (limited to 'arch/powerpc')

 arch/powerpc/include/asm/kvm_book3s.h     |  10
 arch/powerpc/include/asm/kvm_fpu.h        |  27
 arch/powerpc/include/asm/kvm_host.h       |  18
 arch/powerpc/include/asm/system.h         |   3
 arch/powerpc/kernel/ppc_ksyms.c           |   4
 arch/powerpc/kvm/44x_tlb.c                |   3
 arch/powerpc/kvm/Makefile                 |   2
 arch/powerpc/kvm/book3s.c                 |  79
 arch/powerpc/kvm/book3s_32_mmu.c          |   8
 arch/powerpc/kvm/book3s_32_mmu_host.c     | 134
 arch/powerpc/kvm/book3s_64_mmu_host.c     | 129
 arch/powerpc/kvm/book3s_mmu_hpte.c        | 277
 arch/powerpc/kvm/book3s_paired_singles.c  |  94
 arch/powerpc/kvm/booke.c                  |  12
 arch/powerpc/kvm/fpu.S                    |  18
 arch/powerpc/kvm/powerpc.c                |  14

 16 files changed, 443 insertions(+), 389 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 6f74d93725a0..8274a2d43925 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -115,7 +115,15 @@ extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
+
+extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern int kvmppc_mmu_hpte_sysinit(void);
+extern void kvmppc_mmu_hpte_sysexit(void);
+
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
index 94f05de9ad04..c3d4f0518a67 100644
--- a/arch/powerpc/include/asm/kvm_fpu.h
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -22,24 +22,24 @@

 #include <linux/types.h>

-extern void fps_fres(struct thread_struct *t, u32 *dst, u32 *src1);
-extern void fps_frsqrte(struct thread_struct *t, u32 *dst, u32 *src1);
-extern void fps_fsqrts(struct thread_struct *t, u32 *dst, u32 *src1);
+extern void fps_fres(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_frsqrte(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_fsqrts(u64 *fpscr, u32 *dst, u32 *src1);

-extern void fps_fadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
-extern void fps_fdivs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
-extern void fps_fmuls(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
-extern void fps_fsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fdivs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fmuls(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);

-extern void fps_fmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 		       u32 *src3);
-extern void fps_fmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 		       u32 *src3);
-extern void fps_fnmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fnmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 			u32 *src3);
-extern void fps_fnmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fnmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 			u32 *src3);
-extern void fps_fsel(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fsel(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 		     u32 *src3);

 #define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
@@ -82,4 +82,7 @@ FPD_THREE_IN(fmadd)
 FPD_THREE_IN(fnmsub)
 FPD_THREE_IN(fnmadd)

+extern void kvm_cvt_fd(u32 *from, u64 *to, u64 *fpscr);
+extern void kvm_cvt_df(u64 *from, u32 *to, u64 *fpscr);
+
 #endif
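The practical effect of the interface change above: callers hand these helpers the guest's raw FPSCR word directly instead of staging it in a dummy thread_struct. The following is a minimal sketch of a call site under the new prototypes, not part of the commit; it mirrors the pattern used by kvmppc_ps_two_in() in book3s_paired_singles.c below, and the helper name plus reg_a/reg_b/reg_out register indices are hypothetical:

	/* Sketch only: add the PS0 halves of two guest FPRs. */
	static void example_ps0_fadds(struct kvm_vcpu *vcpu,
	                              int reg_out, int reg_a, int reg_b)
	{
	        u32 ps0_a, ps0_b, ps0_out;

	        /* u64 guest double -> u32 single; FPSCR passed directly */
	        kvm_cvt_df(&vcpu->arch.fpr[reg_a], &ps0_a, &vcpu->arch.fpscr);
	        kvm_cvt_df(&vcpu->arch.fpr[reg_b], &ps0_b, &vcpu->arch.fpscr);

	        fps_fadds(&vcpu->arch.fpscr, &ps0_out, &ps0_a, &ps0_b);

	        /* and back: u32 single -> u64 double in the guest FPR */
	        kvm_cvt_fd(&ps0_out, &vcpu->arch.fpr[reg_out], &vcpu->arch.fpscr);
	}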
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0c9ad869decd..b0b23c007d6e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -35,10 +35,17 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1

 /* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x)	0
 #define KVM_NR_PAGE_SIZES	1
 #define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)

-#define HPTEG_CACHE_NUM 1024
+#define HPTEG_CACHE_NUM			(1 << 15)
+#define HPTEG_HASH_BITS_PTE		13
+#define HPTEG_HASH_BITS_VPTE		13
+#define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
+#define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
+#define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)

 struct kvm;
 struct kvm_run;
@@ -151,6 +158,9 @@ struct kvmppc_mmu {
 };

 struct hpte_cache {
+	struct hlist_node list_pte;
+	struct hlist_node list_vpte;
+	struct hlist_node list_vpte_long;
 	u64 host_va;
 	u64 pfn;
 	ulong slot;
@@ -282,8 +292,10 @@ struct kvm_vcpu_arch {
 	unsigned long pending_exceptions;

 #ifdef CONFIG_PPC_BOOK3S
-	struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
-	int hpte_cache_offset;
+	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+	int hpte_cache_count;
 #endif
 };

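For scale: the cache can now hold 2^15 shadow PTEs, reached through 2^13 ePTE buckets, 2^13 vPTE buckets, and 2^5 coarse vPTE-long buckets. A rough, self-contained illustration of what that sizing buys (illustration only, and it assumes hash_64() spreads entries evenly):

	/* Back-of-the-envelope check of the table sizing above. */
	#include <stdio.h>

	int main(void)
	{
	        unsigned long cache_max = 1UL << 15;    /* HPTEG_CACHE_NUM          */
	        unsigned long pte_buckets = 1UL << 13;  /* HPTEG_HASH_NUM_PTE       */
	        unsigned long long_buckets = 1UL << 5;  /* HPTEG_HASH_NUM_VPTE_LONG */

	        /* With a full cache, a lookup keyed by effective address walks
	         * ~4 entries and a coarse vPTE-long flush walks ~1024 -- both
	         * far cheaper than scanning all 32768 entries linearly. */
	        printf("avg ePTE chain:      %lu\n", cache_max / pte_buckets);
	        printf("avg vPTE-long chain: %lu\n", cache_max / long_buckets);
	        return 0;
	}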
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index a6297c67c3d6..6c294acac848 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -515,11 +515,8 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
  * powers of 2 writes until it reaches sufficient alignment).
  *
  * Based on this we disable the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
  */
 #define NET_IP_ALIGN	0
-#define NET_SKB_PAD	L1_CACHE_BYTES

 #define cmpxchg64(ptr, o, n)						\
 ({									\
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 3b4dcc82a4c1..ab3e392ac63c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -101,10 +101,6 @@ EXPORT_SYMBOL(pci_dram_offset);
 EXPORT_SYMBOL(start_thread);
 EXPORT_SYMBOL(kernel_thread);

-#ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL_GPL(cvt_df);
-EXPORT_SYMBOL_GPL(cvt_fd);
-#endif
 EXPORT_SYMBOL(giveup_fpu);
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 812312542e50..9b9b5cdea840 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -316,7 +316,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 	gfn = gpaddr >> PAGE_SHIFT;
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
-		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
+		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
+		       (unsigned long long)gfn);
 		kvm_release_page_clean(new_page);
 		return;
 	}
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ff436066bf77..d45c818a384c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -45,6 +45,7 @@ kvm-book3s_64-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
@@ -57,6 +58,7 @@ kvm-book3s_32-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_32_mmu_host.o \
 	book3s_32_mmu.o
 kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b998abf1a63d..a3cef30d1d42 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1047,8 +1047,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;

-	vcpu_load(vcpu);
-
 	regs->pc = kvmppc_get_pc(vcpu);
 	regs->cr = kvmppc_get_cr(vcpu);
 	regs->ctr = kvmppc_get_ctr(vcpu);
@@ -1069,8 +1067,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

-	vcpu_put(vcpu);
-
 	return 0;
 }

@@ -1078,8 +1074,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;

-	vcpu_load(vcpu);
-
 	kvmppc_set_pc(vcpu, regs->pc);
 	kvmppc_set_cr(vcpu, regs->cr);
 	kvmppc_set_ctr(vcpu, regs->ctr);
@@ -1099,8 +1093,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

-	vcpu_put(vcpu);
-
 	return 0;
 }

@@ -1110,8 +1102,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;

-	vcpu_load(vcpu);
-
 	sregs->pvr = vcpu->arch.pvr;

 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -1131,8 +1121,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 		}
 	}

-	vcpu_put(vcpu);
-
 	return 0;
 }

@@ -1142,8 +1130,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;

-	vcpu_load(vcpu);
-
 	kvmppc_set_pvr(vcpu, sregs->pvr);

 	vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -1171,8 +1157,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	/* Flush the MMU after messing with the segments */
 	kvmppc_mmu_pte_flush(vcpu, 0, 0);

-	vcpu_put(vcpu);
-
 	return 0;
 }

@@ -1309,12 +1293,17 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	struct thread_struct ext_bkp;
+	double fpr[32][TS_FPRWIDTH];
+	unsigned int fpscr;
+	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	bool save_vec = current->thread.used_vr;
+	vector128 vr[32];
+	vector128 vscr;
+	unsigned long uninitialized_var(vrsave);
+	int used_vr;
 #endif
 #ifdef CONFIG_VSX
-	bool save_vsx = current->thread.used_vsr;
+	int used_vsr;
 #endif
 	ulong ext_msr;

@@ -1327,27 +1316,27 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Save FPU state in stack */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
-	ext_bkp.fpscr = current->thread.fpscr;
-	ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+	fpscr = current->thread.fpscr.val;
+	fpexc_mode = current->thread.fpexc_mode;

 #ifdef CONFIG_ALTIVEC
 	/* Save Altivec state in stack */
-	if (save_vec) {
+	used_vr = current->thread.used_vr;
+	if (used_vr) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
-		ext_bkp.vscr = current->thread.vscr;
-		ext_bkp.vrsave = current->thread.vrsave;
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		vrsave = current->thread.vrsave;
 	}
-	ext_bkp.used_vr = current->thread.used_vr;
 #endif

 #ifdef CONFIG_VSX
 	/* Save VSX state in stack */
-	if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+	used_vsr = current->thread.used_vsr;
+	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
 		__giveup_vsx(current);
-	ext_bkp.used_vsr = current->thread.used_vsr;
 #endif

 	/* Remember the MSR with disabled extensions */
@@ -1372,22 +1361,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		kvmppc_giveup_ext(vcpu, MSR_VSX);

 	/* Restore FPU state from stack */
-	memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
-	current->thread.fpscr = ext_bkp.fpscr;
-	current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+	current->thread.fpscr.val = fpscr;
+	current->thread.fpexc_mode = fpexc_mode;

 #ifdef CONFIG_ALTIVEC
 	/* Restore Altivec state from stack */
-	if (save_vec && current->thread.used_vr) {
-		memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
-		current->thread.vscr = ext_bkp.vscr;
-		current->thread.vrsave= ext_bkp.vrsave;
+	if (used_vr && current->thread.used_vr) {
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.vrsave = vrsave;
 	}
-	current->thread.used_vr = ext_bkp.used_vr;
+	current->thread.used_vr = used_vr;
 #endif

 #ifdef CONFIG_VSX
-	current->thread.used_vsr = ext_bkp.used_vsr;
+	current->thread.used_vsr = used_vsr;
 #endif

 	return ret;
@@ -1395,12 +1384,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)

 static int kvmppc_book3s_init(void)
 {
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-			THIS_MODULE);
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+		     THIS_MODULE);
+
+	if (r)
+		return r;
+
+	r = kvmppc_mmu_hpte_sysinit();
+
+	return r;
 }

 static void kvmppc_book3s_exit(void)
 {
+	kvmppc_mmu_hpte_sysexit();
 	kvm_exit();
 }

diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 0b10503c8a4a..3292d76101d2 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -354,10 +354,10 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		*vsid = VSID_REAL_DR | gvsid;
 		break;
 	case MSR_DR|MSR_IR:
-		if (!sr->valid)
-			return -1;
-
-		*vsid = sr->vsid;
+		if (sr->valid)
+			*vsid = sr->vsid;
+		else
+			*vsid = VSID_BAT | gvsid;
 		break;
 	default:
 		BUG();
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 0bb66005338f..0b51ef872c1e 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -19,6 +19,7 @@
  */

 #include <linux/kvm_host.h>
+#include <linux/hash.h>

 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -57,139 +58,26 @@
 static ulong htab;
 static u32 htabmask;

-static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	volatile u32 *pteg;

-	dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
+	/* Remove from host HTAB */
 	pteg = (u32*)pte->slot;
-
 	pteg[0] = 0;
+
+	/* And make sure it's gone from the TLB too */
 	asm volatile ("sync");
 	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
 	asm volatile ("sync");
 	asm volatile ("tlbsync");
-
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-
-struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
-{
-	int i;
-	u64 guest_vp;
-
-	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
-	for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if (pte->pte.vpage == guest_vp)
-			return &pte->pte;
-	}
-
-	return NULL;
-}
-
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }

 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+	return hash_64(gvsid, SID_MAP_BITS);
 }


@@ -256,7 +144,6 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	register int rr = 0;
 	bool primary = false;
 	bool evict = false;
-	int hpte_id;
 	struct hpte_cache *pte;

 	/* Get host physical address for gpa */
@@ -341,8 +228,7 @@ next_pteg:

 	/* Now tell our Shadow PTE code about the new page */

-	hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-	pte = &vcpu->arch.hpte_cache[hpte_id];
+	pte = kvmppc_mmu_hpte_cache_next(vcpu);

 	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
 		    orig_pte->may_write ? 'w' : '-',
@@ -355,6 +241,8 @@ next_pteg:
 	pte->pte = *orig_pte;
 	pte->pfn = hpaddr >> PAGE_SHIFT;

+	kvmppc_mmu_hpte_cache_map(vcpu, pte);
+
 	return 0;
 }

@@ -439,7 +327,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)

 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kvmppc_mmu_hpte_destroy(vcpu);
 	preempt_disable();
 	__destroy_context(to_book3s(vcpu)->context_id);
 	preempt_enable();
@@ -479,5 +367,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
 	htab = (ulong)__va(sdr1 & 0xffff0000);

+	kvmppc_mmu_hpte_init(vcpu);
+
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e4b5744977f6..384179a5002b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -20,6 +20,7 @@
  */

 #include <linux/kvm_host.h>
+#include <linux/hash.h>

 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -46,135 +47,20 @@
 #define dprintk_slb(a, ...) do { } while(0)
 #endif

-static void invalidate_pte(struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
-	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
 	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
 			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
 			       false);
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(pte);
-		}
-	}
-}
-
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(pte);
-		}
-	}
-}
-
-struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
-{
-	int i;
-	u64 guest_vp;
-
-	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
-	for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if (pte->pte.vpage == guest_vp)
-			return &pte->pte;
-	}
-
-	return NULL;
-}
-
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }

 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+	return hash_64(gvsid, SID_MAP_BITS);
 }

-
 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -273,8 +159,7 @@ map_again:
 		attempt++;
 		goto map_again;
 	} else {
-		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
+		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);

 		dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
 			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
@@ -292,6 +177,8 @@ map_again:
 		pte->host_va = va;
 		pte->pte = *orig_pte;
 		pte->pfn = hpaddr >> PAGE_SHIFT;
+
+		kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}

 	return 0;
@@ -418,7 +305,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)

 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kvmppc_mmu_hpte_destroy(vcpu);
 	__destroy_context(to_book3s(vcpu)->context_id);
 }

@@ -436,5 +323,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
 	vcpu3s->vsid_next = vcpu3s->vsid_first;

+	kvmppc_mmu_hpte_init(vcpu);
+
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
new file mode 100644
index 000000000000..4868d4a7ebc5
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ *     Alexander Graf <agraf@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hash.h>
+#include <linux/slab.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/machdep.h>
+#include <asm/mmu_context.h>
+#include <asm/hw_irq.h>
+
+#define PTE_SIZE	12
+
+/* #define DEBUG_MMU */
+
+#ifdef DEBUG_MMU
+#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_mmu(a, ...) do { } while(0)
+#endif
+
+static struct kmem_cache *hpte_cache;
+
+static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
+{
+	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
+}
+
+static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
+{
+	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
+}
+
+static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
+{
+	return hash_64((vpage & 0xffffff000ULL) >> 12,
+		       HPTEG_HASH_BITS_VPTE_LONG);
+}
+
+void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+	u64 index;
+
+	/* Add to ePTE list */
+	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
+	hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+
+	/* Add to vPTE list */
+	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
+	hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+
+	/* Add to vPTE_long list */
+	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
+	hlist_add_head(&pte->list_vpte_long,
+		       &vcpu->arch.hpte_hash_vpte_long[index]);
+}
+
+static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
+		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+
+	/* Different for 32 and 64 bit */
+	kvmppc_mmu_invalidate_pte(vcpu, pte);
+
+	if (pte->pte.may_write)
+		kvm_release_pfn_dirty(pte->pfn);
+	else
+		kvm_release_pfn_clean(pte->pfn);
+
+	hlist_del(&pte->list_pte);
+	hlist_del(&pte->list_vpte);
+	hlist_del(&pte->list_vpte_long);
+
+	vcpu->arch.hpte_cache_count--;
+	kmem_cache_free(hpte_cache, pte);
+}
+
+static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
+{
+	struct hpte_cache *pte;
+	struct hlist_node *node, *tmp;
+	int i;
+
+	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
+		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+
+		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+			invalidate_pte(vcpu, pte);
+	}
+}
+
+static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
+{
+	struct hlist_head *list;
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+
+	/* Find the list of entries in the map */
+	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
+		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
+			invalidate_pte(vcpu, pte);
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
+{
+	u64 i;
+
+	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
+		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);
+
+	guest_ea &= ea_mask;
+
+	switch (ea_mask) {
+	case ~0xfffUL:
+		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
+		break;
+	case 0x0ffff000:
+		/* 32-bit flush w/o segment, go through all possible segments */
+		for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
+			kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
+		break;
+	case 0:
+		/* Doing a complete flush -> start from scratch */
+		kvmppc_mmu_pte_flush_all(vcpu);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/* Flush with mask 0xfffffffff */
+static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+	struct hlist_head *list;
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+	u64 vp_mask = 0xfffffffffULL;
+
+	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte)
+		if ((pte->pte.vpage & vp_mask) == guest_vp)
+			invalidate_pte(vcpu, pte);
+}
+
+/* Flush with mask 0xffffff000 */
+static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+	struct hlist_head *list;
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+	u64 vp_mask = 0xffffff000ULL;
+
+	list = &vcpu->arch.hpte_hash_vpte_long[
+		kvmppc_mmu_hash_vpte_long(guest_vp)];
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+		if ((pte->pte.vpage & vp_mask) == guest_vp)
+			invalidate_pte(vcpu, pte);
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
+	guest_vp &= vp_mask;
+
+	switch(vp_mask) {
+	case 0xfffffffffULL:
+		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
+		break;
+	case 0xffffff000ULL:
+		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+	struct hlist_node *node, *tmp;
+	struct hpte_cache *pte;
+	int i;
+
+	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
+		    vcpu->arch.hpte_cache_count, pa_start, pa_end);
+
+	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
+		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+
+		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+			if ((pte->pte.raddr >= pa_start) &&
+			    (pte->pte.raddr < pa_end))
+				invalidate_pte(vcpu, pte);
+	}
+}
+
+struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
+{
+	struct hpte_cache *pte;
+
+	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
+	vcpu->arch.hpte_cache_count++;
+
+	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+		kvmppc_mmu_pte_flush_all(vcpu);
+
+	return pte;
+}
+
+void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
+{
+	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+}
+
+static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		INIT_HLIST_HEAD(&hash_list[i]);
+}
+
+int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
+{
+	/* init hpte lookup hashes */
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
+
+	return 0;
+}
+
+int kvmppc_mmu_hpte_sysinit(void)
+{
+	/* init hpte slab cache */
+	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
+				       sizeof(struct hpte_cache), 0, NULL);
+
+	return 0;
+}
+
+void kvmppc_mmu_hpte_sysexit(void)
+{
+	kmem_cache_destroy(hpte_cache);
+}
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a9f66abafcb3..474f2e24050a 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -159,10 +159,7 @@

 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 {
-	struct thread_struct t;
-
-	t.fpscr.val = vcpu->arch.fpscr;
-	cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
+	kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr);
 }

 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@@ -183,7 +180,6 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				   int rs, ulong addr, int ls_type)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	char tmp[8];
 	int len = sizeof(u32);
@@ -191,8 +187,6 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if (ls_type == FPU_LS_DOUBLE)
 		len = sizeof(u64);

-	t.fpscr.val = vcpu->arch.fpscr;
-
 	/* read from memory */
 	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
 	vcpu->arch.paddr_accessed = addr;
@@ -210,7 +204,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* put in registers */
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
+		kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 		vcpu->arch.qpr[rs] = *((u32*)tmp);
 		break;
 	case FPU_LS_DOUBLE:
@@ -229,17 +223,14 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			    int rs, ulong addr, int ls_type)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	char tmp[8];
 	u64 val;
 	int len;

-	t.fpscr.val = vcpu->arch.fpscr;
-
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
+		kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr);
 		val = *((u32*)tmp);
 		len = sizeof(u32);
 		break;
@@ -278,13 +269,10 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				   int rs, ulong addr, bool w, int i)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	float one = 1.0;
 	u32 tmp[2];

-	t.fpscr.val = vcpu->arch.fpscr;
-
 	/* read from memory */
 	if (w) {
 		r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
@@ -308,7 +296,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	emulated = EMULATE_DONE;

 	/* put in registers */
-	cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
+	kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 	vcpu->arch.qpr[rs] = tmp[1];

 	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -322,14 +310,11 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				    int rs, ulong addr, bool w, int i)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	u32 tmp[2];
 	int len = w ? sizeof(u32) : sizeof(u64);

-	t.fpscr.val = vcpu->arch.fpscr;
-
-	cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
+	kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr);
 	tmp[1] = vcpu->arch.qpr[rs];

 	r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -517,7 +502,7 @@ static int get_d_signext(u32 inst)
 static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 			      int reg_out, int reg_in1, int reg_in2,
 			      int reg_in3, int scalar,
-			      void (*func)(struct thread_struct *t,
+			      void (*func)(u64 *fpscr,
 					   u32 *dst, u32 *src1,
 					   u32 *src2, u32 *src3))
 {
@@ -526,27 +511,25 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 	u32 ps0_out;
 	u32 ps0_in1, ps0_in2, ps0_in3;
 	u32 ps1_in1, ps1_in2, ps1_in3;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;

 	/* RC */
 	WARN_ON(rc);

 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
-	cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
-	cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);
+	kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
+	kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
+	kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr);

 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];

-	func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);

 	dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps0_in1, ps0_in2, ps0_in3, ps0_out);

 	if (!(scalar & SCALAR_NO_PS0))
-		cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+		kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);

 	/* PS1 */
 	ps1_in1 = qpr[reg_in1];
@@ -557,7 +540,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 		ps1_in2 = ps0_in2;

 	if (!(scalar & SCALAR_NO_PS1))
-		func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+		func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);

 	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
@@ -568,7 +551,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 			    int reg_out, int reg_in1, int reg_in2,
 			    int scalar,
-			    void (*func)(struct thread_struct *t,
+			    void (*func)(u64 *fpscr,
 					 u32 *dst, u32 *src1,
 					 u32 *src2))
 {
@@ -578,27 +561,25 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 	u32 ps0_in1, ps0_in2;
 	u32 ps1_out;
 	u32 ps1_in1, ps1_in2;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;

 	/* RC */
 	WARN_ON(rc);

 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
+	kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);

 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 	else
-		cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
+		kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);

-	func(&t, &ps0_out, &ps0_in1, &ps0_in2);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);

 	if (!(scalar & SCALAR_NO_PS0)) {
 		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
 				  ps0_in1, ps0_in2, ps0_out);
| 600 | 581 | ||
| 601 | cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t); | 582 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); |
| 602 | } | 583 | } |
| 603 | 584 | ||
| 604 | /* PS1 */ | 585 | /* PS1 */ |
| @@ -608,7 +589,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, | |||
| 608 | if (scalar & SCALAR_HIGH) | 589 | if (scalar & SCALAR_HIGH) |
| 609 | ps1_in2 = ps0_in2; | 590 | ps1_in2 = ps0_in2; |
| 610 | 591 | ||
| 611 | func(&t, &ps1_out, &ps1_in1, &ps1_in2); | 592 | func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2); |
| 612 | 593 | ||
| 613 | if (!(scalar & SCALAR_NO_PS1)) { | 594 | if (!(scalar & SCALAR_NO_PS1)) { |
| 614 | qpr[reg_out] = ps1_out; | 595 | qpr[reg_out] = ps1_out; |
| @@ -622,31 +603,29 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, | |||
| 622 | 603 | ||
| 623 | static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, | 604 | static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, |
| 624 | int reg_out, int reg_in, | 605 | int reg_out, int reg_in, |
| 625 | void (*func)(struct thread_struct *t, | 606 | void (*func)(u64 *t, |
| 626 | u32 *dst, u32 *src1)) | 607 | u32 *dst, u32 *src1)) |
| 627 | { | 608 | { |
| 628 | u32 *qpr = vcpu->arch.qpr; | 609 | u32 *qpr = vcpu->arch.qpr; |
| 629 | u64 *fpr = vcpu->arch.fpr; | 610 | u64 *fpr = vcpu->arch.fpr; |
| 630 | u32 ps0_out, ps0_in; | 611 | u32 ps0_out, ps0_in; |
| 631 | u32 ps1_in; | 612 | u32 ps1_in; |
| 632 | struct thread_struct t; | ||
| 633 | t.fpscr.val = vcpu->arch.fpscr; | ||
| 634 | 613 | ||
| 635 | /* RC */ | 614 | /* RC */ |
| 636 | WARN_ON(rc); | 615 | WARN_ON(rc); |
| 637 | 616 | ||
| 638 | /* PS0 */ | 617 | /* PS0 */ |
| 639 | cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t); | 618 | kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr); |
| 640 | func(&t, &ps0_out, &ps0_in); | 619 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); |
| 641 | 620 | ||
| 642 | dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", | 621 | dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", |
| 643 | ps0_in, ps0_out); | 622 | ps0_in, ps0_out); |
| 644 | 623 | ||
| 645 | cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t); | 624 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); |
| 646 | 625 | ||
| 647 | /* PS1 */ | 626 | /* PS1 */ |
| 648 | ps1_in = qpr[reg_in]; | 627 | ps1_in = qpr[reg_in]; |
| 649 | func(&t, &qpr[reg_out], &ps1_in); | 628 | func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in); |
| 650 | 629 | ||
| 651 | dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", | 630 | dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", |
| 652 | ps1_in, qpr[reg_out]); | 631 | ps1_in, qpr[reg_out]); |
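All three kvmppc_ps_*_in helpers follow the same shape: unpack the ps0 halves out of the double-format FPRs, run the scalar callback once per half, and pack the results back, with the `scalar` flags redirecting operands for the merge/madds-style variants. A standalone sketch of that dispatch using the two-operand case; the SCALAR_* values below are illustrative rather than copied from the kernel, and the FPR format conversions are elided:

#include <stdint.h>

/* Illustrative flag values; the kernel's definitions may differ. */
#define SCALAR_HIGH	(1 << 0)	/* ps1 reuses ps0's second operand */
#define SCALAR_LOW	(1 << 1)	/* ps0's second operand comes from ps1 */
#define SCALAR_NO_PS0	(1 << 2)	/* suppress the ps0 writeback */
#define SCALAR_NO_PS1	(1 << 3)	/* suppress the ps1 writeback */

typedef void (*ps_func)(uint64_t *fpscr, uint32_t *dst,
			uint32_t *src1, uint32_t *src2);

/* Toy callback; the real ops are FP primitives that update FPSCR. */
static void ps_add_like(uint64_t *fpscr, uint32_t *dst,
			uint32_t *src1, uint32_t *src2)
{
	(void)fpscr;
	*dst = *src1 + *src2;	/* integer add standing in for an FP op */
}

static void model_two_in(uint32_t out[2], uint32_t in1[2], uint32_t in2[2],
			 int scalar, ps_func func, uint64_t *fpscr)
{
	uint32_t ps0_out, ps1_out;
	uint32_t ps0_in2 = (scalar & SCALAR_LOW) ? in2[1] : in2[0];
	uint32_t ps1_in2 = (scalar & SCALAR_HIGH) ? ps0_in2 : in2[1];

	func(fpscr, &ps0_out, &in1[0], &ps0_in2);	/* PS0 half */
	if (!(scalar & SCALAR_NO_PS0))
		out[0] = ps0_out;

	func(fpscr, &ps1_out, &in1[1], &ps1_in2);	/* PS1 half */
	if (!(scalar & SCALAR_NO_PS1))
		out[1] = ps1_out;
}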
| @@ -672,13 +651,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 672 | 651 | ||
| 673 | bool rcomp = (inst & 1) ? true : false; | 652 | bool rcomp = (inst & 1) ? true : false; |
| 674 | u32 cr = kvmppc_get_cr(vcpu); | 653 | u32 cr = kvmppc_get_cr(vcpu); |
| 675 | struct thread_struct t; | ||
| 676 | #ifdef DEBUG | 654 | #ifdef DEBUG |
| 677 | int i; | 655 | int i; |
| 678 | #endif | 656 | #endif |
| 679 | 657 | ||
| 680 | t.fpscr.val = vcpu->arch.fpscr; | ||
| 681 | |||
| 682 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) | 658 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) |
| 683 | return EMULATE_FAIL; | 659 | return EMULATE_FAIL; |
| 684 | 660 | ||
| @@ -695,7 +671,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 695 | #ifdef DEBUG | 671 | #ifdef DEBUG |
| 696 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { | 672 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { |
| 697 | u32 f; | 673 | u32 f; |
| 698 | cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t); | 674 | kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr); |
| 699 | dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", | 675 | dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", |
| 700 | i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); | 676 | i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); |
| 701 | } | 677 | } |
| @@ -819,8 +795,9 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 819 | WARN_ON(rcomp); | 795 | WARN_ON(rcomp); |
| 820 | vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; | 796 | vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; |
| 821 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ | 797 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ |
| 822 | cvt_df((double*)&vcpu->arch.fpr[ax_rb], | 798 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], |
| 823 | (float*)&vcpu->arch.qpr[ax_rd], &t); | 799 | &vcpu->arch.qpr[ax_rd], |
| 800 | &vcpu->arch.fpscr); | ||
| 824 | break; | 801 | break; |
| 825 | case OP_4X_PS_MERGE01: | 802 | case OP_4X_PS_MERGE01: |
| 826 | WARN_ON(rcomp); | 803 | WARN_ON(rcomp); |
| @@ -830,17 +807,20 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 830 | case OP_4X_PS_MERGE10: | 807 | case OP_4X_PS_MERGE10: |
| 831 | WARN_ON(rcomp); | 808 | WARN_ON(rcomp); |
| 832 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ | 809 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ |
| 833 | cvt_fd((float*)&vcpu->arch.qpr[ax_ra], | 810 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], |
| 834 | (double*)&vcpu->arch.fpr[ax_rd], &t); | 811 | &vcpu->arch.fpr[ax_rd], |
| 812 | &vcpu->arch.fpscr); | ||
| 835 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ | 813 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ |
| 836 | cvt_df((double*)&vcpu->arch.fpr[ax_rb], | 814 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], |
| 837 | (float*)&vcpu->arch.qpr[ax_rd], &t); | 815 | &vcpu->arch.qpr[ax_rd], |
| 816 | &vcpu->arch.fpscr); | ||
| 838 | break; | 817 | break; |
| 839 | case OP_4X_PS_MERGE11: | 818 | case OP_4X_PS_MERGE11: |
| 840 | WARN_ON(rcomp); | 819 | WARN_ON(rcomp); |
| 841 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ | 820 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ |
| 842 | cvt_fd((float*)&vcpu->arch.qpr[ax_ra], | 821 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], |
| 843 | (double*)&vcpu->arch.fpr[ax_rd], &t); | 822 | &vcpu->arch.fpr[ax_rd], |
| 823 | &vcpu->arch.fpscr); | ||
| 844 | vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; | 824 | vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; |
| 845 | break; | 825 | break; |
| 846 | } | 826 | } |
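The four merge cases in this hunk implement the ps_mergeXY semantics: the first digit selects which half of rA becomes the result's ps0, the second which half of rB becomes ps1. The kvm_cvt_fd/kvm_cvt_df calls exist only because ps0 lives in double format inside the FPR while ps1 is kept as raw single bits in the QPR. A standalone model with those format conversions elided:

#include <stdint.h>
#include <stdio.h>

/* Both halves as raw single-precision bit patterns; the FPR's
 * double-format storage of ps0 is abstracted away. */
struct ps_reg { uint32_t ps0, ps1; };

static struct ps_reg ps_merge(struct ps_reg a, struct ps_reg b, int x, int y)
{
	struct ps_reg rd;

	rd.ps0 = x ? a.ps1 : a.ps0;	/* digit X: which half of rA */
	rd.ps1 = y ? b.ps1 : b.ps0;	/* digit Y: which half of rB */
	return rd;
}

int main(void)
{
	struct ps_reg a = { 0xA0, 0xA1 }, b = { 0xB0, 0xB1 };
	struct ps_reg rd = ps_merge(a, b, 1, 0);	/* ps_merge10 */

	printf("ps0=0x%x ps1=0x%x\n", rd.ps0, rd.ps1);	/* 0xA1 0xB0 */
	return 0;
}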
| @@ -1275,7 +1255,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 1275 | #ifdef DEBUG | 1255 | #ifdef DEBUG |
| 1276 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { | 1256 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { |
| 1277 | u32 f; | 1257 | u32 f; |
| 1278 | cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t); | 1258 | kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr); |
| 1279 | dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); | 1259 | dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); |
| 1280 | } | 1260 | } |
| 1281 | #endif | 1261 | #endif |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index a33ab8cc2ccc..8d4e35f5372c 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
| @@ -144,7 +144,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
| 144 | unsigned int priority) | 144 | unsigned int priority) |
| 145 | { | 145 | { |
| 146 | int allowed = 0; | 146 | int allowed = 0; |
| 147 | ulong msr_mask; | 147 | ulong uninitialized_var(msr_mask); |
| 148 | bool update_esr = false, update_dear = false; | 148 | bool update_esr = false, update_dear = false; |
| 149 | 149 | ||
| 150 | switch (priority) { | 150 | switch (priority) { |
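For reference, uninitialized_var() in this hunk (and again for `gpr` in powerpc.c further down) was the contemporary kernel idiom for silencing a gcc false positive: every arm of the following switch that allows delivery does assign msr_mask, but gcc cannot prove it. The macro expanded to a self-assignment and generated no code:

/* As defined in the era's include/linux/compiler-gcc.h (the macro
 * has since been removed from the kernel): */
#define uninitialized_var(x) x = x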
| @@ -485,8 +485,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 485 | { | 485 | { |
| 486 | int i; | 486 | int i; |
| 487 | 487 | ||
| 488 | vcpu_load(vcpu); | ||
| 489 | |||
| 490 | regs->pc = vcpu->arch.pc; | 488 | regs->pc = vcpu->arch.pc; |
| 491 | regs->cr = kvmppc_get_cr(vcpu); | 489 | regs->cr = kvmppc_get_cr(vcpu); |
| 492 | regs->ctr = vcpu->arch.ctr; | 490 | regs->ctr = vcpu->arch.ctr; |
| @@ -507,8 +505,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 507 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 505 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
| 508 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 506 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
| 509 | 507 | ||
| 510 | vcpu_put(vcpu); | ||
| 511 | |||
| 512 | return 0; | 508 | return 0; |
| 513 | } | 509 | } |
| 514 | 510 | ||
| @@ -516,8 +512,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 516 | { | 512 | { |
| 517 | int i; | 513 | int i; |
| 518 | 514 | ||
| 519 | vcpu_load(vcpu); | ||
| 520 | |||
| 521 | vcpu->arch.pc = regs->pc; | 515 | vcpu->arch.pc = regs->pc; |
| 522 | kvmppc_set_cr(vcpu, regs->cr); | 516 | kvmppc_set_cr(vcpu, regs->cr); |
| 523 | vcpu->arch.ctr = regs->ctr; | 517 | vcpu->arch.ctr = regs->ctr; |
| @@ -537,8 +531,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 537 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 531 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
| 538 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 532 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
| 539 | 533 | ||
| 540 | vcpu_put(vcpu); | ||
| 541 | |||
| 542 | return 0; | 534 | return 0; |
| 543 | } | 535 | } |
| 544 | 536 | ||
| @@ -569,9 +561,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | |||
| 569 | { | 561 | { |
| 570 | int r; | 562 | int r; |
| 571 | 563 | ||
| 572 | vcpu_load(vcpu); | ||
| 573 | r = kvmppc_core_vcpu_translate(vcpu, tr); | 564 | r = kvmppc_core_vcpu_translate(vcpu, tr); |
| 574 | vcpu_put(vcpu); | ||
| 575 | return r; | 565 | return r; |
| 576 | } | 566 | } |
| 577 | 567 | ||
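The vcpu_load()/vcpu_put() pairs deleted throughout this file are not simply dropped: this part of the series relies on the generic KVM ioctl dispatcher (not shown in this diff) taking the vcpu lock once around each arch handler instead of every handler doing it itself. A userspace analogue of that shape, with a plain mutex standing in for vcpu_load() and all names hypothetical:

#include <pthread.h>

struct toy_vcpu {
	pthread_mutex_t mutex;	/* what vcpu_load() contends on */
	unsigned long pc;
};

/* Arch handler after the change: it assumes the caller holds the lock. */
static void toy_arch_get_regs(struct toy_vcpu *v, unsigned long *pc)
{
	*pc = v->pc;
}

/* Common-code dispatcher: locks exactly once per ioctl, so nested
 * load/put pairs in the arch handlers become redundant. */
static void toy_vcpu_ioctl_get_regs(struct toy_vcpu *v, unsigned long *pc)
{
	pthread_mutex_lock(&v->mutex);		/* vcpu_load() */
	toy_arch_get_regs(v, pc);
	pthread_mutex_unlock(&v->mutex);	/* vcpu_put() */
}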
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S index 2b340a3eee90..cb34bbe16113 100644 --- a/arch/powerpc/kvm/fpu.S +++ b/arch/powerpc/kvm/fpu.S | |||
| @@ -271,3 +271,21 @@ FPD_THREE_IN(fmsub) | |||
| 271 | FPD_THREE_IN(fmadd) | 271 | FPD_THREE_IN(fmadd) |
| 272 | FPD_THREE_IN(fnmsub) | 272 | FPD_THREE_IN(fnmsub) |
| 273 | FPD_THREE_IN(fnmadd) | 273 | FPD_THREE_IN(fnmadd) |
| 274 | |||
| 275 | _GLOBAL(kvm_cvt_fd) | ||
| 276 | lfd 0,0(r5) /* load up fpscr value */ | ||
| 277 | MTFSF_L(0) | ||
| 278 | lfs 0,0(r3) | ||
| 279 | stfd 0,0(r4) | ||
| 280 | mffs 0 | ||
| 281 | stfd 0,0(r5) /* save new fpscr value */ | ||
| 282 | blr | ||
| 283 | |||
| 284 | _GLOBAL(kvm_cvt_df) | ||
| 285 | lfd 0,0(r5) /* load up fpscr value */ | ||
| 286 | MTFSF_L(0) | ||
| 287 | lfd 0,0(r3) | ||
| 288 | stfs 0,0(r4) | ||
| 289 | mffs 0 | ||
| 290 | stfd 0,0(r5) /* save new fpscr value */ | ||
| 291 | blr | ||
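The two helpers added above are the assembly side of the kvm_cvt_fd/kvm_cvt_df calls introduced in book3s_paired_singles.c: r3 is the source, r4 the destination, and r5 the guest's 64-bit FPSCR image, installed before the conversion (MTFSF_L) and written back afterwards (mffs) so the guest sees correct rounding and exception state. A hypothetical userspace rendering of kvm_cvt_df using C99 fenv.h, with fenv_t standing in for the raw FPSCR image; the asm leaves host-state save/restore to its callers, but it is included here so the model is self-contained:

#include <stdint.h>
#include <string.h>
#include <fenv.h>

#pragma STDC FENV_ACCESS ON

static void model_kvm_cvt_df(const uint64_t *in, uint32_t *out,
			     fenv_t *guest_env)
{
	fenv_t host_env;
	double d;
	float f;

	fegetenv(&host_env);
	fesetenv(guest_env);		/* lfd + MTFSF_L: install guest FPSCR */

	memcpy(&d, in, sizeof(d));
	f = (float)d;			/* lfd + stfs: narrow under guest mode */
	memcpy(out, &f, sizeof(f));

	fegetenv(guest_env);		/* mffs + stfd: capture new FPSCR */
	fesetenv(&host_env);		/* not in the asm; restores the host */
}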
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 9b8683f39e05..72a4ad86ee91 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
| @@ -36,11 +36,6 @@ | |||
| 36 | #define CREATE_TRACE_POINTS | 36 | #define CREATE_TRACE_POINTS |
| 37 | #include "trace.h" | 37 | #include "trace.h" |
| 38 | 38 | ||
| 39 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
| 40 | { | ||
| 41 | return gfn; | ||
| 42 | } | ||
| 43 | |||
| 44 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
| 45 | { | 40 | { |
| 46 | return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); | 41 | return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); |
| @@ -287,7 +282,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, | |||
| 287 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | 282 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
| 288 | struct kvm_run *run) | 283 | struct kvm_run *run) |
| 289 | { | 284 | { |
| 290 | u64 gpr; | 285 | u64 uninitialized_var(gpr); |
| 291 | 286 | ||
| 292 | if (run->mmio.len > sizeof(gpr)) { | 287 | if (run->mmio.len > sizeof(gpr)) { |
| 293 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); | 288 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
| @@ -423,8 +418,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 423 | int r; | 418 | int r; |
| 424 | sigset_t sigsaved; | 419 | sigset_t sigsaved; |
| 425 | 420 | ||
| 426 | vcpu_load(vcpu); | ||
| 427 | |||
| 428 | if (vcpu->sigset_active) | 421 | if (vcpu->sigset_active) |
| 429 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 422 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
| 430 | 423 | ||
| @@ -456,8 +449,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 456 | if (vcpu->sigset_active) | 449 | if (vcpu->sigset_active) |
| 457 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 450 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
| 458 | 451 | ||
| 459 | vcpu_put(vcpu); | ||
| 460 | |||
| 461 | return r; | 452 | return r; |
| 462 | } | 453 | } |
| 463 | 454 | ||
| @@ -523,8 +514,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
| 523 | if (copy_from_user(&irq, argp, sizeof(irq))) | 514 | if (copy_from_user(&irq, argp, sizeof(irq))) |
| 524 | goto out; | 515 | goto out; |
| 525 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); | 516 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); |
| 526 | break; | 517 | goto out; |
| 527 | } | 518 | } |
| 519 | |||
| 528 | case KVM_ENABLE_CAP: | 520 | case KVM_ENABLE_CAP: |
| 529 | { | 521 | { |
| 530 | struct kvm_enable_cap cap; | 522 | struct kvm_enable_cap cap; |
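One behavioural detail in the last hunk: the KVM_INTERRUPT case now jumps straight to the function's exit label once `r` holds the final result, rather than breaking out of the switch. A minimal standalone sketch of that control-flow shape, with hypothetical case names:

#include <errno.h>

static long sketch_vcpu_ioctl(unsigned int nr)
{
	long r = -EINVAL;

	switch (nr) {
	case 1:			/* KVM_INTERRUPT-like: result is final */
		r = 0;
		goto out;
	case 2:			/* other cases fall out of the switch */
		r = 0;
		break;
	default:
		break;
	}
	/* post-switch work shared by the breaking cases would go here */
out:
	return r;
}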
