Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/disassemble.h     |   4
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h   |  21
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h         |   4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h      | 232
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_32.h   |   2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h   |   8
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  |   9
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h       |   7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h        |  57
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h         | 107
-rw-r--r--  arch/powerpc/include/asm/paca.h            |   2
-rw-r--r--  arch/powerpc/include/asm/processor.h       |   2
-rw-r--r--  arch/powerpc/include/asm/pte-book3e.h      |   2
-rw-r--r--  arch/powerpc/include/asm/reg.h             |  15
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h        |  86
15 files changed, 277 insertions, 281 deletions
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1b3b2b..856f8deb557a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
 	return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
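The new get_oc() helper extracts the field starting at bit 11 of an instruction word, which matches the EHPRIV_OC_SHIFT value added in kvm_booke.h further down, where the ehpriv operand code (OC) is encoded. A minimal user-space sketch of the arithmetic (the harness and the sample opcode are illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* same arithmetic as the new kernel helper */
static unsigned int get_oc(uint32_t inst)
{
	return (inst >> 11) & 0x7fff;
}

int main(void)
{
	/* ehpriv opcode with OC = 1, i.e. 0x7c00021c | (1 << 11) */
	uint32_t inst = 0x7c000a1c;

	printf("OC = %u\n", get_oc(inst));	/* prints "OC = 1" */
	return 0;
}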
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index cca12f084842..894662a5d4d5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	cmpwi	r10,0;						\
 	bne	do_kvm_##n
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come in to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
 	BEGIN_FTR_SECTION_NESTED(947)					\
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,HSTATE_CFAR(r13);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r10,area+EX_R10(r13);					\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\
@@ -217,6 +232,10 @@ do_kvm_##n:							\
 	ld	r10,area+EX_R10(r13);					\
 	beq	89f;							\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r9,area+EX_PPR(r13);					\
+	std	r9,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r9,area+EX_R9(r13);					\
 	std	r12,HSTATE_SCRATCH0(r13);				\
 	li	r12,n;							\
@@ -236,7 +255,7 @@ do_kvm_##n:							\
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #define KVMTEST_PR(n)			__KVMTEST(n)
 #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
 #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 851bac7afa4b..1bd92fd43cfb 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -123,6 +123,8 @@
 #define BOOK3S_HFLAG_SLB			0x2
 #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
 #define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
 
 #define RESUME_FLAG_NV		(1<<0)	/* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */
@@ -136,6 +138,8 @@
 #define KVM_GUEST_MODE_NONE	0
 #define KVM_GUEST_MODE_GUEST	1
 #define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
 
 #define KVM_INST_FETCH_FAILED	-1
 
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f1a874..4a594b76674d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -58,16 +58,18 @@ struct hpte_cache {
 	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
 	struct rcu_head rcu_head;
 	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
-	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
 		u64 esid;
@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
 	int hpte_cache_count;
 	spinlock_t mmu_lock;
 };
@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
-	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+	return vcpu->arch.book3s;
 }
 
-extern void kvm_return_point(void);
-
 /* Also add subarch specific defines */
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +207,6 @@ extern void kvm_return_point(void);
 #include <asm/kvm_book3s_64.h>
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
-	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		svcpu->gpr[num] = val;
-		svcpu_put(svcpu);
-		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
-	} else
-		vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong r = svcpu->gpr[num];
-		svcpu_put(svcpu);
-		return r;
-	} else
-		return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->cr = val;
-	svcpu_put(svcpu);
-	to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->cr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->xer = val;
-	to_book3s(vcpu)->shadow_vcpu->xer = val;
-	svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->xer;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->ctr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->ctr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->lr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->lr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->pc = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->pc;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->fault_dar;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	ulong crit_raw = vcpu->arch.shared->critical;
-	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
-	bool crit;
-
-	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
-		crit_raw &= 0xffffffff;
-		crit_r1 &= 0xffffffff;
-	}
-
-	/* Critical section when crit == r1 */
-	crit = (crit_raw == crit_r1);
-	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
-	return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-}
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;
@@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-#endif
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
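The to_book3s() change goes hand in hand with the removal of the embedded `struct kvm_vcpu vcpu` member above: the vcpu is no longer nested inside kvmppc_vcpu_book3s, so container_of() cannot recover the container any more and the accessor instead follows a back-pointer (vcpu->arch.book3s, added in kvm_host.h below). A toy sketch of the two accessor styles (the names here are made up for illustration, not kernel code):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vcpu { struct book3s *book3s_ptr; };	/* stands in for vcpu->arch.book3s */

struct book3s {
	unsigned long hior;
	struct vcpu vcpu;	/* old layout: the vcpu was embedded in the container */
};

/* Old style: walk from the embedded member back out to its container. */
static struct book3s *to_book3s_old(struct vcpu *v)
{
	return container_of(v, struct book3s, vcpu);
}

/* New style: the vcpu simply carries a pointer to its book3s state. */
static struct book3s *to_book3s_new(struct vcpu *v)
{
	return v->book3s_ptr;
}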
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index ce0ef6ce8f86..c720e0b3238d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -22,7 +22,7 @@
 
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
-	return to_book3s(vcpu)->shadow_vcpu;
+	return vcpu->arch.shadow_vcpu;
 }
 
 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 86d638a3b359..bf0fa8b0a883 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,7 +20,7 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #define SPAPR_TCE_SHIFT		12
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 extern unsigned long kvm_rma_pages;
 #endif
@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
 		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
 }
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 /*
  * Note modification of an HPTE; set the HPTE modified bit
  * if anyone is interested.
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9039d3c97eec..0bd9348a4db9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -83,7 +83,7 @@ struct kvmppc_host_state {
 	u8 restore_hid5;
 	u8 napping;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
@@ -101,6 +101,7 @@ struct kvmppc_host_state {
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
+	u64 ppr;
 #endif
 };
 
@@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
-
-	u32 fault_dsisr;
-	u32 last_inst;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
+
 	ulong shadow_srr1;
 	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 sr[16];			/* Guest SRs */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb34c986..dd8f61510dfd 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,7 +26,12 @@
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS			64
 
 #define KVMPPC_INST_EHPRIV		0x7c00021c
+#define EHPRIV_OC_SHIFT			11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG			1
+#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
+					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
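The composed KVMPPC_INST_EHPRIV_DEBUG opcode is simply the plain ehpriv encoding with OC = 1 set at bit 11. A quick stand-alone check of the arithmetic (illustrative only, not part of the patch):

#include <assert.h>

#define KVMPPC_INST_EHPRIV		0x7c00021c
#define EHPRIV_OC_SHIFT			11
#define EHPRIV_OC_DEBUG			1
#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))

int main(void)
{
	/* 0x7c00021c | 0x800 == 0x7c000a1c */
	assert(KVMPPC_INST_EHPRIV_DEBUG == 0x7c000a1c);
	return 0;
}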
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 33283532e9d8..237d1d25b448 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -63,20 +63,17 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 #endif
 
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
-
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13
 #define HPTEG_HASH_BITS_PTE_LONG	12
 #define HPTEG_HASH_BITS_VPTE		13
 #define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
 #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
 #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
 
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL
@@ -89,6 +86,9 @@ struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
 
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
@@ -224,15 +224,15 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
 	unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
 struct kvm_arch {
 	unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	unsigned int host_lpid;
@@ -256,7 +256,10 @@ struct kvm_arch {
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
@@ -267,6 +270,7 @@ struct kvm_arch {
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*
@@ -294,6 +298,10 @@ struct kvmppc_vcore {
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -328,6 +336,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
 struct kvmppc_mmu {
@@ -340,7 +349,8 @@ struct kvmppc_mmu {
 	/* book3s */
 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -360,6 +370,7 @@ struct kvmppc_slb {
 	bool large	: 1;	/* PTEs are 16MB */
 	bool tb		: 1;	/* 1TB segment */
 	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
 };
 
 # ifdef CONFIG_PPC_FSL_BOOK3E
@@ -377,17 +388,6 @@ struct kvmppc_slb {
 #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-	u32 dbcr0;
-	u32 dbcr1;
-	u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-	u32 dbcr4;
-#endif
-	u64 iac[KVMPPC_BOOKE_MAX_IAC];
-	u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2
@@ -402,6 +402,10 @@ struct kvm_vcpu_arch {
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
 	ulong gpr[32];
@@ -463,6 +467,8 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	ulong dabr;
 	ulong cfar;
+	ulong ppr;
+	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -498,6 +504,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
@@ -531,7 +539,10 @@ struct kvm_vcpu_arch {
 	u32 eptcfg;
 	u32 epr;
 	u32 crit_save;
-	struct kvmppc_booke_debug_reg dbg_reg;
+	/* guest debug registers*/
+	struct debug_reg dbg_reg;
+	/* hardware visible debug registers when in guest state */
+	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -582,7 +593,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_icp *icp; /* XICS presentation controller */
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
 	unsigned long pgfault_addr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a26c20..c8317fbf92c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				       struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-				  unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong *val);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				struct kvm_allocate_rma *rma);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
 				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
 				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old);
+	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			   unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long hva);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
 	return r;
 }
 
-union kvmppc_one_reg {
-	u32	wval;
-	u64	dval;
-	vector128 vval;
-	u64	vsxval[2];
-	struct {
-		u64	addr;
-		u64	length;
-	}	vpaval;
-};
-
 #define one_reg_size(id) \
 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
 		__v;					\
 	})
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
 struct openpic;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 
 static inline u32 kvmppc_get_xics_latch(void)
 {
-	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+	u32 xirr;
 
+	xirr = get_paca()->kvm_hstate.saved_xirr;
 	get_paca()->kvm_hstate.saved_xirr = 0;
-
 	return xirr;
 }
 
@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
 }
 
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
 
 #else
 static inline void __init kvm_cma_reserve(void)
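The struct kvmppc_ops table added above is what lets the HV and PR back ends coexist as separate modules: each back end supplies its own ops, struct kvm_arch records which table a VM uses, and generic code dispatches through that pointer, as the new inline kvmppc_fast_vcpu_kick() and is_kvmppc_hv_enabled() do. A reduced, stand-alone sketch of the pattern (toy types, not the in-tree structures):

#include <stdio.h>

struct toy_vcpu;

struct toy_kvmppc_ops {
	void (*fast_vcpu_kick)(struct toy_vcpu *vcpu);
};

struct toy_kvm  { struct toy_kvmppc_ops *kvm_ops; };
struct toy_vcpu { struct toy_kvm *kvm; };

static void hv_kick(struct toy_vcpu *vcpu) { (void)vcpu; printf("HV kick\n"); }
static void pr_kick(struct toy_vcpu *vcpu) { (void)vcpu; printf("PR kick\n"); }

static struct toy_kvmppc_ops hv_ops = { .fast_vcpu_kick = hv_kick };
static struct toy_kvmppc_ops pr_ops = { .fast_vcpu_kick = pr_kick };

/* mirrors is_kvmppc_hv_enabled(): "is this VM on the HV back end?" */
static int is_hv_enabled(struct toy_kvm *kvm)
{
	return kvm->kvm_ops == &hv_ops;
}

/* mirrors the new inline kvmppc_fast_vcpu_kick(): dispatch via the table */
static void fast_vcpu_kick(struct toy_vcpu *vcpu)
{
	vcpu->kvm->kvm_ops->fast_vcpu_kick(vcpu);
}

int main(void)
{
	struct toy_kvm  vm   = { .kvm_ops = &pr_ops };
	struct toy_vcpu vcpu = { .kvm = &vm };

	fast_vcpu_kick(&vcpu);			/* prints "PR kick" */
	printf("hv? %d\n", is_hv_enabled(&vm));	/* prints "hv? 0" */
	return 0;
}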
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5954cebbc55..b6ea9e068c13 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,7 @@ struct paca_struct {
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	/* We use this to store guest state in */
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7794b2b04eb2..fc14a38c7ccf 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -208,6 +208,7 @@ struct debug_reg {
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
+
 #ifdef CONFIG_PPC64
 	unsigned long	ksp_vsid;
 #endif
@@ -221,6 +222,7 @@ struct thread_struct {
 	void		*pgdir;		/* root of page-table tree */
 	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
 #endif
+	/* Debug Registers */
 	struct debug_reg debug;
 	struct thread_fp_state	fp_state;
 	struct thread_fp_state	*fp_save_area;
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 0156702ba24e..576ad88104cb 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -40,7 +40,7 @@
 #define _PAGE_U1	0x010000
 #define _PAGE_U0	0x020000
 #define _PAGE_ACCESSED	0x040000
-#define _PAGE_LENDIAN	0x080000
+#define _PAGE_ENDIAN	0x080000
 #define _PAGE_GUARDED	0x100000
 #define _PAGE_COHERENT	0x200000 /* M: enforce memory coherence */
 #define _PAGE_NO_CACHE	0x400000 /* I: cache inhibit */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 126f6e98f84d..5c45787d551e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -248,6 +248,7 @@
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */
 #define SPRN_TBWU	0x11D	/* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40	0x11E	/* Timebase upper 40 bits (hyper, R/W) */
 #define SPRN_SPURR	0x134	/* Scaled PURR */
 #define SPRN_HSPRG0	0x130	/* Hypervisor Scratch 0 */
 #define SPRN_HSPRG1	0x131	/* Hypervisor Scratch 1 */
@@ -288,6 +289,7 @@
 #define   LPCR_ISL	(1ul << (63-2))
 #define   LPCR_VC_SH	(63-2)
 #define   LPCR_DPFD_SH	(63-11)
+#define   LPCR_DPFD	(7ul << LPCR_DPFD_SH)
 #define   LPCR_VRMASD	(0x1ful << (63-16))
 #define   LPCR_VRMA_L	(1ul << (63-12))
 #define   LPCR_VRMA_LP0	(1ul << (63-15))
@@ -304,6 +306,7 @@
 #define   LPCR_PECE2	0x00001000	/* machine check etc can cause exit */
 #define   LPCR_MER	0x00000800	/* Mediated External Exception */
 #define   LPCR_MER_SH	11
+#define   LPCR_TC	0x00000200	/* Translation control */
 #define   LPCR_LPES	0x0000000c
 #define   LPCR_LPES0	0x00000008	/* LPAR Env selector 0 */
 #define   LPCR_LPES1	0x00000004	/* LPAR Env selector 1 */
@@ -316,6 +319,10 @@
 #define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */
 #define	SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
+#define SPRN_PCR	0x152	/* Processor compatibility register */
+#define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
+#define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
+#define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
 #define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
 #define SPRN_TLBINDEXR	0x154	/* P7 TLB control register */
 #define SPRN_TLBVPNR	0x155	/* P7 TLB control register */
@@ -425,6 +432,7 @@
 #define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
 #define	 HID4_LPID5_SH	 (63 - 6)	/* partition ID bottom 4 bits */
 #define	 HID4_RMOR_SH	 (63 - 22)	/* real mode offset (16 bits) */
+#define  HID4_RMOR	 (0xFFFFul << HID4_RMOR_SH)
 #define  HID4_LPES1	 (1 << (63-57))	/* LPAR env. sel. bit 1 */
 #define  HID4_RMLS0_SH	 (63 - 58)	/* Real mode limit top bit */
 #define  HID4_LPID1_SH	 0		/* partition ID top 2 bits */
@@ -1107,6 +1115,13 @@
 #define PVR_BE		0x0070
 #define PVR_PA6T	0x0090
 
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204	0x0f000001
+#define PVR_ARCH_205	0x0f000002
+#define PVR_ARCH_206	0x0f000003
+#define PVR_ARCH_206p	0x0f100003
+#define PVR_ARCH_207	0x0f000004
+
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()		({unsigned long rval; \
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e9ff90..6836ec79a830 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_PPC_SMT
 #define __KVM_HAVE_IRQCHIP
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
 
 struct kvm_regs {
 	__u64 pc;
@@ -269,7 +270,24 @@ struct kvm_fpu {
 	__u64 fpr[32];
 };
 
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE		0x0
+#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 struct kvm_debug_exit_arch {
+	__u64 address;
+	/*
+	 * exiting to userspace because of h/w breakpoint, watchpoint
+	 * (read, write or both) and software breakpoint.
+	 */
+	__u32 status;
+	__u32 reserved;
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +299,6 @@ struct kvm_guest_debug_arch {
 		 * Type denotes h/w breakpoint, read watchpoint, write
		 * watchpoint or watchpoint (both read and write).
		 */
-#define KVMPPC_DEBUG_NONE		0x0
-#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 		__u32 type;
 		__u32 reserved;
 	} bp[16];
@@ -429,6 +443,11 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
@@ -499,6 +518,65 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TLB3PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
 #define KVM_REG_PPC_EPTCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
 
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_PPR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM		(KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n)	(KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n)	(KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
 
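All of the new ids above are ordinary ONE_REG registers, so userspace reads and writes them through the existing KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls on a vcpu fd. A hedged sketch of reading the new timebase offset (the fd plumbing and error handling are assumed, and the installed headers must already carry the definitions above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_tb_offset(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg;

	memset(&reg, 0, sizeof(reg));
	reg.id   = KVM_REG_PPC_TB_OFFSET;	/* new id defined above */
	reg.addr = (uintptr_t)val;		/* kernel copies the value here */

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}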