Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s.h')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 98 +++++++++++++++++------
1 file changed, 81 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 69c7377d207..aa795ccef29 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,6 +90,8 @@ struct kvmppc_vcpu_book3s {
 #endif
 	int context_id[SID_CONTEXTS];
 
+	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */
+
 	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
@@ -119,6 +121,11 @@ extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
+			struct kvm_vcpu *vcpu, unsigned long addr,
+			unsigned long status);
+extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
+			unsigned long slb_v, unsigned long valid);
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
@@ -138,6 +145,21 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+			unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+			unsigned long pte_index);
+void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+			unsigned long pte_index);
+extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
+			unsigned long *nb_ret);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
+extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+			long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+			long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
+			struct kvm_memory_slot *memslot);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -183,7 +205,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	if ( num < 14 ) {
-		to_svcpu(vcpu)->gpr[num] = val;
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		svcpu->gpr[num] = val;
+		svcpu_put(svcpu);
 		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
 	} else
 		vcpu->arch.gpr[num] = val;
@@ -191,80 +215,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	if ( num < 14 )
-		return to_svcpu(vcpu)->gpr[num];
-	else
+	if ( num < 14 ) {
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong r = svcpu->gpr[num];
+		svcpu_put(svcpu);
+		return r;
+	} else
 		return vcpu->arch.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	to_svcpu(vcpu)->cr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->cr = val;
+	svcpu_put(svcpu);
 	to_book3s(vcpu)->shadow_vcpu->cr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->cr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->cr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 {
-	to_svcpu(vcpu)->xer = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->xer = val;
 	to_book3s(vcpu)->shadow_vcpu->xer = val;
+	svcpu_put(svcpu);
 }
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->xer;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->xer;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->ctr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->ctr = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->ctr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->ctr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->lr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->lr = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->lr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->lr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->pc = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->pc = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->pc;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->pc;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
 	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
 
 	/* Load the instruction manually if it failed to do so in the
 	 * exit path */
 	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
 		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
 
-	return svcpu->last_inst;
+	r = svcpu->last_inst;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->fault_dar;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->fault_dar;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
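[Editor's sketch, not part of the patch: every accessor converted above follows one shape -- take a reference with svcpu_get(), copy the field in or out, release with svcpu_put(). The rationale is presumably that the shadow vcpu must be pinned while accessed (e.g. per-CPU data touched with preemption disabled on 64-bit Book3S), though this hunk does not say so. The helper name below is hypothetical, and the fault_dsisr field is assumed to exist alongside fault_dar.]

/*
 * Illustrative only.  Assumes svcpu_get() returns a pinned reference
 * to the shadow vcpu and svcpu_put() releases it.
 */
static inline u32 example_get_fault_dsisr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r = svcpu->fault_dsisr;	/* copy out while the reference is held */

	svcpu_put(svcpu);		/* never use svcpu after this point */
	return r;
}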