author		Sascha Hauer <s.hauer@pengutronix.de>	2011-08-08 02:22:41 -0400
committer	Sascha Hauer <s.hauer@pengutronix.de>	2011-08-08 02:22:41 -0400
commit		1a43f2012455a977397deffe35912fd3f3ce17b9 (patch)
tree		5189f337df44e7a495fbd097cd476b0380babd8c /arch/powerpc/include/asm/kvm_book3s.h
parent		e1b96ada659431669efaf3defa997abf5db68130 (diff)
parent		322a8b034003c0d46d39af85bf24fee27b902f48 (diff)
Merge commit 'v3.1-rc1' into imx-fixes
Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s.h')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	196
1 file changed, 163 insertions, 33 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d62e703f1214..98da010252a3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -24,20 +24,6 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_book3s_asm.h>
 
-struct kvmppc_slb {
-	u64 esid;
-	u64 vsid;
-	u64 orige;
-	u64 origv;
-	bool valid	: 1;
-	bool Ks		: 1;
-	bool Kp		: 1;
-	bool nx		: 1;
-	bool large	: 1;	/* PTEs are 16MB */
-	bool tb		: 1;	/* 1TB segment */
-	bool class	: 1;
-};
-
 struct kvmppc_bat {
 	u64 raw;
 	u32 bepi;
@@ -67,11 +53,22 @@ struct kvmppc_sid_map {
 #define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
 #endif
 
+struct hpte_cache {
+	struct hlist_node list_pte;
+	struct hlist_node list_pte_long;
+	struct hlist_node list_vpte;
+	struct hlist_node list_vpte_long;
+	struct rcu_head rcu_head;
+	u64 host_va;
+	u64 pfn;
+	ulong slot;
+	struct kvmppc_pte pte;
+};
+
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
 	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
-	struct kvmppc_slb slb[64];
 	struct {
 		u64 esid;
 		u64 vsid;
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s {
 	struct kvmppc_bat dbat[8];
 	u64 hid[6];
 	u64 gqr[8];
-	int slb_nr;
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
@@ -93,7 +89,13 @@ struct kvmppc_vcpu_book3s {
 	u64 vsid_max;
 #endif
 	int context_id[SID_CONTEXTS];
-	ulong prog_flags; /* flags to inject when giving a 700 trap */
+
+	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+	int hpte_cache_count;
+	spinlock_t mmu_lock;
 };
 
 #define CONTEXT_HOST		0
@@ -110,8 +112,10 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -123,19 +127,22 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern int kvmppc_mmu_hpte_sysinit(void);
 extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
-extern ulong kvmppc_trampoline_lowmem;
-extern ulong kvmppc_trampoline_enter;
+extern void kvmppc_handler_lowmem_trampoline(void);
+extern void kvmppc_handler_trampoline_enter(void);
 extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_hv_entry_trampoline(void);
 extern void kvmppc_load_up_fpu(void);
 extern void kvmppc_load_up_altivec(void);
 extern void kvmppc_load_up_vsx(void);
@@ -147,15 +154,32 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
 }
 
-static inline ulong dsisr(void)
+extern void kvm_return_point(void);
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
-	ulong r;
-	asm ( "mfdsisr %0 " : "=r" (r) );
-	return r;
+	return to_book3s(vcpu)->hior;
 }
 
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	if (pending_now)
+		vcpu->arch.shared->int_pending = 1;
+	else if (old_pending)
+		vcpu->arch.shared->int_pending = 0;
+}
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
@@ -244,6 +268,120 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return to_svcpu(vcpu)->fault_dar;
 }
 
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	ulong crit_raw = vcpu->arch.shared->critical;
+	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+	bool crit;
+
+	/* Truncate crit indicators in 32 bit mode */
+	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+		crit_raw &= 0xffffffff;
+		crit_r1 &= 0xffffffff;
+	}
+
+	/* Critical section when crit == r1 */
+	crit = (crit_raw == crit_r1);
+	/* ... and we're in supervisor mode */
+	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+	return crit;
+}
+#else /* CONFIG_KVM_BOOK3S_PR */
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+}
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+	vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+	vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu);
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+	return vcpu->arch.last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault_dar;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+#endif
+
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3	0x113724FA
@@ -251,12 +389,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 
 #define INS_DCBZ	0x7c0007ec
 
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
-
 #endif /* __ASM_KVM_BOOK3S_H__ */