Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s.h')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	157
1 file changed, 132 insertions, 25 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index db7db0a96967..6f74d93725a0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,46 +22,47 @@
 
 #include <linux/types.h>
 #include <linux/kvm_host.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
 
 struct kvmppc_slb {
 	u64 esid;
 	u64 vsid;
 	u64 orige;
 	u64 origv;
-	bool valid;
-	bool Ks;
-	bool Kp;
-	bool nx;
-	bool large;	/* PTEs are 16MB */
-	bool tb;	/* 1TB segment */
-	bool class;
+	bool valid : 1;
+	bool Ks : 1;
+	bool Kp : 1;
+	bool nx : 1;
+	bool large : 1;	/* PTEs are 16MB */
+	bool tb : 1;	/* 1TB segment */
+	bool class : 1;
 };
 
 struct kvmppc_sr {
 	u32 raw;
 	u32 vsid;
-	bool Ks;
-	bool Kp;
-	bool nx;
+	bool Ks : 1;
+	bool Kp : 1;
+	bool nx : 1;
+	bool valid : 1;
 };
 
 struct kvmppc_bat {
 	u64 raw;
 	u32 bepi;
 	u32 bepi_mask;
-	bool vs;
-	bool vp;
 	u32 brpn;
 	u8 wimg;
 	u8 pp;
+	bool vs : 1;
+	bool vp : 1;
 };
 
 struct kvmppc_sid_map {
 	u64 guest_vsid;
 	u64 guest_esid;
 	u64 host_vsid;
-	bool valid;
+	bool valid : 1;
 };
 
 #define SID_MAP_BITS 9
@@ -70,7 +71,7 @@ struct kvmppc_sid_map {
 
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct kvmppc_slb slb[64];
 	struct {
@@ -82,9 +83,10 @@ struct kvmppc_vcpu_book3s {
 	struct kvmppc_bat ibat[8];
 	struct kvmppc_bat dbat[8];
 	u64 hid[6];
+	u64 gqr[8];
 	int slb_nr;
+	u32 dsisr;
 	u64 sdr1;
-	u64 dsisr;
 	u64 hior;
 	u64 msr_mask;
 	u64 vsid_first;
@@ -98,15 +100,15 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST 1
 #define CONTEXT_GUEST_END 2
 
-#define VSID_REAL 0xfffffffffff00000
-#define VSID_REAL_DR 0xffffffffffe00000
-#define VSID_REAL_IR 0xffffffffffd00000
-#define VSID_BAT 0xffffffffffc00000
-#define VSID_PR 0x8000000000000000
+#define VSID_REAL 0x1fffffffffc00000ULL
+#define VSID_BAT 0x1fffffffffb00000ULL
+#define VSID_REAL_DR 0x2000000000000000ULL
+#define VSID_REAL_IR 0x4000000000000000ULL
+#define VSID_PR 0x8000000000000000ULL
 
-extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
-extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
@@ -114,11 +116,13 @@ extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
 extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
-extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data);
-extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr);
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
+extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
@@ -126,6 +130,8 @@ extern void kvmppc_rmcall(ulong srr0, ulong srr1);
 extern void kvmppc_load_up_fpu(void);
 extern void kvmppc_load_up_altivec(void);
 extern void kvmppc_load_up_vsx(void);
+extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
+extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
@@ -140,7 +146,108 @@ static inline ulong dsisr(void)
 }
 
 extern void kvm_return_point(void);
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+	if ( num < 14 ) {
+		to_svcpu(vcpu)->gpr[num] = val;
+		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
+	} else
+		vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+	if ( num < 14 )
+		return to_svcpu(vcpu)->gpr[num];
+	else
+		return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+	to_svcpu(vcpu)->cr = val;
+	to_book3s(vcpu)->shadow_vcpu->cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+	return to_svcpu(vcpu)->cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+	to_svcpu(vcpu)->xer = val;
+	to_book3s(vcpu)->shadow_vcpu->xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+	return to_svcpu(vcpu)->xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+	to_svcpu(vcpu)->ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+	return to_svcpu(vcpu)->ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+	to_svcpu(vcpu)->lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+	return to_svcpu(vcpu)->lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+	to_svcpu(vcpu)->pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+	return to_svcpu(vcpu)->pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+	return svcpu->last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+	return to_svcpu(vcpu)->fault_dar;
+}
+
+/* Magic register values loaded into r3 and r4 before the 'sc' assembly
+ * instruction for the OSI hypercalls */
+#define OSI_SC_MAGIC_R3 0x113724FA
+#define OSI_SC_MAGIC_R4 0x77810F9B
 
 #define INS_DCBZ 0x7c0007ec
 
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#include <asm/kvm_book3s_32.h>
+#else
+#include <asm/kvm_book3s_64.h>
+#endif
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
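
For context, the kvmppc_get_gpr()/kvmppc_set_gpr() and related accessors added by this patch are meant to replace direct vcpu->arch.gpr[] accesses, since the fast-path registers (r0-r13 and CR/XER) now live in the shadow vcpu. Below is a minimal illustrative sketch of how a caller might use them; the function name emulate_mr() and its instruction-field parameters are hypothetical, only the accessors themselves come from this header.

/* Hypothetical illustration: emulate "mr rD,rS" through the new
 * accessors instead of touching vcpu->arch.gpr[] directly. For
 * registers 0..13 the accessors transparently read from and write
 * to the shadow vcpu (and its host copy). */
static void emulate_mr(struct kvm_vcpu *vcpu, int rd, int rs)
{
	ulong val = kvmppc_get_gpr(vcpu, rs);		/* read source GPR */

	kvmppc_set_gpr(vcpu, rd, val);			/* write destination GPR */
	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);	/* step past the emulated insn */
}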