path: root/arch/powerpc/include/asm/kvm_book3s.h
author		Alexander Graf <agraf@suse.de>	2011-12-09 08:44:13 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:52:30 -0500
commit		468a12c2b53776721ff83517d4a195b85c5fce54 (patch)
tree		ba417210997c2e3119525641764303d97db32815 /arch/powerpc/include/asm/kvm_book3s.h
parent		d33ad328c0025c45f4688a769aeebddc342222c1 (diff)
KVM: PPC: Use get/set for to_svcpu to help preemption
When running the 64-bit Book3s PR code without CONFIG_PREEMPT_NONE, we were doing a few things wrong, most notably accessing PACA fields without making sure that the pointers stay stable across the access (preempt_disable()).

This patch moves to_svcpu towards a get/put model which allows us to disable preemption while accessing the shadow vcpu fields in the PACA. That way we can run preemptible and everyone's happy!

Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
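For context, the get/put model amounts to a pair of small helpers that bracket every use of the PACA-resident shadow vcpu with a preemption-disabled section. The helpers themselves are not part of this file's diff (on 64-bit they sit on the kvm_book3s_64.h side of the header split), so the sketch below is a minimal illustration of the intended semantics, not a verbatim copy of the commit's definitions:

/*
 * Minimal sketch of the get/put helpers, assuming the 64-bit case where
 * the shadow vcpu is embedded in the per-CPU PACA.  The real definitions
 * live outside the hunks shown below.
 */
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
        /* Keep the task on this CPU so the PACA pointer stays stable. */
        preempt_disable();
        return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        /* Shadow vcpu accesses are done; allow preemption again. */
        preempt_enable();
}

Every accessor in the diff below then follows the same shape: svcpu_get(), touch the fields, svcpu_put().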
Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s.h')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	| 76
1 file changed, 59 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 69c7377d207..c941c21a189 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -183,7 +183,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
         if ( num < 14 ) {
-                to_svcpu(vcpu)->gpr[num] = val;
+                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+                svcpu->gpr[num] = val;
+                svcpu_put(svcpu);
                 to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
         } else
                 vcpu->arch.gpr[num] = val;
@@ -191,80 +193,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-        if ( num < 14 )
-                return to_svcpu(vcpu)->gpr[num];
-        else
+        if ( num < 14 ) {
+                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+                ulong r = svcpu->gpr[num];
+                svcpu_put(svcpu);
+                return r;
+        } else
                 return vcpu->arch.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-        to_svcpu(vcpu)->cr = val;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        svcpu->cr = val;
+        svcpu_put(svcpu);
         to_book3s(vcpu)->shadow_vcpu->cr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-        return to_svcpu(vcpu)->cr;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        u32 r;
+        r = svcpu->cr;
+        svcpu_put(svcpu);
+        return r;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 {
-        to_svcpu(vcpu)->xer = val;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        svcpu->xer = val;
         to_book3s(vcpu)->shadow_vcpu->xer = val;
+        svcpu_put(svcpu);
 }
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-        return to_svcpu(vcpu)->xer;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        u32 r;
+        r = svcpu->xer;
+        svcpu_put(svcpu);
+        return r;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-        to_svcpu(vcpu)->ctr = val;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        svcpu->ctr = val;
+        svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-        return to_svcpu(vcpu)->ctr;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        ulong r;
+        r = svcpu->ctr;
+        svcpu_put(svcpu);
+        return r;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-        to_svcpu(vcpu)->lr = val;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        svcpu->lr = val;
+        svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-        return to_svcpu(vcpu)->lr;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        ulong r;
+        r = svcpu->lr;
+        svcpu_put(svcpu);
+        return r;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-        to_svcpu(vcpu)->pc = val;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        svcpu->pc = val;
+        svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-        return to_svcpu(vcpu)->pc;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        ulong r;
+        r = svcpu->pc;
+        svcpu_put(svcpu);
+        return r;
 }
 
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
         ulong pc = kvmppc_get_pc(vcpu);
-        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        u32 r;
 
         /* Load the instruction manually if it failed to do so in the
          * exit path */
         if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
                 kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
 
-        return svcpu->last_inst;
+        r = svcpu->last_inst;
+        svcpu_put(svcpu);
+        return r;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
-        return to_svcpu(vcpu)->fault_dar;
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+        ulong r;
+        r = svcpu->fault_dar;
+        svcpu_put(svcpu);
+        return r;
 }
 
 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)