diff options
author | Alexander Graf <agraf@suse.de> | 2011-12-09 08:44:13 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2012-03-05 07:52:30 -0500 |
commit | 468a12c2b53776721ff83517d4a195b85c5fce54 (patch) | |
tree | ba417210997c2e3119525641764303d97db32815 /arch/powerpc/include/asm | |
parent | d33ad328c0025c45f4688a769aeebddc342222c1 (diff) |
KVM: PPC: Use get/set for to_svcpu to help preemption
When running the 64-bit Book3s PR code without CONFIG_PREEMPT_NONE, we were
doing a few things wrong, most notably access to PACA fields without making
sure that the pointers stay stable across the access (preempt_disable()).
This patch moves to_svcpu towards a get/put model which allows us to disable
preemption while accessing the shadow vcpu fields in the PACA. That way we
can run preemptible and everyone's happy!
Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r-- | arch/powerpc/include/asm/kvm_book3s.h | 76 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_book3s_32.h | 6 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_book3s_64.h | 8 |
3 files changed, 71 insertions, 19 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 69c7377d2071..c941c21a1893 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -183,7 +183,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, | |||
183 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | 183 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) |
184 | { | 184 | { |
185 | if ( num < 14 ) { | 185 | if ( num < 14 ) { |
186 | to_svcpu(vcpu)->gpr[num] = val; | 186 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
187 | svcpu->gpr[num] = val; | ||
188 | svcpu_put(svcpu); | ||
187 | to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; | 189 | to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; |
188 | } else | 190 | } else |
189 | vcpu->arch.gpr[num] = val; | 191 | vcpu->arch.gpr[num] = val; |
@@ -191,80 +193,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | |||
191 | 193 | ||
192 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | 194 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) |
193 | { | 195 | { |
194 | if ( num < 14 ) | 196 | if ( num < 14 ) { |
195 | return to_svcpu(vcpu)->gpr[num]; | 197 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
196 | else | 198 | ulong r = svcpu->gpr[num]; |
199 | svcpu_put(svcpu); | ||
200 | return r; | ||
201 | } else | ||
197 | return vcpu->arch.gpr[num]; | 202 | return vcpu->arch.gpr[num]; |
198 | } | 203 | } |
199 | 204 | ||
200 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | 205 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) |
201 | { | 206 | { |
202 | to_svcpu(vcpu)->cr = val; | 207 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
208 | svcpu->cr = val; | ||
209 | svcpu_put(svcpu); | ||
203 | to_book3s(vcpu)->shadow_vcpu->cr = val; | 210 | to_book3s(vcpu)->shadow_vcpu->cr = val; |
204 | } | 211 | } |
205 | 212 | ||
206 | static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | 213 | static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) |
207 | { | 214 | { |
208 | return to_svcpu(vcpu)->cr; | 215 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
216 | u32 r; | ||
217 | r = svcpu->cr; | ||
218 | svcpu_put(svcpu); | ||
219 | return r; | ||
209 | } | 220 | } |
210 | 221 | ||
211 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) | 222 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) |
212 | { | 223 | { |
213 | to_svcpu(vcpu)->xer = val; | 224 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
225 | svcpu->xer = val; | ||
214 | to_book3s(vcpu)->shadow_vcpu->xer = val; | 226 | to_book3s(vcpu)->shadow_vcpu->xer = val; |
227 | svcpu_put(svcpu); | ||
215 | } | 228 | } |
216 | 229 | ||
217 | static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) | 230 | static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) |
218 | { | 231 | { |
219 | return to_svcpu(vcpu)->xer; | 232 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
233 | u32 r; | ||
234 | r = svcpu->xer; | ||
235 | svcpu_put(svcpu); | ||
236 | return r; | ||
220 | } | 237 | } |
221 | 238 | ||
222 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) | 239 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) |
223 | { | 240 | { |
224 | to_svcpu(vcpu)->ctr = val; | 241 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
242 | svcpu->ctr = val; | ||
243 | svcpu_put(svcpu); | ||
225 | } | 244 | } |
226 | 245 | ||
227 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) | 246 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) |
228 | { | 247 | { |
229 | return to_svcpu(vcpu)->ctr; | 248 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
249 | ulong r; | ||
250 | r = svcpu->ctr; | ||
251 | svcpu_put(svcpu); | ||
252 | return r; | ||
230 | } | 253 | } |
231 | 254 | ||
232 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) | 255 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) |
233 | { | 256 | { |
234 | to_svcpu(vcpu)->lr = val; | 257 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
258 | svcpu->lr = val; | ||
259 | svcpu_put(svcpu); | ||
235 | } | 260 | } |
236 | 261 | ||
237 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) | 262 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) |
238 | { | 263 | { |
239 | return to_svcpu(vcpu)->lr; | 264 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
265 | ulong r; | ||
266 | r = svcpu->lr; | ||
267 | svcpu_put(svcpu); | ||
268 | return r; | ||
240 | } | 269 | } |
241 | 270 | ||
242 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) | 271 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) |
243 | { | 272 | { |
244 | to_svcpu(vcpu)->pc = val; | 273 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
274 | svcpu->pc = val; | ||
275 | svcpu_put(svcpu); | ||
245 | } | 276 | } |
246 | 277 | ||
247 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) | 278 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) |
248 | { | 279 | { |
249 | return to_svcpu(vcpu)->pc; | 280 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
281 | ulong r; | ||
282 | r = svcpu->pc; | ||
283 | svcpu_put(svcpu); | ||
284 | return r; | ||
250 | } | 285 | } |
251 | 286 | ||
252 | static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) | 287 | static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) |
253 | { | 288 | { |
254 | ulong pc = kvmppc_get_pc(vcpu); | 289 | ulong pc = kvmppc_get_pc(vcpu); |
255 | struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); | 290 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
291 | u32 r; | ||
256 | 292 | ||
257 | /* Load the instruction manually if it failed to do so in the | 293 | /* Load the instruction manually if it failed to do so in the |
258 | * exit path */ | 294 | * exit path */ |
259 | if (svcpu->last_inst == KVM_INST_FETCH_FAILED) | 295 | if (svcpu->last_inst == KVM_INST_FETCH_FAILED) |
260 | kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); | 296 | kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); |
261 | 297 | ||
262 | return svcpu->last_inst; | 298 | r = svcpu->last_inst; |
299 | svcpu_put(svcpu); | ||
300 | return r; | ||
263 | } | 301 | } |
264 | 302 | ||
265 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | 303 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) |
266 | { | 304 | { |
267 | return to_svcpu(vcpu)->fault_dar; | 305 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
306 | ulong r; | ||
307 | r = svcpu->fault_dar; | ||
308 | svcpu_put(svcpu); | ||
309 | return r; | ||
268 | } | 310 | } |
269 | 311 | ||
270 | static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) | 312 | static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h index de604db135f5..38040ff82063 100644 --- a/arch/powerpc/include/asm/kvm_book3s_32.h +++ b/arch/powerpc/include/asm/kvm_book3s_32.h | |||
@@ -20,11 +20,15 @@ | |||
20 | #ifndef __ASM_KVM_BOOK3S_32_H__ | 20 | #ifndef __ASM_KVM_BOOK3S_32_H__ |
21 | #define __ASM_KVM_BOOK3S_32_H__ | 21 | #define __ASM_KVM_BOOK3S_32_H__ |
22 | 22 | ||
23 | static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) | 23 | static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) |
24 | { | 24 | { |
25 | return to_book3s(vcpu)->shadow_vcpu; | 25 | return to_book3s(vcpu)->shadow_vcpu; |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) | ||
29 | { | ||
30 | } | ||
31 | |||
28 | #define PTE_SIZE 12 | 32 | #define PTE_SIZE 12 |
29 | #define VSID_ALL 0 | 33 | #define VSID_ALL 0 |
30 | #define SR_INVALID 0x00000001 /* VSID 1 should always be unused */ | 34 | #define SR_INVALID 0x00000001 /* VSID 1 should always be unused */ |
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index d0ac94f98f9e..2054e4726ba2 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h | |||
@@ -21,10 +21,16 @@ | |||
21 | #define __ASM_KVM_BOOK3S_64_H__ | 21 | #define __ASM_KVM_BOOK3S_64_H__ |
22 | 22 | ||
23 | #ifdef CONFIG_KVM_BOOK3S_PR | 23 | #ifdef CONFIG_KVM_BOOK3S_PR |
24 | static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) | 24 | static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) |
25 | { | 25 | { |
26 | preempt_disable(); | ||
26 | return &get_paca()->shadow_vcpu; | 27 | return &get_paca()->shadow_vcpu; |
27 | } | 28 | } |
29 | |||
30 | static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) | ||
31 | { | ||
32 | preempt_enable(); | ||
33 | } | ||
28 | #endif | 34 | #endif |
29 | 35 | ||
30 | #define SPAPR_TCE_SHIFT 12 | 36 | #define SPAPR_TCE_SHIFT 12 |