path: root/arch/powerpc/kvm/emulate.c
author		Alexander Graf <agraf@suse.de>	2010-01-07 20:58:01 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:47 -0500
commit		8e5b26b55a8b6aee2c789b1d20ec715f9e4bea5c (patch)
tree		4e2d003852ce327a47153b6c100239c6d8e1418f /arch/powerpc/kvm/emulate.c
parent		0d178975d0a5afe5e0fd3211bd1397905b225be5 (diff)
KVM: PPC: Use accessor functions for GPR access
All code in PPC KVM currently accesses GPRs in the vcpu struct directly. While
there's nothing wrong with that for the way GPRs are currently stored and
loaded, it doesn't suffice for the PACA acceleration that will follow in this
patchset.

So let's just create little wrapper inline functions that we call whenever a
GPR needs to be read from or written to. The compiled code shouldn't really
change at all for now.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
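The conversion below is purely mechanical: every read of vcpu->arch.gpr[n]
becomes kvmppc_get_gpr(vcpu, n), and every write becomes
kvmppc_set_gpr(vcpu, n, val). As a sketch of the idea (not the verbatim
header change, which lands elsewhere in this series, in
arch/powerpc/include/asm/kvm_ppc.h), the wrappers plausibly look like the
following; the simplified struct kvm_vcpu and the ulong typedef are
stand-ins for the real kernel definitions:

/* Hedged, self-contained approximation of the GPR accessors this series
 * introduces; the real kernel types are far richer, but only the gpr
 * array matters for this sketch. */
typedef unsigned long ulong;

struct kvm_vcpu {
	struct {
		ulong gpr[32];	/* guest general-purpose registers */
	} arch;
};

/* Single choke point for GPR writes; today it just stores to the array. */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

/* Single choke point for GPR reads; a later patch can redirect this to a
 * shadow copy (e.g. the PACA) without touching any call site. */
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

Because the wrappers are static inline and do exactly what the open-coded
accesses did, the generated code should be unchanged for now; the win is
that the GPR storage location becomes a private detail of two functions.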
Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
-rw-r--r--	arch/powerpc/kvm/emulate.c	106
1 file changed, 57 insertions(+), 49 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 303457b2f52a..38219af0cd0e 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -170,14 +170,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_31_XOP_STWX:
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
-			                               vcpu->arch.gpr[rs],
+			                               kvmppc_get_gpr(vcpu, rs),
 			                               4, 1);
 			break;
 
 		case OP_31_XOP_STBX:
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
-			                               vcpu->arch.gpr[rs],
+			                               kvmppc_get_gpr(vcpu, rs),
 			                               1, 1);
 			break;
 
@@ -186,14 +186,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			ra = get_ra(inst);
 			rb = get_rb(inst);
 
-			ea = vcpu->arch.gpr[rb];
+			ea = kvmppc_get_gpr(vcpu, rb);
 			if (ra)
-				ea += vcpu->arch.gpr[ra];
+				ea += kvmppc_get_gpr(vcpu, ra);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-			                               vcpu->arch.gpr[rs],
+			                               kvmppc_get_gpr(vcpu, rs),
 			                               1, 1);
-			vcpu->arch.gpr[rs] = ea;
+			kvmppc_set_gpr(vcpu, rs, ea);
 			break;
 
 		case OP_31_XOP_LHZX:
@@ -206,12 +206,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			ra = get_ra(inst);
 			rb = get_rb(inst);
 
-			ea = vcpu->arch.gpr[rb];
+			ea = kvmppc_get_gpr(vcpu, rb);
 			if (ra)
-				ea += vcpu->arch.gpr[ra];
+				ea += kvmppc_get_gpr(vcpu, ra);
 
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-			vcpu->arch.gpr[ra] = ea;
+			kvmppc_set_gpr(vcpu, ra, ea);
 			break;
 
 		case OP_31_XOP_MFSPR:
@@ -220,47 +220,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 			switch (sprn) {
 			case SPRN_SRR0:
-				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
 			case SPRN_SRR1:
-				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
 			case SPRN_PVR:
-				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
 			case SPRN_PIR:
-				vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
 			case SPRN_MSSSR0:
-				vcpu->arch.gpr[rt] = 0; break;
+				kvmppc_set_gpr(vcpu, rt, 0); break;
 
 			/* Note: mftb and TBRL/TBWL are user-accessible, so
 			 * the guest can always access the real TB anyways.
 			 * In fact, we probably will never see these traps. */
 			case SPRN_TBWL:
-				vcpu->arch.gpr[rt] = get_tb() >> 32; break;
+				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
 			case SPRN_TBWU:
-				vcpu->arch.gpr[rt] = get_tb(); break;
+				kvmppc_set_gpr(vcpu, rt, get_tb()); break;
 
 			case SPRN_SPRG0:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
 			case SPRN_SPRG1:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
 			case SPRN_SPRG2:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
 			case SPRN_SPRG3:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
 			/* Note: SPRG4-7 are user-readable, so we don't get
 			 * a trap. */
 
 			case SPRN_DEC:
 			{
 				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
-				vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
-				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
+				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
+				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
+					 vcpu->arch.dec, jd,
+					 kvmppc_get_gpr(vcpu, rt));
 				break;
 			}
 			default:
 				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
 				if (emulated == EMULATE_FAIL) {
 					printk("mfspr: unknown spr %x\n", sprn);
-					vcpu->arch.gpr[rt] = 0;
+					kvmppc_set_gpr(vcpu, rt, 0);
 				}
 				break;
 			}
@@ -272,7 +274,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rb = get_rb(inst);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-			                               vcpu->arch.gpr[rs],
+			                               kvmppc_get_gpr(vcpu, rs),
 			                               2, 1);
 			break;
 
@@ -281,14 +283,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			ra = get_ra(inst);
 			rb = get_rb(inst);
 
-			ea = vcpu->arch.gpr[rb];
+			ea = kvmppc_get_gpr(vcpu, rb);
 			if (ra)
-				ea += vcpu->arch.gpr[ra];
+				ea += kvmppc_get_gpr(vcpu, ra);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-			                               vcpu->arch.gpr[rs],
+			                               kvmppc_get_gpr(vcpu, rs),
 			                               2, 1);
-			vcpu->arch.gpr[ra] = ea;
+			kvmppc_set_gpr(vcpu, ra, ea);
 			break;
 
 		case OP_31_XOP_MTSPR:
@@ -296,9 +298,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rs = get_rs(inst);
 			switch (sprn) {
 			case SPRN_SRR0:
-				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SRR1:
-				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
 
 			/* XXX We need to context-switch the timebase for
 			 * watchdog and FIT. */
@@ -308,18 +310,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			case SPRN_MSSSR0: break;
 
 			case SPRN_DEC:
-				vcpu->arch.dec = vcpu->arch.gpr[rs];
+				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
 				kvmppc_emulate_dec(vcpu);
 				break;
 
 			case SPRN_SPRG0:
-				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SPRG1:
-				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SPRG2:
-				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
 			case SPRN_SPRG3:
-				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+				vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
 
 			default:
 				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
@@ -351,7 +353,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rb = get_rb(inst);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-			                               vcpu->arch.gpr[rs],
+			                               kvmppc_get_gpr(vcpu, rs),
 			                               4, 0);
 			break;
 
@@ -366,7 +368,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			rb = get_rb(inst);
 
 			emulated = kvmppc_handle_store(run, vcpu,
-			                               vcpu->arch.gpr[rs],
+			                               kvmppc_get_gpr(vcpu, rs),
 			                               2, 0);
 			break;
 
@@ -385,7 +387,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_LBZ:
@@ -397,35 +399,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STW:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+		                               kvmppc_get_gpr(vcpu, rs),
 		                               4, 1);
 		break;
 
 	case OP_STWU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+		                               kvmppc_get_gpr(vcpu, rs),
 		                               4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STB:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+		                               kvmppc_get_gpr(vcpu, rs),
 		                               1, 1);
 		break;
 
 	case OP_STBU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+		                               kvmppc_get_gpr(vcpu, rs),
 		                               1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_LHZ:
@@ -437,21 +443,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STH:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+		                               kvmppc_get_gpr(vcpu, rs),
 		                               2, 1);
 		break;
 
 	case OP_STHU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+		                               kvmppc_get_gpr(vcpu, rs),
 		                               2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	default: