aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/emulate.c
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2012-05-04 08:01:33 -0400
committerAlexander Graf <agraf@suse.de>2012-05-06 10:19:12 -0400
commitc46dc9a86148bc37c31d67a22a3887144ba7aa81 (patch)
treeba2a4f49072d3524d607ce3add83ef697fae517b /arch/powerpc/kvm/emulate.c
parent5b74716ebab10e7bce960d148fe6d8f6920451e5 (diff)
KVM: PPC: Emulator: clean up instruction parsing
Instructions on PPC are pretty similarly encoded. So instead of every instruction emulation code decoding the instruction fields itself, we can move that code to more generic places and rely on the compiler to optimize the unused bits away. This has 2 advantages. It makes the code smaller and it makes the code less error prone, as the instruction fields are always available, so accidental misusage is reduced. Functionally, this patch doesn't change anything. Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
-rw-r--r--arch/powerpc/kvm/emulate.c71
1 file changed, 4 insertions, 67 deletions
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index a27d4dc3b4a3..f63b5cbd8221 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -148,11 +148,10 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
148int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 148int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
149{ 149{
150 u32 inst = kvmppc_get_last_inst(vcpu); 150 u32 inst = kvmppc_get_last_inst(vcpu);
151 int ra; 151 int ra = get_ra(inst);
152 int rb; 152 int rs = get_rs(inst);
153 int rs; 153 int rt = get_rt(inst);
154 int rt; 154 int sprn = get_sprn(inst);
155 int sprn;
156 enum emulation_result emulated = EMULATE_DONE; 155 enum emulation_result emulated = EMULATE_DONE;
157 int advance = 1; 156 int advance = 1;
158 157
@@ -189,43 +188,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
189 advance = 0; 188 advance = 0;
190 break; 189 break;
191 case OP_31_XOP_LWZX: 190 case OP_31_XOP_LWZX:
192 rt = get_rt(inst);
193 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 191 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
194 break; 192 break;
195 193
196 case OP_31_XOP_LBZX: 194 case OP_31_XOP_LBZX:
197 rt = get_rt(inst);
198 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 195 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
199 break; 196 break;
200 197
201 case OP_31_XOP_LBZUX: 198 case OP_31_XOP_LBZUX:
202 rt = get_rt(inst);
203 ra = get_ra(inst);
204 rb = get_rb(inst);
205
206 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 199 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
207 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 200 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
208 break; 201 break;
209 202
210 case OP_31_XOP_STWX: 203 case OP_31_XOP_STWX:
211 rs = get_rs(inst);
212 emulated = kvmppc_handle_store(run, vcpu, 204 emulated = kvmppc_handle_store(run, vcpu,
213 kvmppc_get_gpr(vcpu, rs), 205 kvmppc_get_gpr(vcpu, rs),
214 4, 1); 206 4, 1);
215 break; 207 break;
216 208
217 case OP_31_XOP_STBX: 209 case OP_31_XOP_STBX:
218 rs = get_rs(inst);
219 emulated = kvmppc_handle_store(run, vcpu, 210 emulated = kvmppc_handle_store(run, vcpu,
220 kvmppc_get_gpr(vcpu, rs), 211 kvmppc_get_gpr(vcpu, rs),
221 1, 1); 212 1, 1);
222 break; 213 break;
223 214
224 case OP_31_XOP_STBUX: 215 case OP_31_XOP_STBUX:
225 rs = get_rs(inst);
226 ra = get_ra(inst);
227 rb = get_rb(inst);
228
229 emulated = kvmppc_handle_store(run, vcpu, 216 emulated = kvmppc_handle_store(run, vcpu,
230 kvmppc_get_gpr(vcpu, rs), 217 kvmppc_get_gpr(vcpu, rs),
231 1, 1); 218 1, 1);
@@ -233,28 +220,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
233 break; 220 break;
234 221
235 case OP_31_XOP_LHAX: 222 case OP_31_XOP_LHAX:
236 rt = get_rt(inst);
237 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 223 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
238 break; 224 break;
239 225
240 case OP_31_XOP_LHZX: 226 case OP_31_XOP_LHZX:
241 rt = get_rt(inst);
242 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 227 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
243 break; 228 break;
244 229
245 case OP_31_XOP_LHZUX: 230 case OP_31_XOP_LHZUX:
246 rt = get_rt(inst);
247 ra = get_ra(inst);
248 rb = get_rb(inst);
249
250 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 231 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
251 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 232 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
252 break; 233 break;
253 234
254 case OP_31_XOP_MFSPR: 235 case OP_31_XOP_MFSPR:
255 sprn = get_sprn(inst);
256 rt = get_rt(inst);
257
258 switch (sprn) { 236 switch (sprn) {
259 case SPRN_SRR0: 237 case SPRN_SRR0:
260 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); 238 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
@@ -310,20 +288,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
310 break; 288 break;
311 289
312 case OP_31_XOP_STHX: 290 case OP_31_XOP_STHX:
313 rs = get_rs(inst);
314 ra = get_ra(inst);
315 rb = get_rb(inst);
316
317 emulated = kvmppc_handle_store(run, vcpu, 291 emulated = kvmppc_handle_store(run, vcpu,
318 kvmppc_get_gpr(vcpu, rs), 292 kvmppc_get_gpr(vcpu, rs),
319 2, 1); 293 2, 1);
320 break; 294 break;
321 295
322 case OP_31_XOP_STHUX: 296 case OP_31_XOP_STHUX:
323 rs = get_rs(inst);
324 ra = get_ra(inst);
325 rb = get_rb(inst);
326
327 emulated = kvmppc_handle_store(run, vcpu, 297 emulated = kvmppc_handle_store(run, vcpu,
328 kvmppc_get_gpr(vcpu, rs), 298 kvmppc_get_gpr(vcpu, rs),
329 2, 1); 299 2, 1);
@@ -331,8 +301,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
331 break; 301 break;
332 302
333 case OP_31_XOP_MTSPR: 303 case OP_31_XOP_MTSPR:
334 sprn = get_sprn(inst);
335 rs = get_rs(inst);
336 switch (sprn) { 304 switch (sprn) {
337 case SPRN_SRR0: 305 case SPRN_SRR0:
338 vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); 306 vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
@@ -384,7 +352,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
384 break; 352 break;
385 353
386 case OP_31_XOP_LWBRX: 354 case OP_31_XOP_LWBRX:
387 rt = get_rt(inst);
388 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); 355 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
389 break; 356 break;
390 357
@@ -392,25 +359,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
392 break; 359 break;
393 360
394 case OP_31_XOP_STWBRX: 361 case OP_31_XOP_STWBRX:
395 rs = get_rs(inst);
396 ra = get_ra(inst);
397 rb = get_rb(inst);
398
399 emulated = kvmppc_handle_store(run, vcpu, 362 emulated = kvmppc_handle_store(run, vcpu,
400 kvmppc_get_gpr(vcpu, rs), 363 kvmppc_get_gpr(vcpu, rs),
401 4, 0); 364 4, 0);
402 break; 365 break;
403 366
404 case OP_31_XOP_LHBRX: 367 case OP_31_XOP_LHBRX:
405 rt = get_rt(inst);
406 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); 368 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
407 break; 369 break;
408 370
409 case OP_31_XOP_STHBRX: 371 case OP_31_XOP_STHBRX:
410 rs = get_rs(inst);
411 ra = get_ra(inst);
412 rb = get_rb(inst);
413
414 emulated = kvmppc_handle_store(run, vcpu, 372 emulated = kvmppc_handle_store(run, vcpu,
415 kvmppc_get_gpr(vcpu, rs), 373 kvmppc_get_gpr(vcpu, rs),
416 2, 0); 374 2, 0);
@@ -423,39 +381,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
423 break; 381 break;
424 382
425 case OP_LWZ: 383 case OP_LWZ:
426 rt = get_rt(inst);
427 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 384 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
428 break; 385 break;
429 386
430 case OP_LWZU: 387 case OP_LWZU:
431 ra = get_ra(inst);
432 rt = get_rt(inst);
433 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 388 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
434 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 389 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
435 break; 390 break;
436 391
437 case OP_LBZ: 392 case OP_LBZ:
438 rt = get_rt(inst);
439 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 393 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
440 break; 394 break;
441 395
442 case OP_LBZU: 396 case OP_LBZU:
443 ra = get_ra(inst);
444 rt = get_rt(inst);
445 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 397 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
446 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 398 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
447 break; 399 break;
448 400
449 case OP_STW: 401 case OP_STW:
450 rs = get_rs(inst);
451 emulated = kvmppc_handle_store(run, vcpu, 402 emulated = kvmppc_handle_store(run, vcpu,
452 kvmppc_get_gpr(vcpu, rs), 403 kvmppc_get_gpr(vcpu, rs),
453 4, 1); 404 4, 1);
454 break; 405 break;
455 406
456 case OP_STWU: 407 case OP_STWU:
457 ra = get_ra(inst);
458 rs = get_rs(inst);
459 emulated = kvmppc_handle_store(run, vcpu, 408 emulated = kvmppc_handle_store(run, vcpu,
460 kvmppc_get_gpr(vcpu, rs), 409 kvmppc_get_gpr(vcpu, rs),
461 4, 1); 410 4, 1);
@@ -463,15 +412,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
463 break; 412 break;
464 413
465 case OP_STB: 414 case OP_STB:
466 rs = get_rs(inst);
467 emulated = kvmppc_handle_store(run, vcpu, 415 emulated = kvmppc_handle_store(run, vcpu,
468 kvmppc_get_gpr(vcpu, rs), 416 kvmppc_get_gpr(vcpu, rs),
469 1, 1); 417 1, 1);
470 break; 418 break;
471 419
472 case OP_STBU: 420 case OP_STBU:
473 ra = get_ra(inst);
474 rs = get_rs(inst);
475 emulated = kvmppc_handle_store(run, vcpu, 421 emulated = kvmppc_handle_store(run, vcpu,
476 kvmppc_get_gpr(vcpu, rs), 422 kvmppc_get_gpr(vcpu, rs),
477 1, 1); 423 1, 1);
@@ -479,39 +425,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
479 break; 425 break;
480 426
481 case OP_LHZ: 427 case OP_LHZ:
482 rt = get_rt(inst);
483 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 428 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
484 break; 429 break;
485 430
486 case OP_LHZU: 431 case OP_LHZU:
487 ra = get_ra(inst);
488 rt = get_rt(inst);
489 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 432 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
490 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 433 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
491 break; 434 break;
492 435
493 case OP_LHA: 436 case OP_LHA:
494 rt = get_rt(inst);
495 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 437 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
496 break; 438 break;
497 439
498 case OP_LHAU: 440 case OP_LHAU:
499 ra = get_ra(inst);
500 rt = get_rt(inst);
501 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 441 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
502 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 442 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
503 break; 443 break;
504 444
505 case OP_STH: 445 case OP_STH:
506 rs = get_rs(inst);
507 emulated = kvmppc_handle_store(run, vcpu, 446 emulated = kvmppc_handle_store(run, vcpu,
508 kvmppc_get_gpr(vcpu, rs), 447 kvmppc_get_gpr(vcpu, rs),
509 2, 1); 448 2, 1);
510 break; 449 break;
511 450
512 case OP_STHU: 451 case OP_STHU:
513 ra = get_ra(inst);
514 rs = get_rs(inst);
515 emulated = kvmppc_handle_store(run, vcpu, 452 emulated = kvmppc_handle_store(run, vcpu,
516 kvmppc_get_gpr(vcpu, rs), 453 kvmppc_get_gpr(vcpu, rs),
517 2, 1); 454 2, 1);