-rw-r--r--  arch/powerpc/include/asm/kvm_e500.h  |   8
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  |   6
-rw-r--r--  arch/powerpc/include/asm/kvm_para.h  |  31
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c    |  15
-rw-r--r--  arch/powerpc/kernel/kvm.c            | 204
-rw-r--r--  arch/powerpc/kvm/book3s.c            |  16
-rw-r--r--  arch/powerpc/kvm/booke.c             |  23
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c     |  12
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S  |  18
-rw-r--r--  arch/powerpc/kvm/e500.c              |  24
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c      |  38
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c          |  83
-rw-r--r--  arch/powerpc/kvm/e500_tlb.h          |  25
-rw-r--r--  arch/powerpc/kvm/emulate.c           |   3
-rw-r--r--  arch/powerpc/kvm/powerpc.c           |   2
15 files changed, 339 insertions(+), 169 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index bc17441535f2..8cd50a514271 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -71,14 +71,6 @@ struct kvmppc_vcpu_e500 {
 	u32 pid[E500_PID_NUM];
 	u32 svr;
 
-	u32 mas0;
-	u32 mas1;
-	u32 mas2;
-	u64 mas7_3;
-	u32 mas4;
-	u32 mas5;
-	u32 mas6;
-
 	/* vcpu id table */
 	struct vcpu_id_table *idt;
 
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bf8af5d5d5dc..bfd0c9912da5 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -318,10 +318,6 @@ struct kvm_vcpu_arch {
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
 	ulong shadow_msr;
-	ulong sprg4;
-	ulong sprg5;
-	ulong sprg6;
-	ulong sprg7;
 	ulong csrr0;
 	ulong csrr1;
 	ulong dsrr0;
@@ -329,7 +325,6 @@ struct kvm_vcpu_arch {
 	ulong mcsrr0;
 	ulong mcsrr1;
 	ulong mcsr;
-	ulong esr;
 	u32 dec;
 	u32 decar;
 	u32 tbl;
@@ -338,7 +333,6 @@ struct kvm_vcpu_arch {
 	u32 tsr;
 	u32 ivor[64];
 	ulong ivpr;
-	u32 pir;
 	u32 pvr;
 
 	u32 shadow_pid;
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 50533f9adf40..ece70fb36513 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -33,11 +33,35 @@ struct kvm_vcpu_arch_shared {
 	__u64 sprg3;
 	__u64 srr0;
 	__u64 srr1;
-	__u64 dar;
+	__u64 dar;		/* dear on BookE */
 	__u64 msr;
 	__u32 dsisr;
 	__u32 int_pending;	/* Tells the guest if we have an interrupt */
 	__u32 sr[16];
+	__u32 mas0;
+	__u32 mas1;
+	__u64 mas7_3;
+	__u64 mas2;
+	__u32 mas4;
+	__u32 mas6;
+	__u32 esr;
+	__u32 pir;
+
+	/*
+	 * SPRG4-7 are user-readable, so we can only keep these consistent
+	 * between the shared area and the real registers when there's an
+	 * intervening exit to KVM.  This also applies to SPRG3 on some
+	 * chips.
+	 *
+	 * This suffices for access by guest userspace, since in PR-mode
+	 * KVM, an exit must occur when changing the guest's MSR[PR].
+	 * If the guest kernel writes to SPRG3-7 via the shared area, it
+	 * must also use the shared area for reading while in kernel space.
+	 */
+	__u64 sprg4;
+	__u64 sprg5;
+	__u64 sprg6;
+	__u64 sprg7;
 };
 
 #define KVM_SC_MAGIC_R0		0x4b564d21 /* "KVM!" */
@@ -47,7 +71,10 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_FEATURE_MAGIC_PAGE	1
 
 #define KVM_MAGIC_FEAT_SR	(1 << 0)
+
+/* MASn, ESR, PIR, and high SPRGs */
+#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7	(1 << 1)
 
 #ifdef __KERNEL__
 
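
Note: to make the new layout concrete, here is an illustrative guest-side view of what the magic page buys (a sketch, not code from this patch; KVM_MAGIC_PAGE is the fixed guest mapping that magic_var() in arch/powerpc/kernel/kvm.c builds its offsets against, and the magic()/pv_* wrappers are hypothetical names):

    /* Sketch: after kvm_check_ins() patches an mfspr/mtspr site, the
     * access becomes a plain load/store into the shared struct at the
     * magic-page mapping -- no trap into the hypervisor. */
    static inline struct kvm_vcpu_arch_shared *magic(void)
    {
        return (struct kvm_vcpu_arch_shared *)KVM_MAGIC_PAGE;
    }

    static inline __u32 pv_read_mas0(void)       /* was: mfspr rX, SPRN_MAS0 */
    {
        return magic()->mas0;
    }

    static inline void pv_write_mas0(__u32 val)  /* was: mtspr SPRN_MAS0, rX */
    {
        magic()->mas0 = val;
    }

The SPRG4-7 caveat in the comment above is why guest-kernel reads of those registers must also go through the shared area once the kernel starts writing it: hardware reads don't trap, so KVM never gets a chance to copy fresh values back.
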
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc1..e7bfcf81b746 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -426,16 +426,23 @@ int main(void)
 	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
 	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
 #endif
-	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
-	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
-	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
-	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+	DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
+	DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
+	DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
+	DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 	DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
+	DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
+	DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
+	DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
+	DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
+	DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
+	DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
+
 	/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
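
For readers unfamiliar with asm-offsets.c: each DEFINE(sym, val) becomes a `#define sym val` in the generated asm-offsets.h, letting assembly index C structures by constant offset. A toy, self-contained restatement of the two-level lookup the new constants enable (the toy structs stand in for kvm_vcpu and kvm_vcpu_arch_shared; the actual offsets are whatever the compiler lays out):

    #include <stddef.h>
    #include <stdio.h>

    struct shared { unsigned long long sprg4, sprg5, sprg6, sprg7; };
    struct vcpu   { int id; struct shared *shared; };

    #define VCPU_SHARED       offsetof(struct vcpu, shared)
    #define VCPU_SHARED_SPRG4 offsetof(struct shared, sprg4)

    int main(void)
    {
        /* booke_interrupts.S below does the same thing in asm:
         *   lwz r5, VCPU_SHARED(r4)        -- follow the pointer
         *   lwz r3, VCPU_SHARED_SPRG4(r5)  -- load at a fixed offset */
        printf("VCPU_SHARED=%zu VCPU_SHARED_SPRG4=%zu\n",
               VCPU_SHARED, VCPU_SHARED_SPRG4);
        return 0;
    }

Note the shift from `offsetof(struct kvm_vcpu, arch.sprgN)` to `offsetof(struct kvm_vcpu_arch_shared, sprgN)`: the SPRG constants are now offsets into the pointed-to shared page, which is why the assembly grows an extra load.
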
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 06b15ee997f7..04d4b5aa6dca 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -49,23 +49,17 @@
 #define KVM_RT_30		0x03c00000
 #define KVM_MASK_RB		0x0000f800
 #define KVM_INST_MFMSR		0x7c0000a6
-#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
-#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
-#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
-#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
-#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
-#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
-#define KVM_INST_MFSPR_DAR	0x7c1302a6
-#define KVM_INST_MFSPR_DSISR	0x7c1202a6
+
+#define SPR_FROM		0
+#define SPR_TO			0x100
+
+#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
+				    (((sprn) & 0x1f) << 16) | \
+				    (((sprn) & 0x3e0) << 6) | \
+				    (moveto))
 
-#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
-#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
-#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
-#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
-#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
-#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
-#define KVM_INST_MTSPR_DAR	0x7c1303a6
-#define KVM_INST_MTSPR_DSISR	0x7c1203a6
+#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
+#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
 
 #define KVM_INST_TLBSYNC	0x7c00046c
 #define KVM_INST_MTMSRD_L0	0x7c000164
@@ -440,56 +434,191 @@ static void kvm_check_ins(u32 *inst, u32 features)
 	case KVM_INST_MFMSR:
 		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG0:
+	case KVM_INST_MFSPR(SPRN_SPRG0):
 		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG1:
+	case KVM_INST_MFSPR(SPRN_SPRG1):
 		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG2:
+	case KVM_INST_MFSPR(SPRN_SPRG2):
 		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG3:
+	case KVM_INST_MFSPR(SPRN_SPRG3):
 		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR0:
+	case KVM_INST_MFSPR(SPRN_SRR0):
 		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR1:
+	case KVM_INST_MFSPR(SPRN_SRR1):
 		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_DEAR):
+#else
+	case KVM_INST_MFSPR(SPRN_DAR):
+#endif
 		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DSISR:
+	case KVM_INST_MFSPR(SPRN_DSISR):
 		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
 		break;
 
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_MFSPR(SPRN_MAS0):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS1):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS2):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS3):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_MFSPR(SPRN_SPRG4):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG4R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG5):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG5R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG6):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG6R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG7):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG7R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_ESR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
+		break;
+#endif
+
+	case KVM_INST_MFSPR(SPRN_PIR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
+		break;
+
+
 	/* Stores */
-	case KVM_INST_MTSPR_SPRG0:
+	case KVM_INST_MTSPR(SPRN_SPRG0):
 		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG1:
+	case KVM_INST_MTSPR(SPRN_SPRG1):
 		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG2:
+	case KVM_INST_MTSPR(SPRN_SPRG2):
 		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG3:
+	case KVM_INST_MTSPR(SPRN_SPRG3):
 		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR0:
+	case KVM_INST_MTSPR(SPRN_SRR0):
 		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR1:
+	case KVM_INST_MTSPR(SPRN_SRR1):
 		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MTSPR(SPRN_DEAR):
+#else
+	case KVM_INST_MTSPR(SPRN_DAR):
+#endif
 		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DSISR:
+	case KVM_INST_MTSPR(SPRN_DSISR):
 		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
 		break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_MTSPR(SPRN_MAS0):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS1):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS2):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS3):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_MTSPR(SPRN_SPRG4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG5):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MTSPR(SPRN_ESR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
+		break;
+#endif
 
 	/* Nops */
 	case KVM_INST_TLBSYNC:
@@ -556,9 +685,18 @@ static void kvm_use_magic_page(void)
 	start = (void*)_stext;
 	end = (void*)_etext;
 
+	/*
+	 * Being interrupted in the middle of patching would
+	 * be bad for SPRG4-7, which KVM can't keep in sync
+	 * with emulated accesses because reads don't trap.
+	 */
+	local_irq_disable();
+
 	for (p = start; p < end; p++)
 		kvm_check_ins(p, features);
 
+	local_irq_enable();
+
 	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
 	       kvm_patching_worked ? "worked" : "failed");
 }
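
A quick sanity check that KVM_INST_SPR() reproduces the hard-coded constants it replaces: the mfspr/mtspr encoding swaps the two 5-bit halves of the SPR number, and bit 0x100 of the extended opcode turns mfspr into mtspr; SPRN_SPRG0 is 0x110. Standalone restatement (macro copied from the hunk above):

    #include <assert.h>
    #include <stdint.h>

    #define SPR_FROM 0
    #define SPR_TO   0x100
    #define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
                                        (((sprn) & 0x1f) << 16) | \
                                        (((sprn) & 0x3e0) << 6) | \
                                        (moveto))

    int main(void)
    {
        /* Matches the removed KVM_INST_MFSPR_SPRG0 / KVM_INST_MTSPR_SPRG0:
         * (0x110 & 0x1f) << 16 = 0x100000, (0x110 & 0x3e0) << 6 = 0x4000. */
        assert(KVM_INST_SPR(0x110, SPR_FROM) == 0x7c1042a6);
        assert(KVM_INST_SPR(0x110, SPR_TO)   == 0x7c1043a6);
        return 0;
    }

The per-case `features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7` checks keep a new guest from patching these sites on an old host that never updates the corresponding shared fields.
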
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 73fc9f046107..5398744cd773 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -423,10 +423,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->sprg1 = vcpu->arch.shared->sprg1;
 	regs->sprg2 = vcpu->arch.shared->sprg2;
 	regs->sprg3 = vcpu->arch.shared->sprg3;
-	regs->sprg4 = vcpu->arch.sprg4;
-	regs->sprg5 = vcpu->arch.sprg5;
-	regs->sprg6 = vcpu->arch.sprg6;
-	regs->sprg7 = vcpu->arch.sprg7;
+	regs->sprg4 = vcpu->arch.shared->sprg4;
+	regs->sprg5 = vcpu->arch.shared->sprg5;
+	regs->sprg6 = vcpu->arch.shared->sprg6;
+	regs->sprg7 = vcpu->arch.shared->sprg7;
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -450,10 +450,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	vcpu->arch.shared->sprg1 = regs->sprg1;
 	vcpu->arch.shared->sprg2 = regs->sprg2;
 	vcpu->arch.shared->sprg3 = regs->sprg3;
-	vcpu->arch.sprg4 = regs->sprg4;
-	vcpu->arch.sprg5 = regs->sprg5;
-	vcpu->arch.sprg6 = regs->sprg6;
-	vcpu->arch.sprg7 = regs->sprg7;
+	vcpu->arch.shared->sprg4 = regs->sprg4;
+	vcpu->arch.shared->sprg5 = regs->sprg5;
+	vcpu->arch.shared->sprg6 = regs->sprg6;
+	vcpu->arch.shared->sprg7 = regs->sprg7;
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8dfc59a8a715..50803dd0b8f2 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -270,7 +270,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
 		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
 		if (update_esr == true)
-			vcpu->arch.esr = vcpu->arch.queued_esr;
+			vcpu->arch.shared->esr = vcpu->arch.queued_esr;
 		if (update_dear == true)
 			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
 		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
@@ -644,6 +644,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.pc = 0;
 	vcpu->arch.shared->msr = 0;
 	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+	vcpu->arch.shared->pir = vcpu->vcpu_id;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
@@ -678,10 +679,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->sprg1 = vcpu->arch.shared->sprg1;
 	regs->sprg2 = vcpu->arch.shared->sprg2;
 	regs->sprg3 = vcpu->arch.shared->sprg3;
-	regs->sprg4 = vcpu->arch.sprg4;
-	regs->sprg5 = vcpu->arch.sprg5;
-	regs->sprg6 = vcpu->arch.sprg6;
-	regs->sprg7 = vcpu->arch.sprg7;
+	regs->sprg4 = vcpu->arch.shared->sprg4;
+	regs->sprg5 = vcpu->arch.shared->sprg5;
+	regs->sprg6 = vcpu->arch.shared->sprg6;
+	regs->sprg7 = vcpu->arch.shared->sprg7;
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -706,10 +707,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	vcpu->arch.shared->sprg1 = regs->sprg1;
 	vcpu->arch.shared->sprg2 = regs->sprg2;
 	vcpu->arch.shared->sprg3 = regs->sprg3;
-	vcpu->arch.sprg4 = regs->sprg4;
-	vcpu->arch.sprg5 = regs->sprg5;
-	vcpu->arch.sprg6 = regs->sprg6;
-	vcpu->arch.sprg7 = regs->sprg7;
+	vcpu->arch.shared->sprg4 = regs->sprg4;
+	vcpu->arch.shared->sprg5 = regs->sprg5;
+	vcpu->arch.shared->sprg6 = regs->sprg6;
+	vcpu->arch.shared->sprg7 = regs->sprg7;
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -727,7 +728,7 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
 	sregs->u.e.csrr0 = vcpu->arch.csrr0;
 	sregs->u.e.csrr1 = vcpu->arch.csrr1;
 	sregs->u.e.mcsr = vcpu->arch.mcsr;
-	sregs->u.e.esr = vcpu->arch.esr;
+	sregs->u.e.esr = vcpu->arch.shared->esr;
 	sregs->u.e.dear = vcpu->arch.shared->dar;
 	sregs->u.e.tsr = vcpu->arch.tsr;
 	sregs->u.e.tcr = vcpu->arch.tcr;
@@ -745,7 +746,7 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
 	vcpu->arch.csrr0 = sregs->u.e.csrr0;
 	vcpu->arch.csrr1 = sregs->u.e.csrr1;
 	vcpu->arch.mcsr = sregs->u.e.mcsr;
-	vcpu->arch.esr = sregs->u.e.esr;
+	vcpu->arch.shared->esr = sregs->u.e.esr;
 	vcpu->arch.shared->dar = sregs->u.e.dear;
 	vcpu->arch.vrsave = sregs->u.e.vrsave;
 	vcpu->arch.tcr = sregs->u.e.tcr;
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 1260f5f24c0c..bae9288ac1e1 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -107,7 +107,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_DEAR:
 		vcpu->arch.shared->dar = spr_val; break;
 	case SPRN_ESR:
-		vcpu->arch.esr = spr_val; break;
+		vcpu->arch.shared->esr = spr_val; break;
 	case SPRN_DBCR0:
 		vcpu->arch.dbcr0 = spr_val; break;
 	case SPRN_DBCR1:
@@ -125,13 +125,13 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	 * loaded into the real SPRGs when resuming the
 	 * guest. */
 	case SPRN_SPRG4:
-		vcpu->arch.sprg4 = spr_val; break;
+		vcpu->arch.shared->sprg4 = spr_val; break;
 	case SPRN_SPRG5:
-		vcpu->arch.sprg5 = spr_val; break;
+		vcpu->arch.shared->sprg5 = spr_val; break;
 	case SPRN_SPRG6:
-		vcpu->arch.sprg6 = spr_val; break;
+		vcpu->arch.shared->sprg6 = spr_val; break;
 	case SPRN_SPRG7:
-		vcpu->arch.sprg7 = spr_val; break;
+		vcpu->arch.shared->sprg7 = spr_val; break;
 
 	case SPRN_IVPR:
 		vcpu->arch.ivpr = spr_val;
@@ -202,7 +202,7 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_DEAR:
 		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
 	case SPRN_ESR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
 	case SPRN_DBCR0:
 		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
 	case SPRN_DBCR1:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 42f2fb1f66e9..10d8ef602e5c 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -402,19 +402,25 @@ lightweight_exit:
 	/* Save vcpu pointer for the exception handlers. */
 	mtspr	SPRN_SPRG_WVCPU, r4
 
+	lwz	r5, VCPU_SHARED(r4)
+
 	/* Can't switch the stack pointer until after IVPR is switched,
 	 * because host interrupt handlers would get confused. */
 	lwz	r1, VCPU_GPR(r1)(r4)
 
-	/* Host interrupt handlers may have clobbered these guest-readable
-	 * SPRGs, so we need to reload them here with the guest's values. */
-	lwz	r3, VCPU_SPRG4(r4)
+	/*
+	 * Host interrupt handlers may have clobbered these
+	 * guest-readable SPRGs, or the guest kernel may have
+	 * written directly to the shared area, so we
+	 * need to reload them here with the guest's values.
+	 */
+	lwz	r3, VCPU_SHARED_SPRG4(r5)
 	mtspr	SPRN_SPRG4W, r3
-	lwz	r3, VCPU_SPRG5(r4)
+	lwz	r3, VCPU_SHARED_SPRG5(r5)
 	mtspr	SPRN_SPRG5W, r3
-	lwz	r3, VCPU_SPRG6(r4)
+	lwz	r3, VCPU_SHARED_SPRG6(r5)
 	mtspr	SPRN_SPRG6W, r3
-	lwz	r3, VCPU_SPRG7(r4)
+	lwz	r3, VCPU_SHARED_SPRG7(r5)
 	mtspr	SPRN_SPRG7W, r3
 
 #ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ac3c4bf21677..709d82f956e3 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -115,12 +115,12 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
 	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
 
-	sregs->u.e.mas0 = vcpu_e500->mas0;
-	sregs->u.e.mas1 = vcpu_e500->mas1;
-	sregs->u.e.mas2 = vcpu_e500->mas2;
-	sregs->u.e.mas7_3 = vcpu_e500->mas7_3;
-	sregs->u.e.mas4 = vcpu_e500->mas4;
-	sregs->u.e.mas6 = vcpu_e500->mas6;
+	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+	sregs->u.e.mas6 = vcpu->arch.shared->mas6;
 
 	sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
 	sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
@@ -148,12 +148,12 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	}
 
 	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
-		vcpu_e500->mas0 = sregs->u.e.mas0;
-		vcpu_e500->mas1 = sregs->u.e.mas1;
-		vcpu_e500->mas2 = sregs->u.e.mas2;
-		vcpu_e500->mas7_3 = sregs->u.e.mas7_3;
-		vcpu_e500->mas4 = sregs->u.e.mas4;
-		vcpu_e500->mas6 = sregs->u.e.mas6;
+		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
 	}
 
 	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
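
Userspace is unaffected by the storage move: KVM_GET_SREGS/KVM_SET_SREGS still carry MAS state in sregs.u.e. A minimal sketch of the round trip (error handling omitted; vcpu_fd is assumed to be an open KVM vcpu descriptor):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    void touch_mas6(int vcpu_fd)
    {
        struct kvm_sregs sregs;

        ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);  /* fills u.e.mas0..mas7_3 */
        sregs.u.e.mas6 &= ~0x1;                 /* e.g. clear the SAS bit */
        ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);  /* now lands in the shared page */
    }

Since GET reports the features mask (including KVM_SREGS_E_ARCH206_MMU), a get-modify-set cycle keeps the MAS update visible to set_sregs's feature check above.
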
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index e0d36099c756..6d0b2bd54fb0 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -89,22 +89,22 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 			return EMULATE_FAIL;
 		vcpu_e500->pid[2] = spr_val; break;
 	case SPRN_MAS0:
-		vcpu_e500->mas0 = spr_val; break;
+		vcpu->arch.shared->mas0 = spr_val; break;
 	case SPRN_MAS1:
-		vcpu_e500->mas1 = spr_val; break;
+		vcpu->arch.shared->mas1 = spr_val; break;
 	case SPRN_MAS2:
-		vcpu_e500->mas2 = spr_val; break;
+		vcpu->arch.shared->mas2 = spr_val; break;
 	case SPRN_MAS3:
-		vcpu_e500->mas7_3 &= ~(u64)0xffffffff;
-		vcpu_e500->mas7_3 |= spr_val;
+		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
+		vcpu->arch.shared->mas7_3 |= spr_val;
 		break;
 	case SPRN_MAS4:
-		vcpu_e500->mas4 = spr_val; break;
+		vcpu->arch.shared->mas4 = spr_val; break;
 	case SPRN_MAS6:
-		vcpu_e500->mas6 = spr_val; break;
+		vcpu->arch.shared->mas6 = spr_val; break;
 	case SPRN_MAS7:
-		vcpu_e500->mas7_3 &= (u64)0xffffffff;
-		vcpu_e500->mas7_3 |= (u64)spr_val << 32;
+		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
+		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
 		break;
 	case SPRN_L1CSR0:
 		vcpu_e500->l1csr0 = spr_val;
@@ -147,6 +147,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
+	unsigned long val;
 
 	switch (sprn) {
 	case SPRN_PID:
@@ -156,20 +157,23 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_PID2:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
 	case SPRN_MAS0:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
 	case SPRN_MAS1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
 	case SPRN_MAS2:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
 	case SPRN_MAS3:
-		kvmppc_set_gpr(vcpu, rt, (u32)vcpu_e500->mas7_3); break;
+		val = (u32)vcpu->arch.shared->mas7_3;
+		kvmppc_set_gpr(vcpu, rt, val);
+		break;
 	case SPRN_MAS4:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
 	case SPRN_MAS6:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
 	case SPRN_MAS7:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7_3 >> 32); break;
-
+		val = vcpu->arch.shared->mas7_3 >> 32;
+		kvmppc_set_gpr(vcpu, rt, val);
+		break;
 	case SPRN_TLB0CFG:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
 	case SPRN_TLB1CFG:
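
The MAS3/MAS7 cases above split one 64-bit field: mas7_3 keeps MAS7 in the high word and MAS3 in the low word. A standalone restatement of the packing, mirroring the masks used in the emulation code (the sample values are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t write_mas3(uint64_t mas7_3, uint32_t val)
    {
        mas7_3 &= ~(uint64_t)0xffffffff;   /* clear the MAS3 (low) half */
        return mas7_3 | val;
    }

    static uint64_t write_mas7(uint64_t mas7_3, uint32_t val)
    {
        mas7_3 &= (uint64_t)0xffffffff;    /* clear the MAS7 (high) half */
        return mas7_3 | ((uint64_t)val << 32);
    }

    int main(void)
    {
        uint64_t m = 0;
        m = write_mas3(m, 0x00abc03f);        /* low RPN bits + permissions */
        m = write_mas7(m, 0x0000000f);        /* RPN bits above bit 31 */
        assert((uint32_t)m == 0x00abc03f);    /* MAS3 readback */
        assert((uint32_t)(m >> 32) == 0xf);   /* MAS7 readback */
        return 0;
    }

Keeping the pair in one u64 also makes the guest's MAS7+MAS3 physical address directly usable as a single quantity on the host side.
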
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 6fefb9144f23..9cd124a11acd 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -428,13 +428,14 @@ static int htlb0_set_base(gva_t addr)
 					     host_tlb_params[0].ways);
 }
 
-static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
+static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
 {
-	unsigned int esel = get_tlb_esel_bit(vcpu_e500);
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+	int esel = get_tlb_esel_bit(vcpu);
 
 	if (tlbsel == 0) {
 		esel &= vcpu_e500->gtlb_params[0].ways - 1;
-		esel += gtlb0_set_base(vcpu_e500, vcpu_e500->mas2);
+		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
 	} else {
 		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
 	}
@@ -545,20 +546,20 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 	int tlbsel;
 
 	/* since we only have two TLBs, only lower bit is used. */
-	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
+	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
 	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
-	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
-	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
+	pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
+	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
 
-	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
 		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
+	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
 		| MAS1_TID(vcpu_e500->pid[pidsel])
 		| MAS1_TSIZE(tsized);
-	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
-		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
-	vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
-	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
+	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
+		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
+	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
 		| (get_cur_pid(vcpu) << 16)
 		| (as ? MAS6_SAS : 0);
 }
@@ -844,15 +845,15 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 	int tlbsel, esel;
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 
-	tlbsel = get_tlb_tlbsel(vcpu_e500);
-	esel = get_tlb_esel(vcpu_e500, tlbsel);
+	tlbsel = get_tlb_tlbsel(vcpu);
+	esel = get_tlb_esel(vcpu, tlbsel);
 
 	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
-	vcpu_e500->mas0 &= ~MAS0_NV(~0);
-	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-	vcpu_e500->mas1 = gtlbe->mas1;
-	vcpu_e500->mas2 = gtlbe->mas2;
-	vcpu_e500->mas7_3 = gtlbe->mas7_3;
+	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
+	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
+	vcpu->arch.shared->mas1 = gtlbe->mas1;
+	vcpu->arch.shared->mas2 = gtlbe->mas2;
+	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
 
 	return EMULATE_DONE;
 }
@@ -860,8 +861,8 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	int as = !!get_cur_sas(vcpu_e500);
-	unsigned int pid = get_cur_spid(vcpu_e500);
+	int as = !!get_cur_sas(vcpu);
+	unsigned int pid = get_cur_spid(vcpu);
 	int esel, tlbsel;
 	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
 	gva_t ea;
@@ -879,26 +880,30 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	if (gtlbe) {
 		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
 
-		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
+		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
 			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-		vcpu_e500->mas1 = gtlbe->mas1;
-		vcpu_e500->mas2 = gtlbe->mas2;
-		vcpu_e500->mas7_3 = gtlbe->mas7_3;
+		vcpu->arch.shared->mas1 = gtlbe->mas1;
+		vcpu->arch.shared->mas2 = gtlbe->mas2;
+		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
 	} else {
 		int victim;
 
 		/* since we only have two TLBs, only lower bit is used. */
-		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
+		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
 		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
 
-		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
+			| MAS0_ESEL(victim)
 			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
-			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
-			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
-		vcpu_e500->mas2 &= MAS2_EPN;
-		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
-		vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+		vcpu->arch.shared->mas1 =
+			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
+			| (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
+			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
+		vcpu->arch.shared->mas2 &= MAS2_EPN;
+		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
+					   MAS2_ATTRIB_MASK;
+		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
+					     MAS3_U2 | MAS3_U3;
 	}
 
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
@@ -929,19 +934,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	int tlbsel, esel;
 
-	tlbsel = get_tlb_tlbsel(vcpu_e500);
-	esel = get_tlb_esel(vcpu_e500, tlbsel);
+	tlbsel = get_tlb_tlbsel(vcpu);
+	esel = get_tlb_esel(vcpu, tlbsel);
 
 	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
 	if (get_tlb_v(gtlbe))
 		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
 
-	gtlbe->mas1 = vcpu_e500->mas1;
-	gtlbe->mas2 = vcpu_e500->mas2;
-	gtlbe->mas7_3 = vcpu_e500->mas7_3;
+	gtlbe->mas1 = vcpu->arch.shared->mas1;
+	gtlbe->mas2 = vcpu->arch.shared->mas2;
+	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
-	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
+	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
 			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
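
The shifts in kvmppc_e500_deliver_tlb_miss() above pull the miss defaults out of MAS4. Restated standalone (field positions as used in the hunk; the struct and function names are descriptive, not kernel identifiers):

    #include <stdint.h>

    struct mas4_defaults {
        unsigned int tlbsel;   /* which TLB array gets the replacement */
        unsigned int pidsel;   /* which PID register supplies the TID */
        unsigned int tsized;   /* default page size for the new entry */
    };

    static struct mas4_defaults decode_mas4(uint32_t mas4)
    {
        struct mas4_defaults d = {
            .tlbsel = (mas4 >> 28) & 0x1,  /* only two TLBs: low bit only */
            .pidsel = (mas4 >> 16) & 0xf,
            .tsized = (mas4 >> 7) & 0x1f,
        };
        return d;
    }

With MASn living in the shared page, this whole miss-reporting sequence becomes visible to the guest without it issuing a single trapping mfspr.
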
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 2c296407e759..5c6d2d7bf058 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -121,38 +121,33 @@ static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
 	return !!(vcpu->arch.shared->msr & MSR_PR);
 }
 
-static inline unsigned int get_cur_spid(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
 {
-	return (vcpu_e500->mas6 >> 16) & 0xff;
+	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
 }
 
-static inline unsigned int get_cur_sas(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
 {
-	return vcpu_e500->mas6 & 0x1;
+	return vcpu->arch.shared->mas6 & 0x1;
 }
 
-static inline unsigned int get_tlb_tlbsel(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
 {
 	/*
 	 * Manual says that tlbsel has 2 bits wide.
 	 * Since we only have two TLBs, only lower bit is used.
 	 */
-	return (vcpu_e500->mas0 >> 28) & 0x1;
+	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
 }
 
-static inline unsigned int get_tlb_nv_bit(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
 {
-	return vcpu_e500->mas0 & 0xfff;
+	return vcpu->arch.shared->mas0 & 0xfff;
 }
 
-static inline unsigned int get_tlb_esel_bit(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
 {
-	return (vcpu_e500->mas0 >> 16) & 0xfff;
+	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
 }
 
 static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index b6df56dd93ba..bda052e2264b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -162,7 +162,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case OP_TRAP_64:
 		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
 #else
-		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
+		kvmppc_core_queue_program(vcpu,
+					  vcpu->arch.shared->esr | ESR_PTR);
 #endif
 		advance = 0;
 		break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7411bdd8ff6f..d02e4c84e213 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -66,7 +66,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 		vcpu->arch.magic_page_pa = param1;
 		vcpu->arch.magic_page_ea = param2;
 
-		r2 = KVM_MAGIC_FEAT_SR;
+		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
 
 		r = HC_EV_SUCCESS;
 		break;
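
Guest and host meet at this r2 mask: the magic-page hypercall hands the feature bits back to the guest, and kvm_use_magic_page()'s calls to kvm_check_ins() gate every new patch site on KVM_MAGIC_FEAT_MAS0_TO_SPRG7. A guest-side sketch of the resulting handshake (the hypercall wrapper name is hypothetical; the real flow lives in arch/powerpc/kernel/kvm.c):

    /* Sketch: features arrive in r2 from the magic-page hypercall and
     * gate each new patch site; the MAS/ESR/PIR/SPRG4-7 cases in
     * kvm_check_ins() are no-ops unless the bit is set. */
    static void sketch_use_magic_page(u32 *start, u32 *end)
    {
        u32 features = kvm_hypercall_features();  /* hypothetical wrapper */
        u32 *p;

        for (p = start; p < end; p++)
            kvm_check_ins(p, features);
    }

An old guest on a new host never learns the bit and keeps trapping; a new guest on an old host sees the bit clear and skips the new sites. Either mix stays correct, just slower.
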