about summary refs log tree commit diff stats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
authorScott Wood <scottwood@freescale.com>2011-11-08 19:23:30 -0500
committerAvi Kivity <avi@redhat.com>2012-03-05 07:52:26 -0500
commitb59049720dd95021dfe0d9f4e1fa9458a67cfe29 (patch)
tree3b54577e12ba4a84bec409518c6c8f399ebba6e0 /arch/powerpc/kernel
parent940b45ec18cf00046b8b28299d97066a2c43d559 (diff)
KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
This allows additional registers to be accessed by the guest in PR-mode KVM without trapping. SPRG4-7 are readable from userspace. On booke, KVM will sync these registers when it enters the guest, so that accesses from guest userspace will work. The guest kernel, OTOH, must consistently use either the real registers or the shared area between exits. This also applies to the already-paravirted SPRG3. On non-booke, it's not clear to what extent SPRG4-7 are supported (they're not architected for book3s, but exist on at least some classic chips). They are copied in the get/set regs ioctls, but I do not see any non-booke emulation. I also do not see any syncing with real registers (in PR-mode) including the user-readable SPRG3. This patch should not make that situation any worse. Signed-off-by: Scott Wood <scottwood@freescale.com> Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/asm-offsets.c15
-rw-r--r--arch/powerpc/kernel/kvm.c204
2 files changed, 182 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc1..e7bfcf81b746 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -426,16 +426,23 @@ int main(void)
426 DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); 426 DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
427 DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); 427 DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
428#endif 428#endif
429 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); 429 DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
430 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); 430 DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
431 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); 431 DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
432 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); 432 DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
433 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); 433 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
434 DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); 434 DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
435 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); 435 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
436 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); 436 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
437 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); 437 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
438 438
439 DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
440 DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
441 DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
442 DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
443 DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
444 DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
445
439 /* book3s */ 446 /* book3s */
440#ifdef CONFIG_KVM_BOOK3S_64_HV 447#ifdef CONFIG_KVM_BOOK3S_64_HV
441 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); 448 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 06b15ee997f7..04d4b5aa6dca 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -49,23 +49,17 @@
49#define KVM_RT_30 0x03c00000 49#define KVM_RT_30 0x03c00000
50#define KVM_MASK_RB 0x0000f800 50#define KVM_MASK_RB 0x0000f800
51#define KVM_INST_MFMSR 0x7c0000a6 51#define KVM_INST_MFMSR 0x7c0000a6
52#define KVM_INST_MFSPR_SPRG0 0x7c1042a6 52
53#define KVM_INST_MFSPR_SPRG1 0x7c1142a6 53#define SPR_FROM 0
54#define KVM_INST_MFSPR_SPRG2 0x7c1242a6 54#define SPR_TO 0x100
55#define KVM_INST_MFSPR_SPRG3 0x7c1342a6 55
56#define KVM_INST_MFSPR_SRR0 0x7c1a02a6 56#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
57#define KVM_INST_MFSPR_SRR1 0x7c1b02a6 57 (((sprn) & 0x1f) << 16) | \
58#define KVM_INST_MFSPR_DAR 0x7c1302a6 58 (((sprn) & 0x3e0) << 6) | \
59#define KVM_INST_MFSPR_DSISR 0x7c1202a6 59 (moveto))
60 60
61#define KVM_INST_MTSPR_SPRG0 0x7c1043a6 61#define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM)
62#define KVM_INST_MTSPR_SPRG1 0x7c1143a6 62#define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO)
63#define KVM_INST_MTSPR_SPRG2 0x7c1243a6
64#define KVM_INST_MTSPR_SPRG3 0x7c1343a6
65#define KVM_INST_MTSPR_SRR0 0x7c1a03a6
66#define KVM_INST_MTSPR_SRR1 0x7c1b03a6
67#define KVM_INST_MTSPR_DAR 0x7c1303a6
68#define KVM_INST_MTSPR_DSISR 0x7c1203a6
69 63
70#define KVM_INST_TLBSYNC 0x7c00046c 64#define KVM_INST_TLBSYNC 0x7c00046c
71#define KVM_INST_MTMSRD_L0 0x7c000164 65#define KVM_INST_MTMSRD_L0 0x7c000164
@@ -440,56 +434,191 @@ static void kvm_check_ins(u32 *inst, u32 features)
440 case KVM_INST_MFMSR: 434 case KVM_INST_MFMSR:
441 kvm_patch_ins_ld(inst, magic_var(msr), inst_rt); 435 kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
442 break; 436 break;
443 case KVM_INST_MFSPR_SPRG0: 437 case KVM_INST_MFSPR(SPRN_SPRG0):
444 kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt); 438 kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
445 break; 439 break;
446 case KVM_INST_MFSPR_SPRG1: 440 case KVM_INST_MFSPR(SPRN_SPRG1):
447 kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt); 441 kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
448 break; 442 break;
449 case KVM_INST_MFSPR_SPRG2: 443 case KVM_INST_MFSPR(SPRN_SPRG2):
450 kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt); 444 kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
451 break; 445 break;
452 case KVM_INST_MFSPR_SPRG3: 446 case KVM_INST_MFSPR(SPRN_SPRG3):
453 kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt); 447 kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
454 break; 448 break;
455 case KVM_INST_MFSPR_SRR0: 449 case KVM_INST_MFSPR(SPRN_SRR0):
456 kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt); 450 kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
457 break; 451 break;
458 case KVM_INST_MFSPR_SRR1: 452 case KVM_INST_MFSPR(SPRN_SRR1):
459 kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt); 453 kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
460 break; 454 break;
461 case KVM_INST_MFSPR_DAR: 455#ifdef CONFIG_BOOKE
456 case KVM_INST_MFSPR(SPRN_DEAR):
457#else
458 case KVM_INST_MFSPR(SPRN_DAR):
459#endif
462 kvm_patch_ins_ld(inst, magic_var(dar), inst_rt); 460 kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
463 break; 461 break;
464 case KVM_INST_MFSPR_DSISR: 462 case KVM_INST_MFSPR(SPRN_DSISR):
465 kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); 463 kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
466 break; 464 break;
467 465
466#ifdef CONFIG_PPC_BOOK3E_MMU
467 case KVM_INST_MFSPR(SPRN_MAS0):
468 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
469 kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
470 break;
471 case KVM_INST_MFSPR(SPRN_MAS1):
472 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
473 kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
474 break;
475 case KVM_INST_MFSPR(SPRN_MAS2):
476 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
477 kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
478 break;
479 case KVM_INST_MFSPR(SPRN_MAS3):
480 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
481 kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
482 break;
483 case KVM_INST_MFSPR(SPRN_MAS4):
484 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
485 kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
486 break;
487 case KVM_INST_MFSPR(SPRN_MAS6):
488 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
489 kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
490 break;
491 case KVM_INST_MFSPR(SPRN_MAS7):
492 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
493 kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
494 break;
495#endif /* CONFIG_PPC_BOOK3E_MMU */
496
497 case KVM_INST_MFSPR(SPRN_SPRG4):
498#ifdef CONFIG_BOOKE
499 case KVM_INST_MFSPR(SPRN_SPRG4R):
500#endif
501 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
502 kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
503 break;
504 case KVM_INST_MFSPR(SPRN_SPRG5):
505#ifdef CONFIG_BOOKE
506 case KVM_INST_MFSPR(SPRN_SPRG5R):
507#endif
508 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
509 kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
510 break;
511 case KVM_INST_MFSPR(SPRN_SPRG6):
512#ifdef CONFIG_BOOKE
513 case KVM_INST_MFSPR(SPRN_SPRG6R):
514#endif
515 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
516 kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
517 break;
518 case KVM_INST_MFSPR(SPRN_SPRG7):
519#ifdef CONFIG_BOOKE
520 case KVM_INST_MFSPR(SPRN_SPRG7R):
521#endif
522 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
523 kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
524 break;
525
526#ifdef CONFIG_BOOKE
527 case KVM_INST_MFSPR(SPRN_ESR):
528 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
529 kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
530 break;
531#endif
532
533 case KVM_INST_MFSPR(SPRN_PIR):
534 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
535 kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
536 break;
537
538
468 /* Stores */ 539 /* Stores */
469 case KVM_INST_MTSPR_SPRG0: 540 case KVM_INST_MTSPR(SPRN_SPRG0):
470 kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt); 541 kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
471 break; 542 break;
472 case KVM_INST_MTSPR_SPRG1: 543 case KVM_INST_MTSPR(SPRN_SPRG1):
473 kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt); 544 kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
474 break; 545 break;
475 case KVM_INST_MTSPR_SPRG2: 546 case KVM_INST_MTSPR(SPRN_SPRG2):
476 kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt); 547 kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
477 break; 548 break;
478 case KVM_INST_MTSPR_SPRG3: 549 case KVM_INST_MTSPR(SPRN_SPRG3):
479 kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt); 550 kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
480 break; 551 break;
481 case KVM_INST_MTSPR_SRR0: 552 case KVM_INST_MTSPR(SPRN_SRR0):
482 kvm_patch_ins_std(inst, magic_var(srr0), inst_rt); 553 kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
483 break; 554 break;
484 case KVM_INST_MTSPR_SRR1: 555 case KVM_INST_MTSPR(SPRN_SRR1):
485 kvm_patch_ins_std(inst, magic_var(srr1), inst_rt); 556 kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
486 break; 557 break;
487 case KVM_INST_MTSPR_DAR: 558#ifdef CONFIG_BOOKE
559 case KVM_INST_MTSPR(SPRN_DEAR):
560#else
561 case KVM_INST_MTSPR(SPRN_DAR):
562#endif
488 kvm_patch_ins_std(inst, magic_var(dar), inst_rt); 563 kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
489 break; 564 break;
490 case KVM_INST_MTSPR_DSISR: 565 case KVM_INST_MTSPR(SPRN_DSISR):
491 kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); 566 kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
492 break; 567 break;
568#ifdef CONFIG_PPC_BOOK3E_MMU
569 case KVM_INST_MTSPR(SPRN_MAS0):
570 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
571 kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
572 break;
573 case KVM_INST_MTSPR(SPRN_MAS1):
574 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
575 kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
576 break;
577 case KVM_INST_MTSPR(SPRN_MAS2):
578 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
579 kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
580 break;
581 case KVM_INST_MTSPR(SPRN_MAS3):
582 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
583 kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
584 break;
585 case KVM_INST_MTSPR(SPRN_MAS4):
586 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
587 kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
588 break;
589 case KVM_INST_MTSPR(SPRN_MAS6):
590 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
591 kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
592 break;
593 case KVM_INST_MTSPR(SPRN_MAS7):
594 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
595 kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
596 break;
597#endif /* CONFIG_PPC_BOOK3E_MMU */
598
599 case KVM_INST_MTSPR(SPRN_SPRG4):
600 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
601 kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
602 break;
603 case KVM_INST_MTSPR(SPRN_SPRG5):
604 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
605 kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
606 break;
607 case KVM_INST_MTSPR(SPRN_SPRG6):
608 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
609 kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
610 break;
611 case KVM_INST_MTSPR(SPRN_SPRG7):
612 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
613 kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
614 break;
615
616#ifdef CONFIG_BOOKE
617 case KVM_INST_MTSPR(SPRN_ESR):
618 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
619 kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
620 break;
621#endif
493 622
494 /* Nops */ 623 /* Nops */
495 case KVM_INST_TLBSYNC: 624 case KVM_INST_TLBSYNC:
@@ -556,9 +685,18 @@ static void kvm_use_magic_page(void)
556 start = (void*)_stext; 685 start = (void*)_stext;
557 end = (void*)_etext; 686 end = (void*)_etext;
558 687
688 /*
689 * Being interrupted in the middle of patching would
690 * be bad for SPRG4-7, which KVM can't keep in sync
691 * with emulated accesses because reads don't trap.
692 */
693 local_irq_disable();
694
559 for (p = start; p < end; p++) 695 for (p = start; p < end; p++)
560 kvm_check_ins(p, features); 696 kvm_check_ins(p, features);
561 697
698 local_irq_enable();
699
562 printk(KERN_INFO "KVM: Live patching for a fast VM %s\n", 700 printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
563 kvm_patching_worked ? "worked" : "failed"); 701 kvm_patching_worked ? "worked" : "failed");
564} 702}