author     James Hogan <james.hogan@imgtec.com>    2016-12-08 17:46:41 -0500
committer  James Hogan <james.hogan@imgtec.com>    2017-02-03 10:21:30 -0500
commit     654229a02456a9af372defb13d1911345360074d (patch)
tree       f3adc76105a5a675e87d98b841fdbd21c11fe291
parent     230c57244c2c4d945dba7f9d15845bffe4135b58 (diff)
KVM: MIPS/T&E: Move CP0 register access into T&E
Access to various CP0 registers via the KVM register access API needs to be
implementation specific to allow restrictions to be made on changes, for
example when VZ guest registers aren't present, so move them all into
trap_emul.c in preparation for VZ.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
-rw-r--r--  arch/mips/include/asm/kvm_host.h |   1
-rw-r--r--  arch/mips/kvm/emulate.c          |   2
-rw-r--r--  arch/mips/kvm/mips.c             | 198
-rw-r--r--  arch/mips/kvm/trap_emul.c        | 181
4 files changed, 179 insertions, 203 deletions
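
The register access API referred to above is KVM's generic ONE_REG interface: userspace enumerates register ids with KVM_GET_REG_LIST (served by the num_regs/copy_reg_indices callbacks this patch adds to trap_emul.c) and then reads or writes individual guest registers, including the CP0 registers moved here, with KVM_GET_ONE_REG/KVM_SET_ONE_REG on the vcpu fd. The sketch below shows only that userspace side and is not part of this patch; it assumes a vcpu fd already obtained via KVM_CREATE_VCPU, and it assumes KVM_REG_MIPS_CP0_STATUS is visible from the installed MIPS kernel headers (on older header sets it has to be defined locally from the encoding documented in Documentation/virtual/kvm/api.txt).

/* Illustrative userspace sketch (not part of this patch): accessing one
 * guest CP0 register through the ONE_REG interface reorganised below. */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* 'addr' must point to a buffer matching the size encoded in 'id':
 * 4 bytes for 32-bit CP0 registers such as Status, 8 bytes for 64-bit
 * ones such as ErrorEPC. Returns 0 on success, -1 with errno on error. */
static int vcpu_get_one_reg(int vcpu_fd, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)addr };

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int vcpu_set_one_reg(int vcpu_fd, uint64_t id, const void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)addr };

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Example: dump guest CP0_Status of a vcpu created with KVM_CREATE_VCPU.
 * KVM_REG_MIPS_CP0_STATUS is assumed to come from <asm/kvm.h> on MIPS. */
static void dump_guest_status(int vcpu_fd)
{
	uint32_t status;

	if (vcpu_get_one_reg(vcpu_fd, KVM_REG_MIPS_CP0_STATUS, &status))
		perror("KVM_GET_ONE_REG(CP0_Status)");
	else
		printf("guest CP0_Status = 0x%08x\n", status);
}

After this change, the register list advertised through KVM_GET_REG_LIST for these CP0 registers comes from the trap & emulate backend rather than from generic code, so each implementation can restrict what it exposes.
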
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 718dfffa17d5..bc56a312497d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -345,7 +345,6 @@ struct kvm_vcpu_arch {
 
 	u8 fpu_enabled;
 	u8 msa_enabled;
-	u8 kscratch_enabled;
 };
 
 
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b6cafb0a9df4..f2b054b80bca 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1066,7 +1066,7 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
 	unsigned int mask = MIPS_CONF_M;
 
 	/* KScrExist */
-	mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
+	mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
 
 	return mask;
 }
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 591426cda15e..9338aec08790 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -520,33 +520,6 @@ static u64 kvm_mips_get_one_regs[] = {
 	KVM_REG_MIPS_LO,
 #endif
 	KVM_REG_MIPS_PC,
-
-	KVM_REG_MIPS_CP0_INDEX,
-	KVM_REG_MIPS_CP0_CONTEXT,
-	KVM_REG_MIPS_CP0_USERLOCAL,
-	KVM_REG_MIPS_CP0_PAGEMASK,
-	KVM_REG_MIPS_CP0_WIRED,
-	KVM_REG_MIPS_CP0_HWRENA,
-	KVM_REG_MIPS_CP0_BADVADDR,
-	KVM_REG_MIPS_CP0_COUNT,
-	KVM_REG_MIPS_CP0_ENTRYHI,
-	KVM_REG_MIPS_CP0_COMPARE,
-	KVM_REG_MIPS_CP0_STATUS,
-	KVM_REG_MIPS_CP0_CAUSE,
-	KVM_REG_MIPS_CP0_EPC,
-	KVM_REG_MIPS_CP0_PRID,
-	KVM_REG_MIPS_CP0_CONFIG,
-	KVM_REG_MIPS_CP0_CONFIG1,
-	KVM_REG_MIPS_CP0_CONFIG2,
-	KVM_REG_MIPS_CP0_CONFIG3,
-	KVM_REG_MIPS_CP0_CONFIG4,
-	KVM_REG_MIPS_CP0_CONFIG5,
-	KVM_REG_MIPS_CP0_CONFIG7,
-	KVM_REG_MIPS_CP0_ERROREPC,
-
-	KVM_REG_MIPS_COUNT_CTL,
-	KVM_REG_MIPS_COUNT_RESUME,
-	KVM_REG_MIPS_COUNT_HZ,
 };
 
 static u64 kvm_mips_get_one_regs_fpu[] = {
@@ -559,15 +532,6 @@ static u64 kvm_mips_get_one_regs_msa[] = {
 	KVM_REG_MIPS_MSA_CSR,
 };
 
-static u64 kvm_mips_get_one_regs_kscratch[] = {
-	KVM_REG_MIPS_CP0_KSCRATCH1,
-	KVM_REG_MIPS_CP0_KSCRATCH2,
-	KVM_REG_MIPS_CP0_KSCRATCH3,
-	KVM_REG_MIPS_CP0_KSCRATCH4,
-	KVM_REG_MIPS_CP0_KSCRATCH5,
-	KVM_REG_MIPS_CP0_KSCRATCH6,
-};
-
 static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
 {
 	unsigned long ret;
@@ -581,7 +545,6 @@ static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
 	}
 	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
 		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
-	ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
 	ret += kvm_mips_callbacks->num_regs(vcpu);
 
 	return ret;
@@ -634,16 +597,6 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
 		}
 	}
 
-	for (i = 0; i < 6; ++i) {
-		if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
-			continue;
-
-		if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
-				 sizeof(kvm_mips_get_one_regs_kscratch[i])))
-			return -EFAULT;
-		++indices;
-	}
-
 	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
 }
 
@@ -734,95 +687,6 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 		v = fpu->msacsr;
 		break;
 
-	/* Co-processor 0 registers */
-	case KVM_REG_MIPS_CP0_INDEX:
-		v = (long)kvm_read_c0_guest_index(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONTEXT:
-		v = (long)kvm_read_c0_guest_context(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_USERLOCAL:
-		v = (long)kvm_read_c0_guest_userlocal(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_PAGEMASK:
-		v = (long)kvm_read_c0_guest_pagemask(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_WIRED:
-		v = (long)kvm_read_c0_guest_wired(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_HWRENA:
-		v = (long)kvm_read_c0_guest_hwrena(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_BADVADDR:
-		v = (long)kvm_read_c0_guest_badvaddr(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_ENTRYHI:
-		v = (long)kvm_read_c0_guest_entryhi(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_COMPARE:
-		v = (long)kvm_read_c0_guest_compare(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_STATUS:
-		v = (long)kvm_read_c0_guest_status(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CAUSE:
-		v = (long)kvm_read_c0_guest_cause(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_EPC:
-		v = (long)kvm_read_c0_guest_epc(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_PRID:
-		v = (long)kvm_read_c0_guest_prid(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONFIG:
-		v = (long)kvm_read_c0_guest_config(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONFIG1:
-		v = (long)kvm_read_c0_guest_config1(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONFIG2:
-		v = (long)kvm_read_c0_guest_config2(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONFIG3:
-		v = (long)kvm_read_c0_guest_config3(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONFIG4:
-		v = (long)kvm_read_c0_guest_config4(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONFIG5:
-		v = (long)kvm_read_c0_guest_config5(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_CONFIG7:
-		v = (long)kvm_read_c0_guest_config7(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_ERROREPC:
-		v = (long)kvm_read_c0_guest_errorepc(cop0);
-		break;
-	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
-		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
-		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
-			return -EINVAL;
-		switch (idx) {
-		case 2:
-			v = (long)kvm_read_c0_guest_kscratch1(cop0);
-			break;
-		case 3:
-			v = (long)kvm_read_c0_guest_kscratch2(cop0);
-			break;
-		case 4:
-			v = (long)kvm_read_c0_guest_kscratch3(cop0);
-			break;
-		case 5:
-			v = (long)kvm_read_c0_guest_kscratch4(cop0);
-			break;
-		case 6:
-			v = (long)kvm_read_c0_guest_kscratch5(cop0);
-			break;
-		case 7:
-			v = (long)kvm_read_c0_guest_kscratch6(cop0);
-			break;
-		}
-		break;
 	/* registers to be handled specially */
 	default:
 		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
@@ -954,68 +818,6 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 		fpu->msacsr = v;
 		break;
 
-	/* Co-processor 0 registers */
-	case KVM_REG_MIPS_CP0_INDEX:
-		kvm_write_c0_guest_index(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_CONTEXT:
-		kvm_write_c0_guest_context(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_USERLOCAL:
-		kvm_write_c0_guest_userlocal(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_PAGEMASK:
-		kvm_write_c0_guest_pagemask(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_WIRED:
-		kvm_write_c0_guest_wired(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_HWRENA:
-		kvm_write_c0_guest_hwrena(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_BADVADDR:
-		kvm_write_c0_guest_badvaddr(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_ENTRYHI:
-		kvm_write_c0_guest_entryhi(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_STATUS:
-		kvm_write_c0_guest_status(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_EPC:
-		kvm_write_c0_guest_epc(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_PRID:
-		kvm_write_c0_guest_prid(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_ERROREPC:
-		kvm_write_c0_guest_errorepc(cop0, v);
-		break;
-	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
-		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
-		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
-			return -EINVAL;
-		switch (idx) {
-		case 2:
-			kvm_write_c0_guest_kscratch1(cop0, v);
-			break;
-		case 3:
-			kvm_write_c0_guest_kscratch2(cop0, v);
-			break;
-		case 4:
-			kvm_write_c0_guest_kscratch3(cop0, v);
-			break;
-		case 5:
-			kvm_write_c0_guest_kscratch4(cop0, v);
-			break;
-		case 6:
-			kvm_write_c0_guest_kscratch5(cop0, v);
-			break;
-		case 7:
-			kvm_write_c0_guest_kscratch6(cop0, v);
-			break;
-		}
-		break;
 	/* registers to be handled specially */
 	default:
 		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 001c5fb61049..08327de4323a 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -489,8 +489,6 @@ static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
 
-	vcpu->arch.kscratch_enabled = 0xfc;
-
 	/*
 	 * Allocate GVA -> HPA page tables.
 	 * MIPS doesn't use the mm_struct pointer argument.
@@ -640,14 +638,54 @@ static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
 	kvm_trap_emul_flush_shadow_all(kvm);
 }
 
+static u64 kvm_trap_emul_get_one_regs[] = {
+	KVM_REG_MIPS_CP0_INDEX,
+	KVM_REG_MIPS_CP0_CONTEXT,
+	KVM_REG_MIPS_CP0_USERLOCAL,
+	KVM_REG_MIPS_CP0_PAGEMASK,
+	KVM_REG_MIPS_CP0_WIRED,
+	KVM_REG_MIPS_CP0_HWRENA,
+	KVM_REG_MIPS_CP0_BADVADDR,
+	KVM_REG_MIPS_CP0_COUNT,
+	KVM_REG_MIPS_CP0_ENTRYHI,
+	KVM_REG_MIPS_CP0_COMPARE,
+	KVM_REG_MIPS_CP0_STATUS,
+	KVM_REG_MIPS_CP0_CAUSE,
+	KVM_REG_MIPS_CP0_EPC,
+	KVM_REG_MIPS_CP0_PRID,
+	KVM_REG_MIPS_CP0_CONFIG,
+	KVM_REG_MIPS_CP0_CONFIG1,
+	KVM_REG_MIPS_CP0_CONFIG2,
+	KVM_REG_MIPS_CP0_CONFIG3,
+	KVM_REG_MIPS_CP0_CONFIG4,
+	KVM_REG_MIPS_CP0_CONFIG5,
+	KVM_REG_MIPS_CP0_CONFIG7,
+	KVM_REG_MIPS_CP0_ERROREPC,
+	KVM_REG_MIPS_CP0_KSCRATCH1,
+	KVM_REG_MIPS_CP0_KSCRATCH2,
+	KVM_REG_MIPS_CP0_KSCRATCH3,
+	KVM_REG_MIPS_CP0_KSCRATCH4,
+	KVM_REG_MIPS_CP0_KSCRATCH5,
+	KVM_REG_MIPS_CP0_KSCRATCH6,
+
+	KVM_REG_MIPS_COUNT_CTL,
+	KVM_REG_MIPS_COUNT_RESUME,
+	KVM_REG_MIPS_COUNT_HZ,
+};
+
 static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
 }
 
 static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
 					   u64 __user *indices)
 {
+	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
+			 sizeof(kvm_trap_emul_get_one_regs)))
+		return -EFAULT;
+	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);
+
 	return 0;
 }
 
@@ -655,7 +693,69 @@ static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
 					 const struct kvm_one_reg *reg,
 					 s64 *v)
 {
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
 	switch (reg->id) {
+	case KVM_REG_MIPS_CP0_INDEX:
+		*v = (long)kvm_read_c0_guest_index(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONTEXT:
+		*v = (long)kvm_read_c0_guest_context(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_USERLOCAL:
+		*v = (long)kvm_read_c0_guest_userlocal(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_PAGEMASK:
+		*v = (long)kvm_read_c0_guest_pagemask(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_WIRED:
+		*v = (long)kvm_read_c0_guest_wired(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_HWRENA:
+		*v = (long)kvm_read_c0_guest_hwrena(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_BADVADDR:
+		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_ENTRYHI:
+		*v = (long)kvm_read_c0_guest_entryhi(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_COMPARE:
+		*v = (long)kvm_read_c0_guest_compare(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_STATUS:
+		*v = (long)kvm_read_c0_guest_status(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CAUSE:
+		*v = (long)kvm_read_c0_guest_cause(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_EPC:
+		*v = (long)kvm_read_c0_guest_epc(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_PRID:
+		*v = (long)kvm_read_c0_guest_prid(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG:
+		*v = (long)kvm_read_c0_guest_config(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG1:
+		*v = (long)kvm_read_c0_guest_config1(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG2:
+		*v = (long)kvm_read_c0_guest_config2(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG3:
+		*v = (long)kvm_read_c0_guest_config3(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG4:
+		*v = (long)kvm_read_c0_guest_config4(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG5:
+		*v = (long)kvm_read_c0_guest_config5(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG7:
+		*v = (long)kvm_read_c0_guest_config7(cop0);
+		break;
 	case KVM_REG_MIPS_CP0_COUNT:
 		*v = kvm_mips_read_count(vcpu);
 		break;
@@ -668,6 +768,27 @@ static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_COUNT_HZ:
 		*v = vcpu->arch.count_hz;
 		break;
+	case KVM_REG_MIPS_CP0_ERROREPC:
+		*v = (long)kvm_read_c0_guest_errorepc(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH1:
+		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH2:
+		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH3:
+		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH4:
+		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH5:
+		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH6:
+		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -683,6 +804,39 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 	unsigned int cur, change;
 
 	switch (reg->id) {
+	case KVM_REG_MIPS_CP0_INDEX:
+		kvm_write_c0_guest_index(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_CONTEXT:
+		kvm_write_c0_guest_context(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_USERLOCAL:
+		kvm_write_c0_guest_userlocal(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_PAGEMASK:
+		kvm_write_c0_guest_pagemask(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_WIRED:
+		kvm_write_c0_guest_wired(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_HWRENA:
+		kvm_write_c0_guest_hwrena(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_BADVADDR:
+		kvm_write_c0_guest_badvaddr(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_ENTRYHI:
+		kvm_write_c0_guest_entryhi(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_STATUS:
+		kvm_write_c0_guest_status(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_EPC:
+		kvm_write_c0_guest_epc(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_PRID:
+		kvm_write_c0_guest_prid(cop0, v);
+		break;
 	case KVM_REG_MIPS_CP0_COUNT:
 		kvm_mips_write_count(vcpu, v);
 		break;
@@ -759,6 +913,27 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_COUNT_HZ:
 		ret = kvm_mips_set_count_hz(vcpu, v);
 		break;
+	case KVM_REG_MIPS_CP0_ERROREPC:
+		kvm_write_c0_guest_errorepc(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH1:
+		kvm_write_c0_guest_kscratch1(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH2:
+		kvm_write_c0_guest_kscratch2(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH3:
+		kvm_write_c0_guest_kscratch3(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH4:
+		kvm_write_c0_guest_kscratch4(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH5:
+		kvm_write_c0_guest_kscratch5(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH6:
+		kvm_write_c0_guest_kscratch6(cop0, v);
+		break;
 	default:
 		return -EINVAL;
 	}
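
For orientation, the kvm_trap_emul_num_regs/copy_reg_indices/get_one_reg/set_one_reg handlers added above are reached through the per-implementation callback table that mips.c dispatches to (kvm_mips_callbacks->num_regs and friends in the earlier hunks). The abridged initialiser below is only a sketch of that wiring, with member names inferred from those call sites; the actual hookup lives elsewhere in trap_emul.c and is untouched by this patch.

/* Sketch only: how the handlers above plug into the implementation
 * callback table consulted by mips.c. Other members are elided. */
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* ... exception handlers, vcpu init/setup, shadow flush hooks ... */
	.num_regs		= kvm_trap_emul_num_regs,
	.copy_reg_indices	= kvm_trap_emul_copy_reg_indices,
	.get_one_reg		= kvm_trap_emul_get_one_reg,
	.set_one_reg		= kvm_trap_emul_set_one_reg,
	/* ... */
};
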