author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-13 12:47:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-13 12:47:01 -0400
commit		900360131066f192c82311a098d03d6ac6429e20 (patch)
tree		e9681537a2d1f75fa5be21d8f1116f9f0ba8a391 /arch/mips
parent		4541fec3104bef0c60633f9e180be94ea5ccc2b7 (diff)
parent		ca3f0874723fad81d0c701b63ae3a17a408d5f25 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "First batch of KVM changes for 4.1

  The most interesting bit here is irqfd/ioeventfd support for ARM and
  ARM64.

  Summary:

  ARM/ARM64:
     fixes for live migration, irqfd and ioeventfd support (enabling
     vhost, too), page aging

  s390:
     interrupt handling rework, allowing to inject all local interrupts
     via new ioctl and to get/set the full local irq state for migration
     and introspection.  New ioctls to access memory by virtual address,
     and to get/set the guest storage keys.  SIMD support.

  MIPS:
     FPU and MIPS SIMD Architecture (MSA) support.  Includes some
     patches from Ralf Baechle's MIPS tree.

  x86:
     bugfixes (notably for pvclock, the others are small) and cleanups.
     Another small latency improvement for the TSC deadline timer"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  KVM: use slowpath for cross page cached accesses
  kvm: mmu: lazy collapse small sptes into large sptes
  KVM: x86: Clear CR2 on VCPU reset
  KVM: x86: DR0-DR3 are not clear on reset
  KVM: x86: BSP in MSR_IA32_APICBASE is writable
  KVM: x86: simplify kvm_apic_map
  KVM: x86: avoid logical_map when it is invalid
  KVM: x86: fix mixed APIC mode broadcast
  KVM: x86: use MDA for interrupt matching
  kvm/ppc/mpic: drop unused IRQ_testbit
  KVM: nVMX: remove unnecessary double caching of MAXPHYADDR
  KVM: nVMX: checks for address bits beyond MAXPHYADDR on VM-entry
  KVM: x86: cache maxphyaddr CPUID leaf in struct kvm_vcpu
  KVM: vmx: pass error code with internal error #2
  x86: vdso: fix pvclock races with task migration
  KVM: remove kvm_read_hva and kvm_read_hva_atomic
  KVM: x86: optimize delivery of TSC deadline timer interrupt
  KVM: x86: extract blocking logic from __vcpu_run
  kvm: x86: fix x86 eflags fixed bit
  KVM: s390: migrate vcpu interrupt state
  ...
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/include/asm/asmmacro-32.h	| 128
-rw-r--r--	arch/mips/include/asm/asmmacro.h	| 218
-rw-r--r--	arch/mips/include/asm/fpu.h		|  20
-rw-r--r--	arch/mips/include/asm/kdebug.h		|   3
-rw-r--r--	arch/mips/include/asm/kvm_host.h	| 125
-rw-r--r--	arch/mips/include/asm/processor.h	|   2
-rw-r--r--	arch/mips/include/uapi/asm/kvm.h	| 164
-rw-r--r--	arch/mips/kernel/asm-offsets.c		| 105
-rw-r--r--	arch/mips/kernel/genex.S		|  15
-rw-r--r--	arch/mips/kernel/ptrace.c		|  30
-rw-r--r--	arch/mips/kernel/r4k_fpu.S		|   2
-rw-r--r--	arch/mips/kernel/traps.c		|  33
-rw-r--r--	arch/mips/kvm/Makefile			|   8
-rw-r--r--	arch/mips/kvm/emulate.c			| 332
-rw-r--r--	arch/mips/kvm/fpu.S			| 122
-rw-r--r--	arch/mips/kvm/locore.S			|  38
-rw-r--r--	arch/mips/kvm/mips.c			| 472
-rw-r--r--	arch/mips/kvm/msa.S			| 161
-rw-r--r--	arch/mips/kvm/stats.c			|   4
-rw-r--r--	arch/mips/kvm/tlb.c			|   6
-rw-r--r--	arch/mips/kvm/trap_emul.c		| 199
21 files changed, 1821 insertions(+), 366 deletions(-)
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index cdac7b3eeaf7..80386470d3a4 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -16,38 +16,38 @@
 	.set	push
 	SET_HARDFLOAT
 	cfc1	\tmp, fcr31
-	swc1	$f0, THREAD_FPR0_LS64(\thread)
-	swc1	$f1, THREAD_FPR1_LS64(\thread)
-	swc1	$f2, THREAD_FPR2_LS64(\thread)
-	swc1	$f3, THREAD_FPR3_LS64(\thread)
-	swc1	$f4, THREAD_FPR4_LS64(\thread)
-	swc1	$f5, THREAD_FPR5_LS64(\thread)
-	swc1	$f6, THREAD_FPR6_LS64(\thread)
-	swc1	$f7, THREAD_FPR7_LS64(\thread)
-	swc1	$f8, THREAD_FPR8_LS64(\thread)
-	swc1	$f9, THREAD_FPR9_LS64(\thread)
-	swc1	$f10, THREAD_FPR10_LS64(\thread)
-	swc1	$f11, THREAD_FPR11_LS64(\thread)
-	swc1	$f12, THREAD_FPR12_LS64(\thread)
-	swc1	$f13, THREAD_FPR13_LS64(\thread)
-	swc1	$f14, THREAD_FPR14_LS64(\thread)
-	swc1	$f15, THREAD_FPR15_LS64(\thread)
-	swc1	$f16, THREAD_FPR16_LS64(\thread)
-	swc1	$f17, THREAD_FPR17_LS64(\thread)
-	swc1	$f18, THREAD_FPR18_LS64(\thread)
-	swc1	$f19, THREAD_FPR19_LS64(\thread)
-	swc1	$f20, THREAD_FPR20_LS64(\thread)
-	swc1	$f21, THREAD_FPR21_LS64(\thread)
-	swc1	$f22, THREAD_FPR22_LS64(\thread)
-	swc1	$f23, THREAD_FPR23_LS64(\thread)
-	swc1	$f24, THREAD_FPR24_LS64(\thread)
-	swc1	$f25, THREAD_FPR25_LS64(\thread)
-	swc1	$f26, THREAD_FPR26_LS64(\thread)
-	swc1	$f27, THREAD_FPR27_LS64(\thread)
-	swc1	$f28, THREAD_FPR28_LS64(\thread)
-	swc1	$f29, THREAD_FPR29_LS64(\thread)
-	swc1	$f30, THREAD_FPR30_LS64(\thread)
-	swc1	$f31, THREAD_FPR31_LS64(\thread)
+	swc1	$f0, THREAD_FPR0(\thread)
+	swc1	$f1, THREAD_FPR1(\thread)
+	swc1	$f2, THREAD_FPR2(\thread)
+	swc1	$f3, THREAD_FPR3(\thread)
+	swc1	$f4, THREAD_FPR4(\thread)
+	swc1	$f5, THREAD_FPR5(\thread)
+	swc1	$f6, THREAD_FPR6(\thread)
+	swc1	$f7, THREAD_FPR7(\thread)
+	swc1	$f8, THREAD_FPR8(\thread)
+	swc1	$f9, THREAD_FPR9(\thread)
+	swc1	$f10, THREAD_FPR10(\thread)
+	swc1	$f11, THREAD_FPR11(\thread)
+	swc1	$f12, THREAD_FPR12(\thread)
+	swc1	$f13, THREAD_FPR13(\thread)
+	swc1	$f14, THREAD_FPR14(\thread)
+	swc1	$f15, THREAD_FPR15(\thread)
+	swc1	$f16, THREAD_FPR16(\thread)
+	swc1	$f17, THREAD_FPR17(\thread)
+	swc1	$f18, THREAD_FPR18(\thread)
+	swc1	$f19, THREAD_FPR19(\thread)
+	swc1	$f20, THREAD_FPR20(\thread)
+	swc1	$f21, THREAD_FPR21(\thread)
+	swc1	$f22, THREAD_FPR22(\thread)
+	swc1	$f23, THREAD_FPR23(\thread)
+	swc1	$f24, THREAD_FPR24(\thread)
+	swc1	$f25, THREAD_FPR25(\thread)
+	swc1	$f26, THREAD_FPR26(\thread)
+	swc1	$f27, THREAD_FPR27(\thread)
+	swc1	$f28, THREAD_FPR28(\thread)
+	swc1	$f29, THREAD_FPR29(\thread)
+	swc1	$f30, THREAD_FPR30(\thread)
+	swc1	$f31, THREAD_FPR31(\thread)
 	sw	\tmp, THREAD_FCR31(\thread)
 	.set	pop
 	.endm
@@ -56,38 +56,38 @@
 	.set	push
 	SET_HARDFLOAT
 	lw	\tmp, THREAD_FCR31(\thread)
-	lwc1	$f0, THREAD_FPR0_LS64(\thread)
-	lwc1	$f1, THREAD_FPR1_LS64(\thread)
-	lwc1	$f2, THREAD_FPR2_LS64(\thread)
-	lwc1	$f3, THREAD_FPR3_LS64(\thread)
-	lwc1	$f4, THREAD_FPR4_LS64(\thread)
-	lwc1	$f5, THREAD_FPR5_LS64(\thread)
-	lwc1	$f6, THREAD_FPR6_LS64(\thread)
-	lwc1	$f7, THREAD_FPR7_LS64(\thread)
-	lwc1	$f8, THREAD_FPR8_LS64(\thread)
-	lwc1	$f9, THREAD_FPR9_LS64(\thread)
-	lwc1	$f10, THREAD_FPR10_LS64(\thread)
-	lwc1	$f11, THREAD_FPR11_LS64(\thread)
-	lwc1	$f12, THREAD_FPR12_LS64(\thread)
-	lwc1	$f13, THREAD_FPR13_LS64(\thread)
-	lwc1	$f14, THREAD_FPR14_LS64(\thread)
-	lwc1	$f15, THREAD_FPR15_LS64(\thread)
-	lwc1	$f16, THREAD_FPR16_LS64(\thread)
-	lwc1	$f17, THREAD_FPR17_LS64(\thread)
-	lwc1	$f18, THREAD_FPR18_LS64(\thread)
-	lwc1	$f19, THREAD_FPR19_LS64(\thread)
-	lwc1	$f20, THREAD_FPR20_LS64(\thread)
-	lwc1	$f21, THREAD_FPR21_LS64(\thread)
-	lwc1	$f22, THREAD_FPR22_LS64(\thread)
-	lwc1	$f23, THREAD_FPR23_LS64(\thread)
-	lwc1	$f24, THREAD_FPR24_LS64(\thread)
-	lwc1	$f25, THREAD_FPR25_LS64(\thread)
-	lwc1	$f26, THREAD_FPR26_LS64(\thread)
-	lwc1	$f27, THREAD_FPR27_LS64(\thread)
-	lwc1	$f28, THREAD_FPR28_LS64(\thread)
-	lwc1	$f29, THREAD_FPR29_LS64(\thread)
-	lwc1	$f30, THREAD_FPR30_LS64(\thread)
-	lwc1	$f31, THREAD_FPR31_LS64(\thread)
+	lwc1	$f0, THREAD_FPR0(\thread)
+	lwc1	$f1, THREAD_FPR1(\thread)
+	lwc1	$f2, THREAD_FPR2(\thread)
+	lwc1	$f3, THREAD_FPR3(\thread)
+	lwc1	$f4, THREAD_FPR4(\thread)
+	lwc1	$f5, THREAD_FPR5(\thread)
+	lwc1	$f6, THREAD_FPR6(\thread)
+	lwc1	$f7, THREAD_FPR7(\thread)
+	lwc1	$f8, THREAD_FPR8(\thread)
+	lwc1	$f9, THREAD_FPR9(\thread)
+	lwc1	$f10, THREAD_FPR10(\thread)
+	lwc1	$f11, THREAD_FPR11(\thread)
+	lwc1	$f12, THREAD_FPR12(\thread)
+	lwc1	$f13, THREAD_FPR13(\thread)
+	lwc1	$f14, THREAD_FPR14(\thread)
+	lwc1	$f15, THREAD_FPR15(\thread)
+	lwc1	$f16, THREAD_FPR16(\thread)
+	lwc1	$f17, THREAD_FPR17(\thread)
+	lwc1	$f18, THREAD_FPR18(\thread)
+	lwc1	$f19, THREAD_FPR19(\thread)
+	lwc1	$f20, THREAD_FPR20(\thread)
+	lwc1	$f21, THREAD_FPR21(\thread)
+	lwc1	$f22, THREAD_FPR22(\thread)
+	lwc1	$f23, THREAD_FPR23(\thread)
+	lwc1	$f24, THREAD_FPR24(\thread)
+	lwc1	$f25, THREAD_FPR25(\thread)
+	lwc1	$f26, THREAD_FPR26(\thread)
+	lwc1	$f27, THREAD_FPR27(\thread)
+	lwc1	$f28, THREAD_FPR28(\thread)
+	lwc1	$f29, THREAD_FPR29(\thread)
+	lwc1	$f30, THREAD_FPR30(\thread)
+	lwc1	$f31, THREAD_FPR31(\thread)
 	ctc1	\tmp, fcr31
 	.set	pop
 	.endm
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 0cae4595e985..6156ac8c4cfb 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -60,22 +60,22 @@
 	.set	push
 	SET_HARDFLOAT
 	cfc1	\tmp, fcr31
-	sdc1	$f0, THREAD_FPR0_LS64(\thread)
-	sdc1	$f2, THREAD_FPR2_LS64(\thread)
-	sdc1	$f4, THREAD_FPR4_LS64(\thread)
-	sdc1	$f6, THREAD_FPR6_LS64(\thread)
-	sdc1	$f8, THREAD_FPR8_LS64(\thread)
-	sdc1	$f10, THREAD_FPR10_LS64(\thread)
-	sdc1	$f12, THREAD_FPR12_LS64(\thread)
-	sdc1	$f14, THREAD_FPR14_LS64(\thread)
-	sdc1	$f16, THREAD_FPR16_LS64(\thread)
-	sdc1	$f18, THREAD_FPR18_LS64(\thread)
-	sdc1	$f20, THREAD_FPR20_LS64(\thread)
-	sdc1	$f22, THREAD_FPR22_LS64(\thread)
-	sdc1	$f24, THREAD_FPR24_LS64(\thread)
-	sdc1	$f26, THREAD_FPR26_LS64(\thread)
-	sdc1	$f28, THREAD_FPR28_LS64(\thread)
-	sdc1	$f30, THREAD_FPR30_LS64(\thread)
+	sdc1	$f0, THREAD_FPR0(\thread)
+	sdc1	$f2, THREAD_FPR2(\thread)
+	sdc1	$f4, THREAD_FPR4(\thread)
+	sdc1	$f6, THREAD_FPR6(\thread)
+	sdc1	$f8, THREAD_FPR8(\thread)
+	sdc1	$f10, THREAD_FPR10(\thread)
+	sdc1	$f12, THREAD_FPR12(\thread)
+	sdc1	$f14, THREAD_FPR14(\thread)
+	sdc1	$f16, THREAD_FPR16(\thread)
+	sdc1	$f18, THREAD_FPR18(\thread)
+	sdc1	$f20, THREAD_FPR20(\thread)
+	sdc1	$f22, THREAD_FPR22(\thread)
+	sdc1	$f24, THREAD_FPR24(\thread)
+	sdc1	$f26, THREAD_FPR26(\thread)
+	sdc1	$f28, THREAD_FPR28(\thread)
+	sdc1	$f30, THREAD_FPR30(\thread)
 	sw	\tmp, THREAD_FCR31(\thread)
 	.set	pop
 	.endm
@@ -84,22 +84,22 @@
 	.set	push
 	.set	mips64r2
 	SET_HARDFLOAT
-	sdc1	$f1, THREAD_FPR1_LS64(\thread)
-	sdc1	$f3, THREAD_FPR3_LS64(\thread)
-	sdc1	$f5, THREAD_FPR5_LS64(\thread)
-	sdc1	$f7, THREAD_FPR7_LS64(\thread)
-	sdc1	$f9, THREAD_FPR9_LS64(\thread)
-	sdc1	$f11, THREAD_FPR11_LS64(\thread)
-	sdc1	$f13, THREAD_FPR13_LS64(\thread)
-	sdc1	$f15, THREAD_FPR15_LS64(\thread)
-	sdc1	$f17, THREAD_FPR17_LS64(\thread)
-	sdc1	$f19, THREAD_FPR19_LS64(\thread)
-	sdc1	$f21, THREAD_FPR21_LS64(\thread)
-	sdc1	$f23, THREAD_FPR23_LS64(\thread)
-	sdc1	$f25, THREAD_FPR25_LS64(\thread)
-	sdc1	$f27, THREAD_FPR27_LS64(\thread)
-	sdc1	$f29, THREAD_FPR29_LS64(\thread)
-	sdc1	$f31, THREAD_FPR31_LS64(\thread)
+	sdc1	$f1, THREAD_FPR1(\thread)
+	sdc1	$f3, THREAD_FPR3(\thread)
+	sdc1	$f5, THREAD_FPR5(\thread)
+	sdc1	$f7, THREAD_FPR7(\thread)
+	sdc1	$f9, THREAD_FPR9(\thread)
+	sdc1	$f11, THREAD_FPR11(\thread)
+	sdc1	$f13, THREAD_FPR13(\thread)
+	sdc1	$f15, THREAD_FPR15(\thread)
+	sdc1	$f17, THREAD_FPR17(\thread)
+	sdc1	$f19, THREAD_FPR19(\thread)
+	sdc1	$f21, THREAD_FPR21(\thread)
+	sdc1	$f23, THREAD_FPR23(\thread)
+	sdc1	$f25, THREAD_FPR25(\thread)
+	sdc1	$f27, THREAD_FPR27(\thread)
+	sdc1	$f29, THREAD_FPR29(\thread)
+	sdc1	$f31, THREAD_FPR31(\thread)
 	.set	pop
 	.endm
 
@@ -118,22 +118,22 @@
 	.set	push
 	SET_HARDFLOAT
 	lw	\tmp, THREAD_FCR31(\thread)
-	ldc1	$f0, THREAD_FPR0_LS64(\thread)
-	ldc1	$f2, THREAD_FPR2_LS64(\thread)
-	ldc1	$f4, THREAD_FPR4_LS64(\thread)
-	ldc1	$f6, THREAD_FPR6_LS64(\thread)
-	ldc1	$f8, THREAD_FPR8_LS64(\thread)
-	ldc1	$f10, THREAD_FPR10_LS64(\thread)
-	ldc1	$f12, THREAD_FPR12_LS64(\thread)
-	ldc1	$f14, THREAD_FPR14_LS64(\thread)
-	ldc1	$f16, THREAD_FPR16_LS64(\thread)
-	ldc1	$f18, THREAD_FPR18_LS64(\thread)
-	ldc1	$f20, THREAD_FPR20_LS64(\thread)
-	ldc1	$f22, THREAD_FPR22_LS64(\thread)
-	ldc1	$f24, THREAD_FPR24_LS64(\thread)
-	ldc1	$f26, THREAD_FPR26_LS64(\thread)
-	ldc1	$f28, THREAD_FPR28_LS64(\thread)
-	ldc1	$f30, THREAD_FPR30_LS64(\thread)
+	ldc1	$f0, THREAD_FPR0(\thread)
+	ldc1	$f2, THREAD_FPR2(\thread)
+	ldc1	$f4, THREAD_FPR4(\thread)
+	ldc1	$f6, THREAD_FPR6(\thread)
+	ldc1	$f8, THREAD_FPR8(\thread)
+	ldc1	$f10, THREAD_FPR10(\thread)
+	ldc1	$f12, THREAD_FPR12(\thread)
+	ldc1	$f14, THREAD_FPR14(\thread)
+	ldc1	$f16, THREAD_FPR16(\thread)
+	ldc1	$f18, THREAD_FPR18(\thread)
+	ldc1	$f20, THREAD_FPR20(\thread)
+	ldc1	$f22, THREAD_FPR22(\thread)
+	ldc1	$f24, THREAD_FPR24(\thread)
+	ldc1	$f26, THREAD_FPR26(\thread)
+	ldc1	$f28, THREAD_FPR28(\thread)
+	ldc1	$f30, THREAD_FPR30(\thread)
 	ctc1	\tmp, fcr31
 	.endm
 
@@ -141,22 +141,22 @@
 	.set	push
 	.set	mips64r2
 	SET_HARDFLOAT
-	ldc1	$f1, THREAD_FPR1_LS64(\thread)
-	ldc1	$f3, THREAD_FPR3_LS64(\thread)
-	ldc1	$f5, THREAD_FPR5_LS64(\thread)
-	ldc1	$f7, THREAD_FPR7_LS64(\thread)
-	ldc1	$f9, THREAD_FPR9_LS64(\thread)
-	ldc1	$f11, THREAD_FPR11_LS64(\thread)
-	ldc1	$f13, THREAD_FPR13_LS64(\thread)
-	ldc1	$f15, THREAD_FPR15_LS64(\thread)
-	ldc1	$f17, THREAD_FPR17_LS64(\thread)
-	ldc1	$f19, THREAD_FPR19_LS64(\thread)
-	ldc1	$f21, THREAD_FPR21_LS64(\thread)
-	ldc1	$f23, THREAD_FPR23_LS64(\thread)
-	ldc1	$f25, THREAD_FPR25_LS64(\thread)
-	ldc1	$f27, THREAD_FPR27_LS64(\thread)
-	ldc1	$f29, THREAD_FPR29_LS64(\thread)
-	ldc1	$f31, THREAD_FPR31_LS64(\thread)
+	ldc1	$f1, THREAD_FPR1(\thread)
+	ldc1	$f3, THREAD_FPR3(\thread)
+	ldc1	$f5, THREAD_FPR5(\thread)
+	ldc1	$f7, THREAD_FPR7(\thread)
+	ldc1	$f9, THREAD_FPR9(\thread)
+	ldc1	$f11, THREAD_FPR11(\thread)
+	ldc1	$f13, THREAD_FPR13(\thread)
+	ldc1	$f15, THREAD_FPR15(\thread)
+	ldc1	$f17, THREAD_FPR17(\thread)
+	ldc1	$f19, THREAD_FPR19(\thread)
+	ldc1	$f21, THREAD_FPR21(\thread)
+	ldc1	$f23, THREAD_FPR23(\thread)
+	ldc1	$f25, THREAD_FPR25(\thread)
+	ldc1	$f27, THREAD_FPR27(\thread)
+	ldc1	$f29, THREAD_FPR29(\thread)
+	ldc1	$f31, THREAD_FPR31(\thread)
 	.set	pop
 	.endm
 
@@ -211,6 +211,22 @@
 	.endm
 
 #ifdef TOOLCHAIN_SUPPORTS_MSA
+	.macro	_cfcmsa	rd, cs
+	.set	push
+	.set	mips32r2
+	.set	msa
+	cfcmsa	\rd, $\cs
+	.set	pop
+	.endm
+
+	.macro	_ctcmsa	cd, rs
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ctcmsa	$\cd, \rs
+	.set	pop
+	.endm
+
 	.macro	ld_d	wd, off, base
 	.set	push
 	.set	mips32r2
@@ -227,35 +243,35 @@
 	.set	pop
 	.endm
 
-	.macro	copy_u_w	rd, ws, n
+	.macro	copy_u_w	ws, n
 	.set	push
 	.set	mips32r2
 	.set	msa
-	copy_u.w \rd, $w\ws[\n]
+	copy_u.w $1, $w\ws[\n]
 	.set	pop
 	.endm
 
-	.macro	copy_u_d	rd, ws, n
+	.macro	copy_u_d	ws, n
 	.set	push
 	.set	mips64r2
 	.set	msa
-	copy_u.d \rd, $w\ws[\n]
+	copy_u.d $1, $w\ws[\n]
 	.set	pop
 	.endm
 
-	.macro	insert_w	wd, n, rs
+	.macro	insert_w	wd, n
 	.set	push
 	.set	mips32r2
 	.set	msa
-	insert.w $w\wd[\n], \rs
+	insert.w $w\wd[\n], $1
 	.set	pop
 	.endm
 
-	.macro	insert_d	wd, n, rs
+	.macro	insert_d	wd, n
 	.set	push
 	.set	mips64r2
 	.set	msa
-	insert.d $w\wd[\n], \rs
+	insert.d $w\wd[\n], $1
 	.set	pop
 	.endm
 #else
@@ -283,7 +299,7 @@
 /*
  * Temporary until all toolchains in use include MSA support.
  */
-	.macro	cfcmsa	rd, cs
+	.macro	_cfcmsa	rd, cs
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
@@ -293,7 +309,7 @@
 	.set	pop
 	.endm
 
-	.macro	ctcmsa	cd, rs
+	.macro	_ctcmsa	cd, rs
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
@@ -320,44 +336,36 @@
 	.set	pop
 	.endm
 
-	.macro	copy_u_w	rd, ws, n
+	.macro	copy_u_w	ws, n
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
 	.insn
 	.word	COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
-	/* move triggers an assembler bug... */
-	or	\rd, $1, zero
 	.set	pop
 	.endm
 
-	.macro	copy_u_d	rd, ws, n
+	.macro	copy_u_d	ws, n
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
 	.insn
 	.word	COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
-	/* move triggers an assembler bug... */
-	or	\rd, $1, zero
 	.set	pop
 	.endm
 
-	.macro	insert_w	wd, n, rs
+	.macro	insert_w	wd, n
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	/* move triggers an assembler bug... */
-	or	$1, \rs, zero
 	.word	INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
 	.set	pop
 	.endm
 
-	.macro	insert_d	wd, n, rs
+	.macro	insert_d	wd, n
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	/* move triggers an assembler bug... */
-	or	$1, \rs, zero
 	.word	INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
 	.set	pop
 	.endm
@@ -399,7 +407,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	cfcmsa	$1, MSA_CSR
+	_cfcmsa	$1, MSA_CSR
 	sw	$1, THREAD_MSA_CSR(\thread)
 	.set	pop
 	.endm
@@ -409,7 +417,7 @@
 	.set	noat
 	SET_HARDFLOAT
 	lw	$1, THREAD_MSA_CSR(\thread)
-	ctcmsa	MSA_CSR, $1
+	_ctcmsa	MSA_CSR, $1
 	.set	pop
 	ld_d	0, THREAD_FPR0, \thread
 	ld_d	1, THREAD_FPR1, \thread
@@ -452,9 +460,6 @@
 	insert_w \wd, 2
 	insert_w \wd, 3
 #endif
-	.if	31-\wd
-	msa_init_upper	(\wd+1)
-	.endif
 	.endm
 
 	.macro	msa_init_all_upper
@@ -463,6 +468,37 @@
 	SET_HARDFLOAT
 	not	$1, zero
 	msa_init_upper	0
+	msa_init_upper	1
+	msa_init_upper	2
+	msa_init_upper	3
+	msa_init_upper	4
+	msa_init_upper	5
+	msa_init_upper	6
+	msa_init_upper	7
+	msa_init_upper	8
+	msa_init_upper	9
+	msa_init_upper	10
+	msa_init_upper	11
+	msa_init_upper	12
+	msa_init_upper	13
+	msa_init_upper	14
+	msa_init_upper	15
+	msa_init_upper	16
+	msa_init_upper	17
+	msa_init_upper	18
+	msa_init_upper	19
+	msa_init_upper	20
+	msa_init_upper	21
+	msa_init_upper	22
+	msa_init_upper	23
+	msa_init_upper	24
+	msa_init_upper	25
+	msa_init_upper	26
+	msa_init_upper	27
+	msa_init_upper	28
+	msa_init_upper	29
+	msa_init_upper	30
+	msa_init_upper	31
 	.set	pop
 	.endm
 
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd083e999b08..b104ad9d655f 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -48,6 +48,12 @@ enum fpu_mode {
 #define FPU_FR_MASK	0x1
 };
 
+#define __disable_fpu()						\
+do {								\
+	clear_c0_status(ST0_CU1);				\
+	disable_fpu_hazard();					\
+} while (0)
+
 static inline int __enable_fpu(enum fpu_mode mode)
 {
 	int fr;
@@ -86,7 +92,12 @@ fr_common:
 	enable_fpu_hazard();
 
 	/* check FR has the desired value */
-	return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE;
+	if (!!(read_c0_status() & ST0_FR) == !!fr)
+		return 0;
+
+	/* unsupported FR value */
+	__disable_fpu();
+	return SIGFPE;
 
 	default:
 		BUG();
@@ -95,12 +106,6 @@ fr_common:
 	return SIGFPE;
 }
 
-#define __disable_fpu()						\
-do {								\
-	clear_c0_status(ST0_CU1);				\
-	disable_fpu_hazard();					\
-} while (0)
-
 #define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)
 
 static inline int __is_fpu_owner(void)
@@ -170,6 +175,7 @@ static inline void lose_fpu(int save)
 		}
 		disable_msa();
 		clear_thread_flag(TIF_USEDMSA);
+		__disable_fpu();
 	} else if (is_fpu_owner()) {
 		if (save)
 			_save_fp(current);
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
index 6a9af5fcb5d7..cba22ab7ad4d 100644
--- a/arch/mips/include/asm/kdebug.h
+++ b/arch/mips/include/asm/kdebug.h
@@ -10,7 +10,8 @@ enum die_val {
 	DIE_RI,
 	DIE_PAGE_FAULT,
 	DIE_BREAK,
-	DIE_SSTEPBP
+	DIE_SSTEPBP,
+	DIE_MSAFP
 };
 
 #endif /* _ASM_MIPS_KDEBUG_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index ac4fc716062b..4c25823563fe 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -21,10 +21,10 @@
 
 /* MIPS KVM register ids */
 #define MIPS_CP0_32(_R, _S)					\
-	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
 
 #define MIPS_CP0_64(_R, _S)					\
-	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
 
 #define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
 #define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
@@ -42,11 +42,14 @@
 #define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
 #define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
 #define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
 #define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
 #define KVM_REG_MIPS_CP0_CONFIG	MIPS_CP0_32(16, 0)
 #define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
 #define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
 #define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
+#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
 #define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
 #define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
 #define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
@@ -119,6 +122,10 @@ struct kvm_vcpu_stat {
 	u32 syscall_exits;
 	u32 resvd_inst_exits;
 	u32 break_inst_exits;
+	u32 trap_inst_exits;
+	u32 msa_fpe_exits;
+	u32 fpe_exits;
+	u32 msa_disabled_exits;
 	u32 flush_dcache_exits;
 	u32 halt_successful_poll;
 	u32 halt_wakeup;
@@ -138,6 +145,10 @@ enum kvm_mips_exit_types {
 	SYSCALL_EXITS,
 	RESVD_INST_EXITS,
 	BREAK_INST_EXITS,
+	TRAP_INST_EXITS,
+	MSA_FPE_EXITS,
+	FPE_EXITS,
+	MSA_DISABLED_EXITS,
 	FLUSH_DCACHE_EXITS,
 	MAX_KVM_MIPS_EXIT_TYPES
 };
@@ -206,6 +217,8 @@ struct mips_coproc {
 #define MIPS_CP0_CONFIG1_SEL	1
 #define MIPS_CP0_CONFIG2_SEL	2
 #define MIPS_CP0_CONFIG3_SEL	3
+#define MIPS_CP0_CONFIG4_SEL	4
+#define MIPS_CP0_CONFIG5_SEL	5
 
 /* Config0 register bits */
 #define CP0C0_M			31
@@ -262,31 +275,6 @@ struct mips_coproc {
 #define CP0C3_SM	1
 #define CP0C3_TL	0
 
-/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
-#define MIPS_CONFIG0						\
-	((1 << CP0C0_M) | (0x3 << CP0C0_K0))
-
-/* Have config2, no coprocessor2 attached, no MDMX support attached,
-   no performance counters, watch registers present,
-   no code compression, EJTAG present, no FPU, no watch registers */
-#define MIPS_CONFIG1						\
-((1 << CP0C1_M) |						\
- (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |		\
- (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |		\
- (0 << CP0C1_FP))
-
-/* Have config3, no tertiary/secondary caches implemented */
-#define MIPS_CONFIG2						\
-((1 << CP0C2_M))
-
-/* No config4, no DSP ASE, no large physaddr (PABITS),
-   no external interrupt controller, no vectored interrupts,
-   no 1kb pages, no SmartMIPS ASE, no trace logic */
-#define MIPS_CONFIG3						\
-((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |	\
- (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |	\
- (0 << CP0C3_SM) | (0 << CP0C3_TL))
-
 /* MMU types, the first four entries have the same layout as the
    CP0C0_MT field. */
 enum mips_mmu_types {
@@ -321,7 +309,9 @@ enum mips_mmu_types {
  */
 #define T_TRAP			13	/* Trap instruction */
 #define T_VCEI			14	/* Virtual coherency exception */
+#define T_MSAFPE		14	/* MSA floating point exception */
 #define T_FPE			15	/* Floating point exception */
+#define T_MSADIS		21	/* MSA disabled exception */
 #define T_WATCH			23	/* Watch address reference */
 #define T_VCED			31	/* Virtual coherency data */
 
@@ -374,6 +364,9 @@ struct kvm_mips_tlb {
 	long tlb_lo1;
 };
 
+#define KVM_MIPS_FPU_FPU	0x1
+#define KVM_MIPS_FPU_MSA	0x2
+
 #define KVM_MIPS_GUEST_TLB_SIZE	64
 struct kvm_vcpu_arch {
 	void *host_ebase, *guest_ebase;
@@ -395,6 +388,8 @@ struct kvm_vcpu_arch {
 
 	/* FPU State */
 	struct mips_fpu_struct fpu;
+	/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
+	unsigned int fpu_inuse;
 
 	/* COP0 State */
 	struct mips_coproc *cop0;
@@ -441,6 +436,9 @@ struct kvm_vcpu_arch {
 
 	/* WAIT executed */
 	int wait;
+
+	u8 fpu_enabled;
+	u8 msa_enabled;
 };
 
 
@@ -482,11 +480,15 @@ struct kvm_vcpu_arch {
 #define kvm_read_c0_guest_config1(cop0)	(cop0->reg[MIPS_CP0_CONFIG][1])
 #define kvm_read_c0_guest_config2(cop0)	(cop0->reg[MIPS_CP0_CONFIG][2])
 #define kvm_read_c0_guest_config3(cop0)	(cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config4(cop0)	(cop0->reg[MIPS_CP0_CONFIG][4])
+#define kvm_read_c0_guest_config5(cop0)	(cop0->reg[MIPS_CP0_CONFIG][5])
 #define kvm_read_c0_guest_config7(cop0)	(cop0->reg[MIPS_CP0_CONFIG][7])
 #define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val))
 #define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val))
 #define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val))
 #define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config4(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][4] = (val))
+#define kvm_write_c0_guest_config5(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][5] = (val))
 #define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
 #define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
 #define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
 	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));	\
 }
 
+/* Helpers */
+
+static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
+{
+	return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+		vcpu->fpu_enabled;
+}
+
+static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
+{
+	return kvm_mips_guest_can_have_fpu(vcpu) &&
+		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
+}
+
+static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+{
+	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
+		vcpu->msa_enabled;
+}
+
+static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+{
+	return kvm_mips_guest_can_have_msa(vcpu) &&
+		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+}
 
 struct kvm_mips_callbacks {
 	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
@@ -578,6 +605,10 @@ struct kvm_mips_callbacks {
 	int (*handle_syscall)(struct kvm_vcpu *vcpu);
 	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
 	int (*handle_break)(struct kvm_vcpu *vcpu);
+	int (*handle_trap)(struct kvm_vcpu *vcpu);
+	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
+	int (*handle_fpe)(struct kvm_vcpu *vcpu);
+	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
 	int (*vm_init)(struct kvm *kvm);
 	int (*vcpu_init)(struct kvm_vcpu *vcpu);
 	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
@@ -596,6 +627,8 @@ struct kvm_mips_callbacks {
 			   const struct kvm_one_reg *reg, s64 *v);
 	int (*set_one_reg)(struct kvm_vcpu *vcpu,
 			   const struct kvm_one_reg *reg, s64 v);
+	int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
+	int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 /* Trampoline ASM routine to start running in "Guest" context */
 extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 
+/* FPU/MSA context management */
+void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
+void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_own_msa(struct kvm_vcpu *vcpu);
+void kvm_drop_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+
 /* TLB handling */
 uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
 
@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
 						     struct kvm_run *run,
 						     struct kvm_vcpu *vcpu);
 
+extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+						       uint32_t *opc,
+						       struct kvm_run *run,
+						       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+							 uint32_t *opc,
+							 struct kvm_run *run,
+							 struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+						      uint32_t *opc,
+						      struct kvm_run *run,
+						      struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+							 uint32_t *opc,
+							 struct kvm_run *run,
+							 struct kvm_vcpu *vcpu);
+
 extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 							 struct kvm_run *run);
 
@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst,
 					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu);
 
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
+
 /* Dynamic binary translation */
 extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
 				      struct kvm_vcpu *vcpu);
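The guest-FPU/MSA helpers and the kvm_own_fpu()/kvm_lose_fpu() ownership calls declared above are meant to compose roughly as follows. This is an illustrative sketch only, not the kernel's actual exit handler (the real dispatch lives in arch/mips/kvm/ and differs in detail); kvm_read_c0_guest_status(), ST0_CU1, RESUME_GUEST and RESUME_HOST are pre-existing kernel symbols:

/* Illustrative sketch only -- not the kernel's actual handler. */
static int example_handle_cop1_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
	    (kvm_read_c0_guest_status(cop0) & ST0_CU1)) {
		/* Guest really has an FPU: lazily load its FP context */
		kvm_own_fpu(vcpu);
		return RESUME_GUEST;
	}

	/* No guest FPU: a Coprocessor Unusable exception would be
	 * delivered to the guest instead (elided here). */
	return RESUME_HOST;
}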
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b5dcbee01fd7..9b3b48e21c22 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -105,7 +105,7 @@ union fpureg {
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # define FPR_IDX(width, idx)	(idx)
 #else
-# define FPR_IDX(width, idx)	((FPU_REG_WIDTH / (width)) - 1 - (idx))
+# define FPR_IDX(width, idx)	((idx) ^ ((64 / (width)) - 1))
 #endif
 
 #define BUILD_FPR_ACCESS(width) \
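A standalone arithmetic check of the FPR_IDX change above, assuming FPU_REG_WIDTH == 128 (the MSA case). As I read it, the old macro indexed relative to the full vector width, so on big-endian a 64-bit access landed at the top of a 128-bit MSA register, while the new XOR form keeps the 64-bit FP value at element 0 and only swaps 32-bit halves within each 64-bit word:

#include <stdio.h>

/* Big-endian case, FPU_REG_WIDTH == 128 assumed for the old macro. */
#define FPR_IDX_OLD(width, idx)	((128 / (width)) - 1 - (idx))
#define FPR_IDX_NEW(width, idx)	((idx) ^ ((64 / (width)) - 1))

int main(void)
{
	/* 64-bit FP value of an FPR: old -> val64[1], new -> val64[0] */
	printf("64-bit elem 0: old=%d new=%d\n",
	       FPR_IDX_OLD(64, 0), FPR_IDX_NEW(64, 0));
	/* 32-bit halves: now only swapped within their 64-bit word */
	printf("32-bit elem 0: old=%d new=%d\n",
	       FPR_IDX_OLD(32, 0), FPR_IDX_NEW(32, 0));
	printf("32-bit elem 1: old=%d new=%d\n",
	       FPR_IDX_OLD(32, 1), FPR_IDX_NEW(32, 1));
	return 0;
}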
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 2c04b6d9ff85..6985eb59b085 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -36,77 +36,85 @@ struct kvm_regs {
 
 /*
  * for KVM_GET_FPU and KVM_SET_FPU
- *
- * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
- * are zero filled.
  */
 struct kvm_fpu {
-	__u64 fpr[32];
-	__u32 fir;
-	__u32 fccr;
-	__u32 fexr;
-	__u32 fenr;
-	__u32 fcsr;
-	__u32 pad;
 };
 
 
 /*
- * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
  * registers.  The id field is broken down as follows:
  *
- *  bits[2..0]   - Register 'sel' index.
- *  bits[7..3]   - Register 'rd'  index.
- *  bits[15..8]  - Must be zero.
- *  bits[31..16] - 1 -> CP0 registers.
- *  bits[51..32] - Must be zero.
  *  bits[63..52] - As per linux/kvm.h
+ *  bits[51..32] - Must be zero.
+ *  bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CP0 registers.
+ *  bits[15..8]  - Must be zero.
+ *  bits[7..3]   - Register 'rd'  index.
+ *  bits[2..0]   - Register 'sel' index.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
  *
  * Other sets registers may be added in the future.  Each set would
  * have its own identifier in bits[31..16].
- *
- * The registers defined in struct kvm_regs are also accessible, the
- * id values for these are below.
  */
 
-#define KVM_REG_MIPS_R0		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
-#define KVM_REG_MIPS_R1		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
-#define KVM_REG_MIPS_R2		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
-#define KVM_REG_MIPS_R3		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
-#define KVM_REG_MIPS_R4		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
-#define KVM_REG_MIPS_R5		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
-#define KVM_REG_MIPS_R6		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
-#define KVM_REG_MIPS_R7		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
-#define KVM_REG_MIPS_R8		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
-#define KVM_REG_MIPS_R9		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
-#define KVM_REG_MIPS_R10	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
-#define KVM_REG_MIPS_R11	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
-#define KVM_REG_MIPS_R12	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
-#define KVM_REG_MIPS_R13	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
-#define KVM_REG_MIPS_R14	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
-#define KVM_REG_MIPS_R15	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
-#define KVM_REG_MIPS_R16	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
-#define KVM_REG_MIPS_R17	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
-#define KVM_REG_MIPS_R18	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
-#define KVM_REG_MIPS_R19	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
-#define KVM_REG_MIPS_R20	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
-#define KVM_REG_MIPS_R21	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
-#define KVM_REG_MIPS_R22	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
-#define KVM_REG_MIPS_R23	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
-#define KVM_REG_MIPS_R24	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
-#define KVM_REG_MIPS_R25	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
-#define KVM_REG_MIPS_R26	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
-#define KVM_REG_MIPS_R27	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
-#define KVM_REG_MIPS_R28	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
-#define KVM_REG_MIPS_R29	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
-#define KVM_REG_MIPS_R30	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
-#define KVM_REG_MIPS_R31	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
-
-#define KVM_REG_MIPS_HI		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
-#define KVM_REG_MIPS_LO		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
-#define KVM_REG_MIPS_PC		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
-
-/* KVM specific control registers */
+#define KVM_REG_MIPS_GP		(KVM_REG_MIPS | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_CP0	(KVM_REG_MIPS | 0x0000000000010000ULL)
+#define KVM_REG_MIPS_KVM	(KVM_REG_MIPS | 0x0000000000020000ULL)
+#define KVM_REG_MIPS_FPU	(KVM_REG_MIPS | 0x0000000000030000ULL)
+
+
+/*
+ * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_MIPS_R0		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_MIPS_KVM - KVM specific control registers.
+ */
 
 /*
  * CP0_Count control
@@ -118,8 +126,7 @@ struct kvm_fpu {
  * safely without losing time or guest timer interrupts.
  * Other: Reserved, do not change.
  */
-#define KVM_REG_MIPS_COUNT_CTL	    (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
-				     0x20000 | 0)
+#define KVM_REG_MIPS_COUNT_CTL	    (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
 #define KVM_REG_MIPS_COUNT_CTL_DC	0x00000001
 
 /*
@@ -131,15 +138,46 @@ struct kvm_fpu {
  * emulated.
  * Modifications to times in the future are rejected.
  */
-#define KVM_REG_MIPS_COUNT_RESUME   (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
-				     0x20000 | 1)
+#define KVM_REG_MIPS_COUNT_RESUME   (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
 /*
  * CP0_Count rate in Hz
  * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
  * discontinuities in CP0_Count.
  */
-#define KVM_REG_MIPS_COUNT_HZ	    (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
-				     0x20000 | 2)
+#define KVM_REG_MIPS_COUNT_HZ	    (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
+
+
+/*
+ * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
+ *
+ *  bits[15..8]  - Register subset (see definitions below).
+ *  bits[7..5]   - Must be zero.
+ *  bits[4..0]   - Register number within register subset.
+ */
+
+#define KVM_REG_MIPS_FPR	(KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_FCR	(KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
+#define KVM_REG_MIPS_MSACR	(KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
+
+/*
+ * KVM_REG_MIPS_FPR - Floating point / Vector registers.
+ */
+#define KVM_REG_MIPS_FPR_32(n)	(KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n))
+#define KVM_REG_MIPS_FPR_64(n)	(KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n))
+#define KVM_REG_MIPS_VEC_128(n)	(KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
+
+/*
+ * KVM_REG_MIPS_FCR - Floating point control registers.
+ */
+#define KVM_REG_MIPS_FCR_IR	(KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_FCR_CSR	(KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
+
+/*
+ * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
+ */
+#define KVM_REG_MIPS_MSA_IR	 (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_MSA_CSR	 (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)
+
 
 /*
  * KVM MIPS specific structures and definitions
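The comment layout above fully determines how a register id is assembled. A self-contained sketch composing two ids under the new scheme; the KVM_REG_MIPS arch value and the KVM_REG_SIZE_* encodings (bits[63..52]) are quoted from linux/kvm.h so the example compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define KVM_REG_MIPS		0x7000000000000000ULL	/* arch field, from linux/kvm.h */
#define KVM_REG_SIZE_U32	0x0020000000000000ULL	/* log2(4 bytes)  = 2 */
#define KVM_REG_SIZE_U64	0x0030000000000000ULL	/* log2(8 bytes)  = 3 */

#define KVM_REG_MIPS_CP0	(KVM_REG_MIPS | 0x0000000000010000ULL)	/* set 1 */
#define KVM_REG_MIPS_FPU	(KVM_REG_MIPS | 0x0000000000030000ULL)	/* set 3 */

/* Register set 1 (CP0): id = set | size | (8 * rd + sel) */
#define MIPS_CP0_32(rd, sel)	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (rd) + (sel)))
/* Register set 3, FPR subset (subset bits are zero): id = set | size | n */
#define KVM_REG_MIPS_FPR_64(n)	(KVM_REG_MIPS_FPU | KVM_REG_SIZE_U64 | (n))

int main(void)
{
	printf("CP0_Status (rd=12, sel=0): 0x%016llx\n",
	       (unsigned long long)MIPS_CP0_32(12, 0));
	printf("FPR2 (64-bit view):        0x%016llx\n",
	       (unsigned long long)KVM_REG_MIPS_FPR_64(2));
	return 0;
}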
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 750d67ac41e9..e59fd7cfac9e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void)
 	OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
 	OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
 
-	/* the least significant 64 bits of each FP register */
-	OFFSET(THREAD_FPR0_LS64, task_struct,
-	       thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR1_LS64, task_struct,
-	       thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR2_LS64, task_struct,
-	       thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR3_LS64, task_struct,
-	       thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR4_LS64, task_struct,
-	       thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR5_LS64, task_struct,
-	       thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR6_LS64, task_struct,
-	       thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR7_LS64, task_struct,
-	       thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR8_LS64, task_struct,
-	       thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR9_LS64, task_struct,
-	       thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR10_LS64, task_struct,
-	       thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR11_LS64, task_struct,
-	       thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR12_LS64, task_struct,
-	       thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR13_LS64, task_struct,
-	       thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR14_LS64, task_struct,
-	       thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR15_LS64, task_struct,
-	       thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR16_LS64, task_struct,
-	       thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR17_LS64, task_struct,
-	       thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR18_LS64, task_struct,
-	       thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR19_LS64, task_struct,
-	       thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR20_LS64, task_struct,
-	       thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR21_LS64, task_struct,
-	       thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR22_LS64, task_struct,
-	       thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR23_LS64, task_struct,
-	       thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR24_LS64, task_struct,
-	       thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR25_LS64, task_struct,
-	       thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR26_LS64, task_struct,
-	       thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR27_LS64, task_struct,
-	       thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR28_LS64, task_struct,
-	       thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR29_LS64, task_struct,
-	       thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR30_LS64, task_struct,
-	       thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
-	OFFSET(THREAD_FPR31_LS64, task_struct,
-	       thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
-
 	OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
 	OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
 	BLANK();
@@ -470,6 +404,45 @@ void output_kvm_defines(void)
 	OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
 	OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
 	OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+	BLANK();
+
+	OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
+	OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
+	OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
+	OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
+	OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
+	OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
+	OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
+	OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
+	OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
+	OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
+	OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
+	OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
+	OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
+	OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
+	OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
+	OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
+	OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
+	OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
+	OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
+	OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
+	OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
+	OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
+	OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
+	OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
+	OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
+	OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
+	OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
+	OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
+	OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
+	OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
+	OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
+	OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
+
+	OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
+	OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
+	BLANK();
+
 	OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
 	OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
 	OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 2ebaabe3af15..af42e7003f12 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set	mips1
 	SET_HARDFLOAT
 	cfc1	a1, fcr31
-	li	a2, ~(0x3f << 12)
-	and	a2, a1
-	ctc1	a2, fcr31
 	.set	pop
-	TRACE_IRQS_ON
-	STI
+	CLI
+	TRACE_IRQS_OFF
+	.endm
+
+	.macro	__build_clear_msa_fpe
+	_cfcmsa	a1, MSA_CSR
+	CLI
+	TRACE_IRQS_OFF
 	.endm
 
 	.macro	__build_clear_ade
@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	BUILD_HANDLER cpu cpu sti silent		/* #11 */
 	BUILD_HANDLER ov ov sti silent			/* #12 */
 	BUILD_HANDLER tr tr sti silent			/* #13 */
-	BUILD_HANDLER msa_fpe msa_fpe sti silent	/* #14 */
+	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
 	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
 	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
 	BUILD_HANDLER msa msa sti silent		/* #21 */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 510452812594..7da6e324dd35 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -46,6 +46,26 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+static void init_fp_ctx(struct task_struct *target)
+{
+	/* If FP has been used then the target already has context */
+	if (tsk_used_math(target))
+		return;
+
+	/* Begin with data registers set to all 1s... */
+	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+
+	/* ...and FCSR zeroed */
+	target->thread.fpu.fcr31 = 0;
+
+	/*
+	 * Record that the target has "used" math, such that the context
+	 * just initialised, and any modifications made by the caller,
+	 * aren't discarded.
+	 */
+	set_stopped_child_used_math(target);
+}
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 	if (!access_ok(VERIFY_READ, data, 33 * 8))
 		return -EIO;
 
+	init_fp_ctx(child);
 	fregs = get_fpu_regs(child);
 
 	for (i = 0; i < 32; i++) {
@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target,
 
 	/* XXX fcr31  */
 
+	init_fp_ctx(target);
+
 	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					  &target->thread.fpu,
@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request,
660 case FPR_BASE ... FPR_BASE + 31: { 683 case FPR_BASE ... FPR_BASE + 31: {
661 union fpureg *fregs = get_fpu_regs(child); 684 union fpureg *fregs = get_fpu_regs(child);
662 685
663 if (!tsk_used_math(child)) { 686 init_fp_ctx(child);
664 /* FP not yet used */
665 memset(&child->thread.fpu, ~0,
666 sizeof(child->thread.fpu));
667 child->thread.fpu.fcr31 = 0;
668 }
669#ifdef CONFIG_32BIT 687#ifdef CONFIG_32BIT
670 if (test_thread_flag(TIF_32BIT_FPREGS)) { 688 if (test_thread_flag(TIF_32BIT_FPREGS)) {
671 /* 689 /*
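
The init_fp_ctx() change above gives ptrace writers a well-defined starting point: FP data registers initialised to all 1s and FCSR zeroed, with used_math recorded so the values just written are not discarded. A minimal user-space sketch of the same initialisation, using an illustrative stand-in for struct mips_fpu_struct (the real layout lives in asm/processor.h):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative stand-in for struct mips_fpu_struct. */
struct fpu_ctx {
        uint64_t fpr[32];
        uint32_t fcr31;
};

int main(void)
{
        struct fpu_ctx ctx;

        /* Mirror init_fp_ctx(): data registers all 1s, FCSR zeroed. */
        memset(ctx.fpr, ~0, sizeof(ctx.fpr));
        ctx.fcr31 = 0;

        printf("fpr[0] = %#llx, fcr31 = %#x\n",
               (unsigned long long)ctx.fpr[0], ctx.fcr31);
        return 0;
}
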
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 676c5030a953..1d88af26ba82 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -34,7 +34,6 @@
34 .endm 34 .endm
35 35
36 .set noreorder 36 .set noreorder
37 .set MIPS_ISA_ARCH_LEVEL_RAW
38 37
39LEAF(_save_fp_context) 38LEAF(_save_fp_context)
40 .set push 39 .set push
@@ -103,6 +102,7 @@ LEAF(_save_fp_context)
103 /* Save 32-bit process floating point context */ 102 /* Save 32-bit process floating point context */
104LEAF(_save_fp_context32) 103LEAF(_save_fp_context32)
105 .set push 104 .set push
105 .set MIPS_ISA_ARCH_LEVEL_RAW
106 SET_HARDFLOAT 106 SET_HARDFLOAT
107 cfc1 t1, fcr31 107 cfc1 t1, fcr31
108 108
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 33984c04b60b..5b4d711f878d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs)
701 701
702int process_fpemu_return(int sig, void __user *fault_addr) 702int process_fpemu_return(int sig, void __user *fault_addr)
703{ 703{
704 /*
705 * We can't allow the emulated instruction to leave any of the cause
706 * bits set in FCSR. If any were left set, the kernel would take an FP
707 * exception when restoring the FP context.
708 */
709 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
710
704 if (sig == SIGSEGV || sig == SIGBUS) { 711 if (sig == SIGSEGV || sig == SIGBUS) {
705 struct siginfo si = {0}; 712 struct siginfo si = {0};
706 si.si_addr = fault_addr; 713 si.si_addr = fault_addr;
@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
781 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), 788 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
782 SIGFPE) == NOTIFY_STOP) 789 SIGFPE) == NOTIFY_STOP)
783 goto out; 790 goto out;
791
792 /* Clear FCSR.Cause before enabling interrupts */
793 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
794 local_irq_enable();
795
784 die_if_kernel("FP exception in kernel code", regs); 796 die_if_kernel("FP exception in kernel code", regs);
785 797
786 if (fcr31 & FPU_CSR_UNI_X) { 798 if (fcr31 & FPU_CSR_UNI_X) {
@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
804 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, 816 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
805 &fault_addr); 817 &fault_addr);
806 818
807 /* 819 /* If something went wrong, signal */
808 * We can't allow the emulated instruction to leave any of 820 process_fpemu_return(sig, fault_addr);
809 * the cause bit set in $fcr31.
810 */
811 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
812 821
813 /* Restore the hardware register state */ 822 /* Restore the hardware register state */
814 own_fpu(1); /* Using the FPU again. */ 823 own_fpu(1); /* Using the FPU again. */
815 824
816 /* If something went wrong, signal */
817 process_fpemu_return(sig, fault_addr);
818
819 goto out; 825 goto out;
820 } else if (fcr31 & FPU_CSR_INV_X) 826 } else if (fcr31 & FPU_CSR_INV_X)
821 info.si_code = FPE_FLTINV; 827 info.si_code = FPE_FLTINV;
@@ -1392,13 +1398,22 @@ out:
1392 exception_exit(prev_state); 1398 exception_exit(prev_state);
1393} 1399}
1394 1400
1395asmlinkage void do_msa_fpe(struct pt_regs *regs) 1401asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1396{ 1402{
1397 enum ctx_state prev_state; 1403 enum ctx_state prev_state;
1398 1404
1399 prev_state = exception_enter(); 1405 prev_state = exception_enter();
1406 if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1407 regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
1408 goto out;
1409
1410 /* Clear MSACSR.Cause before enabling interrupts */
1411 write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1412 local_irq_enable();
1413
1400 die_if_kernel("do_msa_fpe invoked from kernel context!", regs); 1414 die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1401 force_sig(SIGFPE, current); 1415 force_sig(SIGFPE, current);
1416out:
1402 exception_exit(prev_state); 1417 exception_exit(prev_state);
1403} 1418}
1404 1419
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 401fe027c261..637ebbebd549 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -1,13 +1,15 @@
1# Makefile for KVM support for MIPS 1# Makefile for KVM support for MIPS
2# 2#
3 3
4common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) 4common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
5 5
6EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm 6EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
7 7
8kvm-objs := $(common-objs) mips.o emulate.o locore.o \ 8common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
9
10kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
9 interrupt.o stats.o commpage.o \ 11 interrupt.o stats.o commpage.o \
10 dyntrans.o trap_emul.o 12 dyntrans.o trap_emul.o fpu.o
11 13
12obj-$(CONFIG_KVM) += kvm.o 14obj-$(CONFIG_KVM) += kvm.o
13obj-y += callback.o tlb.o 15obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index fb3e8dfd1ff6..6230f376a44e 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
884 return EMULATE_DONE; 884 return EMULATE_DONE;
885} 885}
886 886
887/**
888 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
889 * @vcpu: Virtual CPU.
890 *
891 * Finds the mask of bits which are writable in the guest's Config1 CP0
892 * register, by userland (currently read-only to the guest).
893 */
894unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
895{
896 unsigned int mask = 0;
897
898 /* Permit FPU to be present if FPU is supported */
899 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
900 mask |= MIPS_CONF1_FP;
901
902 return mask;
903}
904
905/**
906 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
907 * @vcpu: Virtual CPU.
908 *
909 * Finds the mask of bits which are writable in the guest's Config3 CP0
910 * register, by userland (currently read-only to the guest).
911 */
912unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
913{
914 /* Config4 is optional */
915 unsigned int mask = MIPS_CONF_M;
916
917 /* Permit MSA to be present if MSA is supported */
918 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
919 mask |= MIPS_CONF3_MSA;
920
921 return mask;
922}
923
924/**
925 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
926 * @vcpu: Virtual CPU.
927 *
928 * Finds the mask of bits which are writable in the guest's Config4 CP0
929 * register, by userland (currently read-only to the guest).
930 */
931unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
932{
933 /* Config5 is optional */
934 return MIPS_CONF_M;
935}
936
937/**
938 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
939 * @vcpu: Virtual CPU.
940 *
941 * Finds the mask of bits which are writable in the guest's Config5 CP0
942 * register, by the guest itself.
943 */
944unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
945{
946 unsigned int mask = 0;
947
948 /* Permit MSAEn changes if MSA supported and enabled */
949 if (kvm_mips_guest_has_msa(&vcpu->arch))
950 mask |= MIPS_CONF5_MSAEN;
951
952 /*
953 * Permit guest FPU mode changes if FPU is enabled and the relevant
954 * feature exists according to the FIR register.
955 */
956 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
957 if (cpu_has_fre)
958 mask |= MIPS_CONF5_FRE;
959 /* We don't support UFR or UFE */
960 }
961
962 return mask;
963}
964
887enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, 965enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
888 uint32_t cause, struct kvm_run *run, 966 uint32_t cause, struct kvm_run *run,
889 struct kvm_vcpu *vcpu) 967 struct kvm_vcpu *vcpu)
@@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
1021 kvm_mips_write_compare(vcpu, 1099 kvm_mips_write_compare(vcpu,
1022 vcpu->arch.gprs[rt]); 1100 vcpu->arch.gprs[rt]);
1023 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { 1101 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1024 kvm_write_c0_guest_status(cop0, 1102 unsigned int old_val, val, change;
1025 vcpu->arch.gprs[rt]); 1103
1104 old_val = kvm_read_c0_guest_status(cop0);
1105 val = vcpu->arch.gprs[rt];
1106 change = val ^ old_val;
1107
1108 /* Make sure that the NMI bit is never set */
1109 val &= ~ST0_NMI;
1110
1111 /*
1112 * Don't allow CU1 or FR to be set unless FPU
1113 * capability enabled and exists in guest
1114 * configuration.
1115 */
1116 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1117 val &= ~(ST0_CU1 | ST0_FR);
1118
1119 /*
1120 * Also don't allow FR to be set if host doesn't
1121 * support it.
1122 */
1123 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1124 val &= ~ST0_FR;
1125
1126
1127 /* Handle changes in FPU mode */
1128 preempt_disable();
1129
1130 /*
1131 * FPU and Vector register state is made
1132 * UNPREDICTABLE by a change of FR, so don't
1133 * even bother saving it.
1134 */
1135 if (change & ST0_FR)
1136 kvm_drop_fpu(vcpu);
1137
1138 /*
1139 * If MSA state is already live, it is undefined
1140 * how it interacts with FR=0 FPU state, and we
1141 * don't want to hit reserved instruction
1142 * exceptions trying to save the MSA state later
1143 * when CU=1 && FR=1, so play it safe and save
1144 * it first.
1145 */
1146 if (change & ST0_CU1 && !(val & ST0_FR) &&
1147 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1148 kvm_lose_fpu(vcpu);
1149
1026 /* 1150 /*
1027 * Make sure that CU1 and NMI bits are 1151 * Propagate CU1 (FPU enable) changes
1028 * never set 1152 * immediately if the FPU context is already
1153 * loaded. When disabling we leave the context
1154 * loaded so it can be quickly enabled again in
1155 * the near future.
1029 */ 1156 */
1030 kvm_clear_c0_guest_status(cop0, 1157 if (change & ST0_CU1 &&
1031 (ST0_CU1 | ST0_NMI)); 1158 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1159 change_c0_status(ST0_CU1, val);
1160
1161 preempt_enable();
1162
1163 kvm_write_c0_guest_status(cop0, val);
1032 1164
1033#ifdef CONFIG_KVM_MIPS_DYN_TRANS 1165#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1034 kvm_mips_trans_mtc0(inst, opc, vcpu); 1166 /*
1167 * If FPU present, we need CU1/FR bits to take
1168 * effect fairly soon.
1169 */
1170 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1171 kvm_mips_trans_mtc0(inst, opc, vcpu);
1035#endif 1172#endif
1173 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1174 unsigned int old_val, val, change, wrmask;
1175
1176 old_val = kvm_read_c0_guest_config5(cop0);
1177 val = vcpu->arch.gprs[rt];
1178
1179 /* Only a few bits are writable in Config5 */
1180 wrmask = kvm_mips_config5_wrmask(vcpu);
1181 change = (val ^ old_val) & wrmask;
1182 val = old_val ^ change;
1183
1184
1185 /* Handle changes in FPU/MSA modes */
1186 preempt_disable();
1187
1188 /*
1189 * Propagate FRE changes immediately if the FPU
1190 * context is already loaded.
1191 */
1192 if (change & MIPS_CONF5_FRE &&
1193 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1194 change_c0_config5(MIPS_CONF5_FRE, val);
1195
1196 /*
1197 * Propagate MSAEn changes immediately if the
1198 * MSA context is already loaded. When disabling
1199 * we leave the context loaded so it can be
1200 * quickly enabled again in the near future.
1201 */
1202 if (change & MIPS_CONF5_MSAEN &&
1203 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1204 change_c0_config5(MIPS_CONF5_MSAEN,
1205 val);
1206
1207 preempt_enable();
1208
1209 kvm_write_c0_guest_config5(cop0, val);
1036 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { 1210 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1037 uint32_t old_cause, new_cause; 1211 uint32_t old_cause, new_cause;
1038 1212
@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
1970 return er; 2144 return er;
1971} 2145}
1972 2146
2147enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
2148 uint32_t *opc,
2149 struct kvm_run *run,
2150 struct kvm_vcpu *vcpu)
2151{
2152 struct mips_coproc *cop0 = vcpu->arch.cop0;
2153 struct kvm_vcpu_arch *arch = &vcpu->arch;
2154 enum emulation_result er = EMULATE_DONE;
2155
2156 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2157 /* save old pc */
2158 kvm_write_c0_guest_epc(cop0, arch->pc);
2159 kvm_set_c0_guest_status(cop0, ST0_EXL);
2160
2161 if (cause & CAUSEF_BD)
2162 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2163 else
2164 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2165
2166 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2167
2168 kvm_change_c0_guest_cause(cop0, (0xff),
2169 (T_TRAP << CAUSEB_EXCCODE));
2170
2171 /* Set PC to the exception entry point */
2172 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2173
2174 } else {
2175 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2176 er = EMULATE_FAIL;
2177 }
2178
2179 return er;
2180}
2181
2182enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
2183 uint32_t *opc,
2184 struct kvm_run *run,
2185 struct kvm_vcpu *vcpu)
2186{
2187 struct mips_coproc *cop0 = vcpu->arch.cop0;
2188 struct kvm_vcpu_arch *arch = &vcpu->arch;
2189 enum emulation_result er = EMULATE_DONE;
2190
2191 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2192 /* save old pc */
2193 kvm_write_c0_guest_epc(cop0, arch->pc);
2194 kvm_set_c0_guest_status(cop0, ST0_EXL);
2195
2196 if (cause & CAUSEF_BD)
2197 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2198 else
2199 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2200
2201 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2202
2203 kvm_change_c0_guest_cause(cop0, (0xff),
2204 (T_MSAFPE << CAUSEB_EXCCODE));
2205
2206 /* Set PC to the exception entry point */
2207 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2208
2209 } else {
2210 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2211 er = EMULATE_FAIL;
2212 }
2213
2214 return er;
2215}
2216
2217enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
2218 uint32_t *opc,
2219 struct kvm_run *run,
2220 struct kvm_vcpu *vcpu)
2221{
2222 struct mips_coproc *cop0 = vcpu->arch.cop0;
2223 struct kvm_vcpu_arch *arch = &vcpu->arch;
2224 enum emulation_result er = EMULATE_DONE;
2225
2226 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2227 /* save old pc */
2228 kvm_write_c0_guest_epc(cop0, arch->pc);
2229 kvm_set_c0_guest_status(cop0, ST0_EXL);
2230
2231 if (cause & CAUSEF_BD)
2232 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2233 else
2234 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2235
2236 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2237
2238 kvm_change_c0_guest_cause(cop0, (0xff),
2239 (T_FPE << CAUSEB_EXCCODE));
2240
2241 /* Set PC to the exception entry point */
2242 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2243
2244 } else {
2245 kvm_err("Trying to deliver FPE when EXL is already set\n");
2246 er = EMULATE_FAIL;
2247 }
2248
2249 return er;
2250}
2251
2252enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
2253 uint32_t *opc,
2254 struct kvm_run *run,
2255 struct kvm_vcpu *vcpu)
2256{
2257 struct mips_coproc *cop0 = vcpu->arch.cop0;
2258 struct kvm_vcpu_arch *arch = &vcpu->arch;
2259 enum emulation_result er = EMULATE_DONE;
2260
2261 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2262 /* save old pc */
2263 kvm_write_c0_guest_epc(cop0, arch->pc);
2264 kvm_set_c0_guest_status(cop0, ST0_EXL);
2265
2266 if (cause & CAUSEF_BD)
2267 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2268 else
2269 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2270
2271 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2272
2273 kvm_change_c0_guest_cause(cop0, (0xff),
2274 (T_MSADIS << CAUSEB_EXCCODE));
2275
2276 /* Set PC to the exception entry point */
2277 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2278
2279 } else {
2280 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2281 er = EMULATE_FAIL;
2282 }
2283
2284 return er;
2285}
2286
1973/* ll/sc, rdhwr, sync emulation */ 2287/* ll/sc, rdhwr, sync emulation */
1974 2288
1975#define OPCODE 0xfc000000 2289#define OPCODE 0xfc000000
@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2176 case T_SYSCALL: 2490 case T_SYSCALL:
2177 case T_BREAK: 2491 case T_BREAK:
2178 case T_RES_INST: 2492 case T_RES_INST:
2493 case T_TRAP:
2494 case T_MSAFPE:
2495 case T_FPE:
2496 case T_MSADIS:
2179 break; 2497 break;
2180 2498
2181 case T_COP_UNUSABLE: 2499 case T_COP_UNUSABLE:
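
The Config1/3/4/5 and Status emulation above repeatedly uses the same read-modify-write idiom: XOR the proposed value against the current one, mask the difference with the register's wrmask, and XOR the surviving change back in, so bits outside the mask can never flip. A minimal sketch of that idiom (the example mask assumes MIPS_CONF5_MSAEN is bit 27, as the locore.S ext below also does):

#include <stdio.h>
#include <stdint.h>

/* Apply a guest write 'val' on top of 'old_val', honouring only
 * the bits in 'wrmask'. */
static uint32_t apply_wrmask(uint32_t old_val, uint32_t val, uint32_t wrmask)
{
        uint32_t change = (val ^ old_val) & wrmask;

        return old_val ^ change;        /* only writable bits actually flip */
}

int main(void)
{
        uint32_t wrmask = 1u << 27;     /* e.g. Config5 with only MSAEn writable */

        /* prints 0x8000000: every other bit of the all-ones write is dropped */
        printf("%#x\n", apply_wrmask(0x0, 0xffffffff, wrmask));
        return 0;
}
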
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
new file mode 100644
index 000000000000..531fbf5131c0
--- /dev/null
+++ b/arch/mips/kvm/fpu.S
@@ -0,0 +1,122 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * FPU context handling code for KVM.
7 *
8 * Copyright (C) 2015 Imagination Technologies Ltd.
9 */
10
11#include <asm/asm.h>
12#include <asm/asm-offsets.h>
13#include <asm/fpregdef.h>
14#include <asm/mipsregs.h>
15#include <asm/regdef.h>
16
17 .set noreorder
18 .set noat
19
20LEAF(__kvm_save_fpu)
21 .set push
22 .set mips64r2
23 SET_HARDFLOAT
24 mfc0 t0, CP0_STATUS
25 sll t0, t0, 5 # is Status.FR set?
26 bgez t0, 1f # no: skip odd doubles
27 nop
28 sdc1 $f1, VCPU_FPR1(a0)
29 sdc1 $f3, VCPU_FPR3(a0)
30 sdc1 $f5, VCPU_FPR5(a0)
31 sdc1 $f7, VCPU_FPR7(a0)
32 sdc1 $f9, VCPU_FPR9(a0)
33 sdc1 $f11, VCPU_FPR11(a0)
34 sdc1 $f13, VCPU_FPR13(a0)
35 sdc1 $f15, VCPU_FPR15(a0)
36 sdc1 $f17, VCPU_FPR17(a0)
37 sdc1 $f19, VCPU_FPR19(a0)
38 sdc1 $f21, VCPU_FPR21(a0)
39 sdc1 $f23, VCPU_FPR23(a0)
40 sdc1 $f25, VCPU_FPR25(a0)
41 sdc1 $f27, VCPU_FPR27(a0)
42 sdc1 $f29, VCPU_FPR29(a0)
43 sdc1 $f31, VCPU_FPR31(a0)
441: sdc1 $f0, VCPU_FPR0(a0)
45 sdc1 $f2, VCPU_FPR2(a0)
46 sdc1 $f4, VCPU_FPR4(a0)
47 sdc1 $f6, VCPU_FPR6(a0)
48 sdc1 $f8, VCPU_FPR8(a0)
49 sdc1 $f10, VCPU_FPR10(a0)
50 sdc1 $f12, VCPU_FPR12(a0)
51 sdc1 $f14, VCPU_FPR14(a0)
52 sdc1 $f16, VCPU_FPR16(a0)
53 sdc1 $f18, VCPU_FPR18(a0)
54 sdc1 $f20, VCPU_FPR20(a0)
55 sdc1 $f22, VCPU_FPR22(a0)
56 sdc1 $f24, VCPU_FPR24(a0)
57 sdc1 $f26, VCPU_FPR26(a0)
58 sdc1 $f28, VCPU_FPR28(a0)
59 jr ra
60 sdc1 $f30, VCPU_FPR30(a0)
61 .set pop
62 END(__kvm_save_fpu)
63
64LEAF(__kvm_restore_fpu)
65 .set push
66 .set mips64r2
67 SET_HARDFLOAT
68 mfc0 t0, CP0_STATUS
69 sll t0, t0, 5 # is Status.FR set?
70 bgez t0, 1f # no: skip odd doubles
71 nop
72 ldc1 $f1, VCPU_FPR1(a0)
73 ldc1 $f3, VCPU_FPR3(a0)
74 ldc1 $f5, VCPU_FPR5(a0)
75 ldc1 $f7, VCPU_FPR7(a0)
76 ldc1 $f9, VCPU_FPR9(a0)
77 ldc1 $f11, VCPU_FPR11(a0)
78 ldc1 $f13, VCPU_FPR13(a0)
79 ldc1 $f15, VCPU_FPR15(a0)
80 ldc1 $f17, VCPU_FPR17(a0)
81 ldc1 $f19, VCPU_FPR19(a0)
82 ldc1 $f21, VCPU_FPR21(a0)
83 ldc1 $f23, VCPU_FPR23(a0)
84 ldc1 $f25, VCPU_FPR25(a0)
85 ldc1 $f27, VCPU_FPR27(a0)
86 ldc1 $f29, VCPU_FPR29(a0)
87 ldc1 $f31, VCPU_FPR31(a0)
881: ldc1 $f0, VCPU_FPR0(a0)
89 ldc1 $f2, VCPU_FPR2(a0)
90 ldc1 $f4, VCPU_FPR4(a0)
91 ldc1 $f6, VCPU_FPR6(a0)
92 ldc1 $f8, VCPU_FPR8(a0)
93 ldc1 $f10, VCPU_FPR10(a0)
94 ldc1 $f12, VCPU_FPR12(a0)
95 ldc1 $f14, VCPU_FPR14(a0)
96 ldc1 $f16, VCPU_FPR16(a0)
97 ldc1 $f18, VCPU_FPR18(a0)
98 ldc1 $f20, VCPU_FPR20(a0)
99 ldc1 $f22, VCPU_FPR22(a0)
100 ldc1 $f24, VCPU_FPR24(a0)
101 ldc1 $f26, VCPU_FPR26(a0)
102 ldc1 $f28, VCPU_FPR28(a0)
103 jr ra
104 ldc1 $f30, VCPU_FPR30(a0)
105 .set pop
106 END(__kvm_restore_fpu)
107
108LEAF(__kvm_restore_fcsr)
109 .set push
110 SET_HARDFLOAT
111 lw t0, VCPU_FCR31(a0)
112 /*
113 * The ctc1 must stay at this offset in __kvm_restore_fcsr.
114 * See kvm_mips_csr_die_notify(), which handles t0 containing a value
115 * that triggers an FP exception, which must be stepped over and
116 * ignored since the set cause bits must remain there for the guest.
117 */
118 ctc1 t0, fcr31
119 jr ra
120 nop
121 .set pop
122 END(__kvm_restore_fcsr)
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 4a68b176d6e4..c567240386a0 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -36,6 +36,8 @@
36#define PT_HOST_USERLOCAL PT_EPC 36#define PT_HOST_USERLOCAL PT_EPC
37 37
38#define CP0_DDATA_LO $28,3 38#define CP0_DDATA_LO $28,3
39#define CP0_CONFIG3 $16,3
40#define CP0_CONFIG5 $16,5
39#define CP0_EBASE $15,1 41#define CP0_EBASE $15,1
40 42
41#define CP0_INTCTL $12,1 43#define CP0_INTCTL $12,1
@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
353 LONG_L k0, VCPU_HOST_EBASE(k1) 355 LONG_L k0, VCPU_HOST_EBASE(k1)
354 mtc0 k0,CP0_EBASE 356 mtc0 k0,CP0_EBASE
355 357
358 /*
359 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
360 * trigger FPE for pending exceptions.
361 */
362 .set at
363 and v1, v0, ST0_CU1
364 beqz v1, 1f
365 nop
366 .set push
367 SET_HARDFLOAT
368 cfc1 t0, fcr31
369 sw t0, VCPU_FCR31(k1)
370 ctc1 zero,fcr31
371 .set pop
372 .set noat
3731:
374
375#ifdef CONFIG_CPU_HAS_MSA
376 /*
377 * If MSA is enabled, save MSACSR and clear it so that later
378 * instructions don't trigger MSAFPE for pending exceptions.
379 */
380 mfc0 t0, CP0_CONFIG3
381 ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
382 beqz t0, 1f
383 nop
384 mfc0 t0, CP0_CONFIG5
385 ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
386 beqz t0, 1f
387 nop
388 _cfcmsa t0, MSA_CSR
389 sw t0, VCPU_MSA_CSR(k1)
390 _ctcmsa MSA_CSR, zero
3911:
392#endif
393
356 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ 394 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
357 .set at 395 .set at
358 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) 396 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
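
The exit path above tests Config3.MSAP (bit 28) and Config5.MSAEn (bit 27) with single-bit ext instructions before saving and clearing MSACSR. For reference, ext rd, rs, pos, 1 is just a shift and mask; a C equivalent:

#include <stdio.h>
#include <stdint.h>

/* C equivalent of MIPS 'ext rd, rs, pos, 1' for a single bit. */
static unsigned int ext_bit(uint32_t reg, unsigned int pos)
{
        return (reg >> pos) & 1;
}

int main(void)
{
        uint32_t config5 = 1u << 27;               /* MSAEn set */

        printf("MSAEn = %u\n", ext_bit(config5, 27));
        return 0;
}
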
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index c9eccf5df912..bb68e8d520e8 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/kdebug.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
@@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
48 { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, 49 { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
49 { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, 50 { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
50 { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, 51 { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
52 { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
53 { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
54 { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
55 { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
51 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, 56 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
52 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, 57 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
53 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, 58 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
@@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = {
504 KVM_REG_MIPS_CP0_STATUS, 509 KVM_REG_MIPS_CP0_STATUS,
505 KVM_REG_MIPS_CP0_CAUSE, 510 KVM_REG_MIPS_CP0_CAUSE,
506 KVM_REG_MIPS_CP0_EPC, 511 KVM_REG_MIPS_CP0_EPC,
512 KVM_REG_MIPS_CP0_PRID,
507 KVM_REG_MIPS_CP0_CONFIG, 513 KVM_REG_MIPS_CP0_CONFIG,
508 KVM_REG_MIPS_CP0_CONFIG1, 514 KVM_REG_MIPS_CP0_CONFIG1,
509 KVM_REG_MIPS_CP0_CONFIG2, 515 KVM_REG_MIPS_CP0_CONFIG2,
510 KVM_REG_MIPS_CP0_CONFIG3, 516 KVM_REG_MIPS_CP0_CONFIG3,
517 KVM_REG_MIPS_CP0_CONFIG4,
518 KVM_REG_MIPS_CP0_CONFIG5,
511 KVM_REG_MIPS_CP0_CONFIG7, 519 KVM_REG_MIPS_CP0_CONFIG7,
512 KVM_REG_MIPS_CP0_ERROREPC, 520 KVM_REG_MIPS_CP0_ERROREPC,
513 521
@@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
520 const struct kvm_one_reg *reg) 528 const struct kvm_one_reg *reg)
521{ 529{
522 struct mips_coproc *cop0 = vcpu->arch.cop0; 530 struct mips_coproc *cop0 = vcpu->arch.cop0;
531 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
523 int ret; 532 int ret;
524 s64 v; 533 s64 v;
534 s64 vs[2];
535 unsigned int idx;
525 536
526 switch (reg->id) { 537 switch (reg->id) {
538 /* General purpose registers */
527 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: 539 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
528 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; 540 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
529 break; 541 break;
@@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
537 v = (long)vcpu->arch.pc; 549 v = (long)vcpu->arch.pc;
538 break; 550 break;
539 551
552 /* Floating point registers */
553 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
554 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
555 return -EINVAL;
556 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
557 /* Odd singles in top of even double when FR=0 */
558 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
559 v = get_fpr32(&fpu->fpr[idx], 0);
560 else
561 v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
562 break;
563 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
564 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
565 return -EINVAL;
566 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
567 /* Can't access odd doubles in FR=0 mode */
568 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
569 return -EINVAL;
570 v = get_fpr64(&fpu->fpr[idx], 0);
571 break;
572 case KVM_REG_MIPS_FCR_IR:
573 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
574 return -EINVAL;
575 v = boot_cpu_data.fpu_id;
576 break;
577 case KVM_REG_MIPS_FCR_CSR:
578 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
579 return -EINVAL;
580 v = fpu->fcr31;
581 break;
582
583 /* MIPS SIMD Architecture (MSA) registers */
584 case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
585 if (!kvm_mips_guest_has_msa(&vcpu->arch))
586 return -EINVAL;
587 /* Can't access MSA registers in FR=0 mode */
588 if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
589 return -EINVAL;
590 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
591#ifdef CONFIG_CPU_LITTLE_ENDIAN
592 /* least significant byte first */
593 vs[0] = get_fpr64(&fpu->fpr[idx], 0);
594 vs[1] = get_fpr64(&fpu->fpr[idx], 1);
595#else
596 /* most significant byte first */
597 vs[0] = get_fpr64(&fpu->fpr[idx], 1);
598 vs[1] = get_fpr64(&fpu->fpr[idx], 0);
599#endif
600 break;
601 case KVM_REG_MIPS_MSA_IR:
602 if (!kvm_mips_guest_has_msa(&vcpu->arch))
603 return -EINVAL;
604 v = boot_cpu_data.msa_id;
605 break;
606 case KVM_REG_MIPS_MSA_CSR:
607 if (!kvm_mips_guest_has_msa(&vcpu->arch))
608 return -EINVAL;
609 v = fpu->msacsr;
610 break;
611
612 /* Co-processor 0 registers */
540 case KVM_REG_MIPS_CP0_INDEX: 613 case KVM_REG_MIPS_CP0_INDEX:
541 v = (long)kvm_read_c0_guest_index(cop0); 614 v = (long)kvm_read_c0_guest_index(cop0);
542 break; 615 break;
@@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
573 case KVM_REG_MIPS_CP0_EPC: 646 case KVM_REG_MIPS_CP0_EPC:
574 v = (long)kvm_read_c0_guest_epc(cop0); 647 v = (long)kvm_read_c0_guest_epc(cop0);
575 break; 648 break;
576 case KVM_REG_MIPS_CP0_ERROREPC: 649 case KVM_REG_MIPS_CP0_PRID:
577 v = (long)kvm_read_c0_guest_errorepc(cop0); 650 v = (long)kvm_read_c0_guest_prid(cop0);
578 break; 651 break;
579 case KVM_REG_MIPS_CP0_CONFIG: 652 case KVM_REG_MIPS_CP0_CONFIG:
580 v = (long)kvm_read_c0_guest_config(cop0); 653 v = (long)kvm_read_c0_guest_config(cop0);
@@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
588 case KVM_REG_MIPS_CP0_CONFIG3: 661 case KVM_REG_MIPS_CP0_CONFIG3:
589 v = (long)kvm_read_c0_guest_config3(cop0); 662 v = (long)kvm_read_c0_guest_config3(cop0);
590 break; 663 break;
664 case KVM_REG_MIPS_CP0_CONFIG4:
665 v = (long)kvm_read_c0_guest_config4(cop0);
666 break;
667 case KVM_REG_MIPS_CP0_CONFIG5:
668 v = (long)kvm_read_c0_guest_config5(cop0);
669 break;
591 case KVM_REG_MIPS_CP0_CONFIG7: 670 case KVM_REG_MIPS_CP0_CONFIG7:
592 v = (long)kvm_read_c0_guest_config7(cop0); 671 v = (long)kvm_read_c0_guest_config7(cop0);
593 break; 672 break;
673 case KVM_REG_MIPS_CP0_ERROREPC:
674 v = (long)kvm_read_c0_guest_errorepc(cop0);
675 break;
594 /* registers to be handled specially */ 676 /* registers to be handled specially */
595 case KVM_REG_MIPS_CP0_COUNT: 677 case KVM_REG_MIPS_CP0_COUNT:
596 case KVM_REG_MIPS_COUNT_CTL: 678 case KVM_REG_MIPS_COUNT_CTL:
@@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
612 u32 v32 = (u32)v; 694 u32 v32 = (u32)v;
613 695
614 return put_user(v32, uaddr32); 696 return put_user(v32, uaddr32);
697 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
698 void __user *uaddr = (void __user *)(long)reg->addr;
699
700 return copy_to_user(uaddr, vs, 16);
615 } else { 701 } else {
616 return -EINVAL; 702 return -EINVAL;
617 } 703 }
@@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
621 const struct kvm_one_reg *reg) 707 const struct kvm_one_reg *reg)
622{ 708{
623 struct mips_coproc *cop0 = vcpu->arch.cop0; 709 struct mips_coproc *cop0 = vcpu->arch.cop0;
624 u64 v; 710 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
711 s64 v;
712 s64 vs[2];
713 unsigned int idx;
625 714
626 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { 715 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
627 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; 716 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
635 if (get_user(v32, uaddr32) != 0) 724 if (get_user(v32, uaddr32) != 0)
636 return -EFAULT; 725 return -EFAULT;
637 v = (s64)v32; 726 v = (s64)v32;
727 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
728 void __user *uaddr = (void __user *)(long)reg->addr;
729
730 return copy_from_user(vs, uaddr, 16);
638 } else { 731 } else {
639 return -EINVAL; 732 return -EINVAL;
640 } 733 }
641 734
642 switch (reg->id) { 735 switch (reg->id) {
736 /* General purpose registers */
643 case KVM_REG_MIPS_R0: 737 case KVM_REG_MIPS_R0:
644 /* Silently ignore requests to set $0 */ 738 /* Silently ignore requests to set $0 */
645 break; 739 break;
@@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
656 vcpu->arch.pc = v; 750 vcpu->arch.pc = v;
657 break; 751 break;
658 752
753 /* Floating point registers */
754 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
755 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
756 return -EINVAL;
757 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
758 /* Odd singles in top of even double when FR=0 */
759 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
760 set_fpr32(&fpu->fpr[idx], 0, v);
761 else
762 set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
763 break;
764 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
765 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
766 return -EINVAL;
767 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
768 /* Can't access odd doubles in FR=0 mode */
769 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
770 return -EINVAL;
771 set_fpr64(&fpu->fpr[idx], 0, v);
772 break;
773 case KVM_REG_MIPS_FCR_IR:
774 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
775 return -EINVAL;
776 /* Read-only */
777 break;
778 case KVM_REG_MIPS_FCR_CSR:
779 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
780 return -EINVAL;
781 fpu->fcr31 = v;
782 break;
783
784 /* MIPS SIMD Architecture (MSA) registers */
785 case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
786 if (!kvm_mips_guest_has_msa(&vcpu->arch))
787 return -EINVAL;
788 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
789#ifdef CONFIG_CPU_LITTLE_ENDIAN
790 /* least significant byte first */
791 set_fpr64(&fpu->fpr[idx], 0, vs[0]);
792 set_fpr64(&fpu->fpr[idx], 1, vs[1]);
793#else
794 /* most significant byte first */
795 set_fpr64(&fpu->fpr[idx], 1, vs[0]);
796 set_fpr64(&fpu->fpr[idx], 0, vs[1]);
797#endif
798 break;
799 case KVM_REG_MIPS_MSA_IR:
800 if (!kvm_mips_guest_has_msa(&vcpu->arch))
801 return -EINVAL;
802 /* Read-only */
803 break;
804 case KVM_REG_MIPS_MSA_CSR:
805 if (!kvm_mips_guest_has_msa(&vcpu->arch))
806 return -EINVAL;
807 fpu->msacsr = v;
808 break;
809
810 /* Co-processor 0 registers */
659 case KVM_REG_MIPS_CP0_INDEX: 811 case KVM_REG_MIPS_CP0_INDEX:
660 kvm_write_c0_guest_index(cop0, v); 812 kvm_write_c0_guest_index(cop0, v);
661 break; 813 break;
@@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
686 case KVM_REG_MIPS_CP0_EPC: 838 case KVM_REG_MIPS_CP0_EPC:
687 kvm_write_c0_guest_epc(cop0, v); 839 kvm_write_c0_guest_epc(cop0, v);
688 break; 840 break;
841 case KVM_REG_MIPS_CP0_PRID:
842 kvm_write_c0_guest_prid(cop0, v);
843 break;
689 case KVM_REG_MIPS_CP0_ERROREPC: 844 case KVM_REG_MIPS_CP0_ERROREPC:
690 kvm_write_c0_guest_errorepc(cop0, v); 845 kvm_write_c0_guest_errorepc(cop0, v);
691 break; 846 break;
@@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
693 case KVM_REG_MIPS_CP0_COUNT: 848 case KVM_REG_MIPS_CP0_COUNT:
694 case KVM_REG_MIPS_CP0_COMPARE: 849 case KVM_REG_MIPS_CP0_COMPARE:
695 case KVM_REG_MIPS_CP0_CAUSE: 850 case KVM_REG_MIPS_CP0_CAUSE:
851 case KVM_REG_MIPS_CP0_CONFIG:
852 case KVM_REG_MIPS_CP0_CONFIG1:
853 case KVM_REG_MIPS_CP0_CONFIG2:
854 case KVM_REG_MIPS_CP0_CONFIG3:
855 case KVM_REG_MIPS_CP0_CONFIG4:
856 case KVM_REG_MIPS_CP0_CONFIG5:
696 case KVM_REG_MIPS_COUNT_CTL: 857 case KVM_REG_MIPS_COUNT_CTL:
697 case KVM_REG_MIPS_COUNT_RESUME: 858 case KVM_REG_MIPS_COUNT_RESUME:
698 case KVM_REG_MIPS_COUNT_HZ: 859 case KVM_REG_MIPS_COUNT_HZ:
@@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
703 return 0; 864 return 0;
704} 865}
705 866
867static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
868 struct kvm_enable_cap *cap)
869{
870 int r = 0;
871
872 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
873 return -EINVAL;
874 if (cap->flags)
875 return -EINVAL;
876 if (cap->args[0])
877 return -EINVAL;
878
879 switch (cap->cap) {
880 case KVM_CAP_MIPS_FPU:
881 vcpu->arch.fpu_enabled = true;
882 break;
883 case KVM_CAP_MIPS_MSA:
884 vcpu->arch.msa_enabled = true;
885 break;
886 default:
887 r = -EINVAL;
888 break;
889 }
890
891 return r;
892}
893
706long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, 894long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
707 unsigned long arg) 895 unsigned long arg)
708{ 896{
@@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
760 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 948 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
761 break; 949 break;
762 } 950 }
951 case KVM_ENABLE_CAP: {
952 struct kvm_enable_cap cap;
953
954 r = -EFAULT;
955 if (copy_from_user(&cap, argp, sizeof(cap)))
956 goto out;
957 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
958 break;
959 }
763 default: 960 default:
764 r = -ENOIOCTLCMD; 961 r = -ENOIOCTLCMD;
765 } 962 }
@@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
868 1065
869 switch (ext) { 1066 switch (ext) {
870 case KVM_CAP_ONE_REG: 1067 case KVM_CAP_ONE_REG:
1068 case KVM_CAP_ENABLE_CAP:
871 r = 1; 1069 r = 1;
872 break; 1070 break;
873 case KVM_CAP_COALESCED_MMIO: 1071 case KVM_CAP_COALESCED_MMIO:
874 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 1072 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
875 break; 1073 break;
1074 case KVM_CAP_MIPS_FPU:
1075 r = !!cpu_has_fpu;
1076 break;
1077 case KVM_CAP_MIPS_MSA:
1078 /*
1079 * We don't support MSA vector partitioning yet:
1080 * 1) It would require explicit support which can't be tested
1081 * yet due to lack of support in current hardware.
1082 * 2) It extends the state that would need to be saved/restored
1083 * by e.g. QEMU for migration.
1084 *
1085 * When vector partitioning hardware becomes available, support
1086 * could be added by requiring a flag when enabling
1087 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
1088 * to save/restore the appropriate extra state.
1089 */
1090 r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
1091 break;
876 default: 1092 default:
877 r = 0; 1093 r = 0;
878 break; 1094 break;
@@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1119 ret = kvm_mips_callbacks->handle_break(vcpu); 1335 ret = kvm_mips_callbacks->handle_break(vcpu);
1120 break; 1336 break;
1121 1337
1338 case T_TRAP:
1339 ++vcpu->stat.trap_inst_exits;
1340 trace_kvm_exit(vcpu, TRAP_INST_EXITS);
1341 ret = kvm_mips_callbacks->handle_trap(vcpu);
1342 break;
1343
1344 case T_MSAFPE:
1345 ++vcpu->stat.msa_fpe_exits;
1346 trace_kvm_exit(vcpu, MSA_FPE_EXITS);
1347 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
1348 break;
1349
1350 case T_FPE:
1351 ++vcpu->stat.fpe_exits;
1352 trace_kvm_exit(vcpu, FPE_EXITS);
1353 ret = kvm_mips_callbacks->handle_fpe(vcpu);
1354 break;
1355
1356 case T_MSADIS:
1357 ++vcpu->stat.msa_disabled_exits;
1358 trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
1359 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1360 break;
1361
1122 default: 1362 default:
1123 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", 1363 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
1124 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, 1364 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
@@ -1146,12 +1386,233 @@ skip_emul:
1146 } 1386 }
1147 } 1387 }
1148 1388
1389 if (ret == RESUME_GUEST) {
1390 /*
1391 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
1392 * is live), restore FCR31 / MSACSR.
1393 *
1394 * This should be before returning to the guest exception
1395 * vector, as it may well cause an [MSA] FP exception if there
1396 * are pending exception bits unmasked. (see
1397 * kvm_mips_csr_die_notifier() for how that is handled).
1398 * kvm_mips_csr_die_notify() for how that is handled).
1399 if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1400 read_c0_status() & ST0_CU1)
1401 __kvm_restore_fcsr(&vcpu->arch);
1402
1403 if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1404 read_c0_config5() & MIPS_CONF5_MSAEN)
1405 __kvm_restore_msacsr(&vcpu->arch);
1406 }
1407
1149 /* Disable HTW before returning to guest or host */ 1408 /* Disable HTW before returning to guest or host */
1150 htw_stop(); 1409 htw_stop();
1151 1410
1152 return ret; 1411 return ret;
1153} 1412}
1154 1413
1414/* Enable FPU for guest and restore context */
1415void kvm_own_fpu(struct kvm_vcpu *vcpu)
1416{
1417 struct mips_coproc *cop0 = vcpu->arch.cop0;
1418 unsigned int sr, cfg5;
1419
1420 preempt_disable();
1421
1422 sr = kvm_read_c0_guest_status(cop0);
1423
1424 /*
1425 * If MSA state is already live, it is undefined how it interacts with
1426 * FR=0 FPU state, and we don't want to hit reserved instruction
1427 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
1428 * play it safe and save it first.
1429 *
1430 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
1431 * get called when guest CU1 is set; however, we can't trust the guest
1432 * not to clobber the status register directly via the commpage.
1433 */
1434 if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
1435 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1436 kvm_lose_fpu(vcpu);
1437
1438 /*
1439 * Enable FPU for guest
1440 * We set FR and FRE according to guest context
1441 */
1442 change_c0_status(ST0_CU1 | ST0_FR, sr);
1443 if (cpu_has_fre) {
1444 cfg5 = kvm_read_c0_guest_config5(cop0);
1445 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1446 }
1447 enable_fpu_hazard();
1448
1449 /* If guest FPU state not active, restore it now */
1450 if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
1451 __kvm_restore_fpu(&vcpu->arch);
1452 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
1453 }
1454
1455 preempt_enable();
1456}
1457
1458#ifdef CONFIG_CPU_HAS_MSA
1459/* Enable MSA for guest and restore context */
1460void kvm_own_msa(struct kvm_vcpu *vcpu)
1461{
1462 struct mips_coproc *cop0 = vcpu->arch.cop0;
1463 unsigned int sr, cfg5;
1464
1465 preempt_disable();
1466
1467 /*
1468 * Enable FPU if enabled in guest, since we're restoring FPU context
1469 * anyway. We set FR and FRE according to guest context.
1470 */
1471 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1472 sr = kvm_read_c0_guest_status(cop0);
1473
1474 /*
1475 * If FR=0 FPU state is already live, it is undefined how it
1476 * interacts with MSA state, so play it safe and save it first.
1477 */
1478 if (!(sr & ST0_FR) &&
1479 (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
1480 KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
1481 kvm_lose_fpu(vcpu);
1482
1483 change_c0_status(ST0_CU1 | ST0_FR, sr);
1484 if (sr & ST0_CU1 && cpu_has_fre) {
1485 cfg5 = kvm_read_c0_guest_config5(cop0);
1486 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1487 }
1488 }
1489
1490 /* Enable MSA for guest */
1491 set_c0_config5(MIPS_CONF5_MSAEN);
1492 enable_fpu_hazard();
1493
1494 switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
1495 case KVM_MIPS_FPU_FPU:
1496 /*
1497 * Guest FPU state already loaded; only restore upper MSA state
1498 */
1499 __kvm_restore_msa_upper(&vcpu->arch);
1500 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
1501 break;
1502 case 0:
1503 /* Neither FPU nor MSA already active, restore full MSA state */
1504 __kvm_restore_msa(&vcpu->arch);
1505 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
1506 if (kvm_mips_guest_has_fpu(&vcpu->arch))
1507 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
1508 break;
1509 default:
1510 break;
1511 }
1512
1513 preempt_enable();
1514}
1515#endif
1516
1517/* Drop FPU & MSA without saving it */
1518void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1519{
1520 preempt_disable();
1521 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
1522 disable_msa();
1523 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
1524 }
1525 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1526 clear_c0_status(ST0_CU1 | ST0_FR);
1527 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
1528 }
1529 preempt_enable();
1530}
1531
1532/* Save and disable FPU & MSA */
1533void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1534{
1535 /*
1536 * FPU & MSA get disabled in root context (hardware) when they are disabled
1537 * in guest context (software), but the register state in the hardware
1538 * may still be in use. This is why we explicitly re-enable the hardware
1539 * before saving.
1540 */
1541
1542 preempt_disable();
1543 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
1544 set_c0_config5(MIPS_CONF5_MSAEN);
1545 enable_fpu_hazard();
1546
1547 __kvm_save_msa(&vcpu->arch);
1548
1549 /* Disable MSA & FPU */
1550 disable_msa();
1551 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1552 clear_c0_status(ST0_CU1 | ST0_FR);
1553 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
1554 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1555 set_c0_status(ST0_CU1);
1556 enable_fpu_hazard();
1557
1558 __kvm_save_fpu(&vcpu->arch);
1559 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
1560
1561 /* Disable FPU */
1562 clear_c0_status(ST0_CU1 | ST0_FR);
1563 }
1564 preempt_enable();
1565}
1566
1567/*
1568 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR, which are
1569 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
1570 * exception if cause bits are set in the value being written.
1571 */
1572static int kvm_mips_csr_die_notify(struct notifier_block *self,
1573 unsigned long cmd, void *ptr)
1574{
1575 struct die_args *args = (struct die_args *)ptr;
1576 struct pt_regs *regs = args->regs;
1577 unsigned long pc;
1578
1579 /* Only interested in FPE and MSAFPE */
1580 if (cmd != DIE_FP && cmd != DIE_MSAFP)
1581 return NOTIFY_DONE;
1582
1583 /* Return immediately if guest context isn't active */
1584 if (!(current->flags & PF_VCPU))
1585 return NOTIFY_DONE;
1586
1587 /* Should never get here from user mode */
1588 BUG_ON(user_mode(regs));
1589
1590 pc = instruction_pointer(regs);
1591 switch (cmd) {
1592 case DIE_FP:
1593 /* match 2nd instruction in __kvm_restore_fcsr */
1594 if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1595 return NOTIFY_DONE;
1596 break;
1597 case DIE_MSAFP:
1598 /* match 2nd/3rd instruction in __kvm_restore_msacsr */
1599 if (!cpu_has_msa ||
1600 pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1601 pc > (unsigned long)&__kvm_restore_msacsr + 8)
1602 return NOTIFY_DONE;
1603 break;
1604 }
1605
1606 /* Move PC forward a little and continue executing */
1607 instruction_pointer(regs) += 4;
1608
1609 return NOTIFY_STOP;
1610}
1611
1612static struct notifier_block kvm_mips_csr_die_notifier = {
1613 .notifier_call = kvm_mips_csr_die_notify,
1614};
1615
1155int __init kvm_mips_init(void) 1616int __init kvm_mips_init(void)
1156{ 1617{
1157 int ret; 1618 int ret;
@@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void)
1161 if (ret) 1622 if (ret)
1162 return ret; 1623 return ret;
1163 1624
1625 register_die_notifier(&kvm_mips_csr_die_notifier);
1626
1164 /* 1627 /*
1165 * On MIPS, kernel modules are executed from "mapped space", which 1628 * On MIPS, kernel modules are executed from "mapped space", which
1166 * requires TLBs. The TLB handling code is statically linked with 1629 * requires TLBs. The TLB handling code is statically linked with
@@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void)
1173 kvm_mips_release_pfn_clean = kvm_release_pfn_clean; 1636 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
1174 kvm_mips_is_error_pfn = is_error_pfn; 1637 kvm_mips_is_error_pfn = is_error_pfn;
1175 1638
1176 pr_info("KVM/MIPS Initialized\n");
1177 return 0; 1639 return 0;
1178} 1640}
1179 1641
@@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void)
1185 kvm_mips_release_pfn_clean = NULL; 1647 kvm_mips_release_pfn_clean = NULL;
1186 kvm_mips_is_error_pfn = NULL; 1648 kvm_mips_is_error_pfn = NULL;
1187 1649
1188 pr_info("KVM/MIPS unloaded\n"); 1650 unregister_die_notifier(&kvm_mips_csr_die_notifier);
1189} 1651}
1190 1652
1191module_init(kvm_mips_init); 1653module_init(kvm_mips_init);
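
From userland the new features are opt-in per vCPU: KVM_CHECK_EXTENSION reports availability, KVM_ENABLE_CAP (with no flags or args) enables them, and the new one-reg ids expose FP/MSA state. A hedged sketch of that flow, assuming an already-created vCPU fd and the uapi constants added by this series (KVM_CAP_MIPS_FPU, KVM_REG_MIPS_FCR_CSR):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: vcpu_fd is an already-created KVM vCPU fd. */
static int enable_guest_fpu(int vcpu_fd)
{
        struct kvm_enable_cap cap;
        struct kvm_one_reg reg;
        uint64_t fcsr = 0;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_MIPS_FPU;        /* no flags or args accepted */
        if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
                return -1;

        reg.id = KVM_REG_MIPS_FCR_CSR;     /* guest FCSR, from this series' uapi */
        reg.addr = (uintptr_t)&fcsr;
        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                return -1;

        printf("guest fcr31 = %#llx\n", (unsigned long long)fcsr);
        return 0;
}
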
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S
new file mode 100644
index 000000000000..d02f0c6cc2cc
--- /dev/null
+++ b/arch/mips/kvm/msa.S
@@ -0,0 +1,161 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * MIPS SIMD Architecture (MSA) context handling code for KVM.
7 *
8 * Copyright (C) 2015 Imagination Technologies Ltd.
9 */
10
11#include <asm/asm.h>
12#include <asm/asm-offsets.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15
16 .set noreorder
17 .set noat
18
19LEAF(__kvm_save_msa)
20 st_d 0, VCPU_FPR0, a0
21 st_d 1, VCPU_FPR1, a0
22 st_d 2, VCPU_FPR2, a0
23 st_d 3, VCPU_FPR3, a0
24 st_d 4, VCPU_FPR4, a0
25 st_d 5, VCPU_FPR5, a0
26 st_d 6, VCPU_FPR6, a0
27 st_d 7, VCPU_FPR7, a0
28 st_d 8, VCPU_FPR8, a0
29 st_d 9, VCPU_FPR9, a0
30 st_d 10, VCPU_FPR10, a0
31 st_d 11, VCPU_FPR11, a0
32 st_d 12, VCPU_FPR12, a0
33 st_d 13, VCPU_FPR13, a0
34 st_d 14, VCPU_FPR14, a0
35 st_d 15, VCPU_FPR15, a0
36 st_d 16, VCPU_FPR16, a0
37 st_d 17, VCPU_FPR17, a0
38 st_d 18, VCPU_FPR18, a0
39 st_d 19, VCPU_FPR19, a0
40 st_d 20, VCPU_FPR20, a0
41 st_d 21, VCPU_FPR21, a0
42 st_d 22, VCPU_FPR22, a0
43 st_d 23, VCPU_FPR23, a0
44 st_d 24, VCPU_FPR24, a0
45 st_d 25, VCPU_FPR25, a0
46 st_d 26, VCPU_FPR26, a0
47 st_d 27, VCPU_FPR27, a0
48 st_d 28, VCPU_FPR28, a0
49 st_d 29, VCPU_FPR29, a0
50 st_d 30, VCPU_FPR30, a0
51 st_d 31, VCPU_FPR31, a0
52 jr ra
53 nop
54 END(__kvm_save_msa)
55
56LEAF(__kvm_restore_msa)
57 ld_d 0, VCPU_FPR0, a0
58 ld_d 1, VCPU_FPR1, a0
59 ld_d 2, VCPU_FPR2, a0
60 ld_d 3, VCPU_FPR3, a0
61 ld_d 4, VCPU_FPR4, a0
62 ld_d 5, VCPU_FPR5, a0
63 ld_d 6, VCPU_FPR6, a0
64 ld_d 7, VCPU_FPR7, a0
65 ld_d 8, VCPU_FPR8, a0
66 ld_d 9, VCPU_FPR9, a0
67 ld_d 10, VCPU_FPR10, a0
68 ld_d 11, VCPU_FPR11, a0
69 ld_d 12, VCPU_FPR12, a0
70 ld_d 13, VCPU_FPR13, a0
71 ld_d 14, VCPU_FPR14, a0
72 ld_d 15, VCPU_FPR15, a0
73 ld_d 16, VCPU_FPR16, a0
74 ld_d 17, VCPU_FPR17, a0
75 ld_d 18, VCPU_FPR18, a0
76 ld_d 19, VCPU_FPR19, a0
77 ld_d 20, VCPU_FPR20, a0
78 ld_d 21, VCPU_FPR21, a0
79 ld_d 22, VCPU_FPR22, a0
80 ld_d 23, VCPU_FPR23, a0
81 ld_d 24, VCPU_FPR24, a0
82 ld_d 25, VCPU_FPR25, a0
83 ld_d 26, VCPU_FPR26, a0
84 ld_d 27, VCPU_FPR27, a0
85 ld_d 28, VCPU_FPR28, a0
86 ld_d 29, VCPU_FPR29, a0
87 ld_d 30, VCPU_FPR30, a0
88 ld_d 31, VCPU_FPR31, a0
89 jr ra
90 nop
91 END(__kvm_restore_msa)
92
93 .macro kvm_restore_msa_upper wr, off, base
94 .set push
95 .set noat
96#ifdef CONFIG_64BIT
97 ld $1, \off(\base)
98 insert_d \wr, 1
99#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
100 lw $1, \off(\base)
101 insert_w \wr, 2
102 lw $1, (\off+4)(\base)
103 insert_w \wr, 3
104#else /* CONFIG_CPU_BIG_ENDIAN */
105 lw $1, (\off+4)(\base)
106 insert_w \wr, 2
107 lw $1, \off(\base)
108 insert_w \wr, 3
109#endif
110 .set pop
111 .endm
112
113LEAF(__kvm_restore_msa_upper)
114 kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0
115 kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0
116 kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0
117 kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0
118 kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0
119 kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0
120 kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0
121 kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0
122 kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0
123 kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0
124 kvm_restore_msa_upper 10, VCPU_FPR10+8, a0
125 kvm_restore_msa_upper 11, VCPU_FPR11+8, a0
126 kvm_restore_msa_upper 12, VCPU_FPR12+8, a0
127 kvm_restore_msa_upper 13, VCPU_FPR13+8, a0
128 kvm_restore_msa_upper 14, VCPU_FPR14+8, a0
129 kvm_restore_msa_upper 15, VCPU_FPR15+8, a0
130 kvm_restore_msa_upper 16, VCPU_FPR16+8, a0
131 kvm_restore_msa_upper 17, VCPU_FPR17+8, a0
132 kvm_restore_msa_upper 18, VCPU_FPR18+8, a0
133 kvm_restore_msa_upper 19, VCPU_FPR19+8, a0
134 kvm_restore_msa_upper 20, VCPU_FPR20+8, a0
135 kvm_restore_msa_upper 21, VCPU_FPR21+8, a0
136 kvm_restore_msa_upper 22, VCPU_FPR22+8, a0
137 kvm_restore_msa_upper 23, VCPU_FPR23+8, a0
138 kvm_restore_msa_upper 24, VCPU_FPR24+8, a0
139 kvm_restore_msa_upper 25, VCPU_FPR25+8, a0
140 kvm_restore_msa_upper 26, VCPU_FPR26+8, a0
141 kvm_restore_msa_upper 27, VCPU_FPR27+8, a0
142 kvm_restore_msa_upper 28, VCPU_FPR28+8, a0
143 kvm_restore_msa_upper 29, VCPU_FPR29+8, a0
144 kvm_restore_msa_upper 30, VCPU_FPR30+8, a0
145 kvm_restore_msa_upper 31, VCPU_FPR31+8, a0
146 jr ra
147 nop
148 END(__kvm_restore_msa_upper)
149
150LEAF(__kvm_restore_msacsr)
151 lw t0, VCPU_MSA_CSR(a0)
152 /*
153 * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
154 * See kvm_mips_csr_die_notify(), which handles t0 containing a value
155 * that triggers an MSA FP exception, which must be stepped over and
156 * ignored since the set cause bits must remain there for the guest.
157 */
158 _ctcmsa MSA_CSR, t0
159 jr ra
160 nop
161 END(__kvm_restore_msacsr)
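
kvm_restore_msa_upper fills bytes 8..15 of each vector register either with a single 64-bit load or with two 32-bit loads whose element targets swap with endianness, matching the vs[] ordering in kvm_mips_get_reg()/kvm_mips_set_reg() above. A small host-side illustration of that swap:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint64_t upper = 0x1122334455667788ull;  /* bytes 8..15 of one FPR */
        uint32_t w[2];

        memcpy(w, &upper, sizeof(w));
#ifdef __MIPSEB__
        /* big endian: the lower-addressed word is the most significant */
        printf("element 2 = %#x, element 3 = %#x\n", w[1], w[0]);
#else
        /* little endian: the lower-addressed word is the least significant */
        printf("element 2 = %#x, element 3 = %#x\n", w[0], w[1]);
#endif
        return 0;
}
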
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index a74d6024c5ad..888bb67070ac 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
25 "System Call", 25 "System Call",
26 "Reserved Inst", 26 "Reserved Inst",
27 "Break Inst", 27 "Break Inst",
28 "Trap Inst",
29 "MSA FPE",
30 "FPE",
31 "MSA Disabled",
28 "D-Cache Flushes", 32 "D-Cache Flushes",
29}; 33};
30 34
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index b6beb0e07b1b..aed0ac2a4972 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
733 } 733 }
734 } 734 }
735 735
736 /* restore guest state to registers */
737 kvm_mips_callbacks->vcpu_set_regs(vcpu);
738
736 local_irq_restore(flags); 739 local_irq_restore(flags);
737 740
738} 741}
@@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
751 vcpu->arch.preempt_entryhi = read_c0_entryhi(); 754 vcpu->arch.preempt_entryhi = read_c0_entryhi();
752 vcpu->arch.last_sched_cpu = cpu; 755 vcpu->arch.last_sched_cpu = cpu;
753 756
757 /* save guest state in registers */
758 kvm_mips_callbacks->vcpu_get_regs(vcpu);
759
754 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & 760 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
755 ASID_VERSION_MASK)) { 761 ASID_VERSION_MASK)) {
756 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, 762 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index fd7257b70e65..d836ed5b0bc7 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
39 39
40static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) 40static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
41{ 41{
42 struct mips_coproc *cop0 = vcpu->arch.cop0;
42 struct kvm_run *run = vcpu->run; 43 struct kvm_run *run = vcpu->run;
43 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; 44 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
44 unsigned long cause = vcpu->arch.host_cp0_cause; 45 unsigned long cause = vcpu->arch.host_cp0_cause;
45 enum emulation_result er = EMULATE_DONE; 46 enum emulation_result er = EMULATE_DONE;
46 int ret = RESUME_GUEST; 47 int ret = RESUME_GUEST;
47 48
48 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) 49 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
49 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); 50 /* FPU Unusable */
50 else 51 if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
52 (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
53 /*
54 * Unusable/no FPU in guest:
55 * deliver guest COP1 Unusable Exception
56 */
57 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
58 } else {
59 /* Restore FPU state */
60 kvm_own_fpu(vcpu);
61 er = EMULATE_DONE;
62 }
63 } else {
51 er = kvm_mips_emulate_inst(cause, opc, run, vcpu); 64 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
65 }
52 66
53 switch (er) { 67 switch (er) {
54 case EMULATE_DONE: 68 case EMULATE_DONE:
@@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
330 return ret; 344 return ret;
331} 345}
332 346
347static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
348{
349 struct kvm_run *run = vcpu->run;
350 uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
351 unsigned long cause = vcpu->arch.host_cp0_cause;
352 enum emulation_result er = EMULATE_DONE;
353 int ret = RESUME_GUEST;
354
355 er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
356 if (er == EMULATE_DONE) {
357 ret = RESUME_GUEST;
358 } else {
359 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
360 ret = RESUME_HOST;
361 }
362 return ret;
363}
364
365static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
366{
367 struct kvm_run *run = vcpu->run;
368 uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
369 unsigned long cause = vcpu->arch.host_cp0_cause;
370 enum emulation_result er = EMULATE_DONE;
371 int ret = RESUME_GUEST;
372
373 er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
374 if (er == EMULATE_DONE) {
375 ret = RESUME_GUEST;
376 } else {
377 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
378 ret = RESUME_HOST;
379 }
380 return ret;
381}
382
383static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
384{
385 struct kvm_run *run = vcpu->run;
386 uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
387 unsigned long cause = vcpu->arch.host_cp0_cause;
388 enum emulation_result er = EMULATE_DONE;
389 int ret = RESUME_GUEST;
390
391 er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
392 if (er == EMULATE_DONE) {
393 ret = RESUME_GUEST;
394 } else {
395 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
396 ret = RESUME_HOST;
397 }
398 return ret;
399}
400
401/**
402 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
403 * @vcpu: Virtual CPU context.
404 *
405 * Handle when the guest attempts to use MSA when it is disabled.
406 */
407static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
408{
409 struct mips_coproc *cop0 = vcpu->arch.cop0;
410 struct kvm_run *run = vcpu->run;
411 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
412 unsigned long cause = vcpu->arch.host_cp0_cause;
413 enum emulation_result er = EMULATE_DONE;
414 int ret = RESUME_GUEST;
415
416 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
417 (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
418 /*
419 * No MSA in guest, or FPU enabled but not in FR=1 mode:
420 * deliver a guest reserved instruction exception
421 */
422 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
423 } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
424 /* MSA disabled by guest, guest MSA disabled exception */
425 er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
426 } else {
427 /* Restore MSA/FPU state */
428 kvm_own_msa(vcpu);
429 er = EMULATE_DONE;
430 }
431
432 switch (er) {
433 case EMULATE_DONE:
434 ret = RESUME_GUEST;
435 break;
436
437 case EMULATE_FAIL:
438 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
439 ret = RESUME_HOST;
440 break;
441
442 default:
443 BUG();
444 }
445 return ret;
446}
447
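kvm_trap_emul_handle_msa_disabled() above picks one of three outcomes. MSA requires the 64-bit FP register model, so with the guest FPU enabled but Status.FR = 0 (or with no guest MSA capability at all) the access is a reserved instruction from the guest's point of view; with guest Config5.MSAEn clear it becomes a guest MSA Disabled exception; otherwise the hardware MSA/FPU context is handed over lazily. The same decision written as a standalone classifier, with hypothetical names, the logic mirroring the patch:

enum msa_disposition { MSA_GUEST_RI, MSA_GUEST_MSADIS, MSA_OWN_HW };

static enum msa_disposition classify_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int st = kvm_read_c0_guest_status(cop0);

	/* MSA mandates FR=1: guest FPU enabled (CU1=1) with FR=0, or no
	 * guest MSA, means a reserved instruction exception. */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (st & (ST0_CU1 | ST0_FR)) == ST0_CU1)
		return MSA_GUEST_RI;

	/* Guest left Config5.MSAEn clear: reflect MSA Disabled. */
	if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN))
		return MSA_GUEST_MSADIS;

	/* Legitimate use: lazily hand over the MSA/FPU hardware. */
	return MSA_OWN_HW;
}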
 static int kvm_trap_emul_vm_init(struct kvm *kvm)
 {
 	return 0;
@@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 	 * guest will come up as expected, for now we simulate a MIPS 24kc
 	 */
 	kvm_write_c0_guest_prid(cop0, 0x00019300);
-	kvm_write_c0_guest_config(cop0,
-				  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+	/* Have config1, Cacheable, noncoherent, write-back, write allocate */
+	kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
+				  (0x1 << CP0C0_AR) |
 				  (MMU_TYPE_R4000 << CP0C0_MT));
 
 	/* Read the cache characteristics from the host Config1 Register */
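The guest Config0 is now assembled from named fields instead of a MIPS_CONFIG0 blob. Expanding it with the field positions from asm/mipsregs.h (M is bit 31, K0 bits 2:0, MT bits 9:7, AR bits 12:10) and MMU_TYPE_R4000 == 1 from asm/cpu.h gives the constant below; treat the arithmetic as a worked check rather than authoritative documentation:

/*
 * MIPS_CONF_M                     0x80000000  Config1 register implemented
 * 0x3 << CP0C0_K0                 0x00000003  kseg0 cacheable, noncoherent,
 *                                             write-back, write allocate
 * 0x1 << CP0C0_AR                 0x00000400  architecture Release 2
 * MMU_TYPE_R4000 << CP0C0_MT      0x00000080  standard TLB MMU
 *                                 ----------
 * guest Config0                   0x80000483
 */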
@@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 			 (1 << CP0C1_WR) | (1 << CP0C1_CA));
 	kvm_write_c0_guest_config1(cop0, config1);
 
-	kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
-	/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
-	kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
-				   (1 << CP0C3_ULRI));
+	/* Have config3, no tertiary/secondary caches implemented */
+	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
+	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
+
+	/* Have config4, UserLocal */
+	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
+
+	/* Have config5 */
+	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
+
+	/* No config6 */
+	kvm_write_c0_guest_config5(cop0, 0);
 
 	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
 	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
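The Config registers form a presence chain: bit 31 (M) of each one advertises that the next exists, and the hunk above terminates the chain cleanly by setting M in guest Config2, Config3 and Config4 while writing Config5 as 0 (no Config6). A sketch of the probe loop a guest would run, with read_c0_config_n() standing in for the individual read_c0_config1()..read_c0_config5() accessors:

/* Guest-side sketch; read_c0_config_n() is a hypothetical stand-in. */
static int count_config_registers(void)
{
	int n = 0;
	uint32_t cfg = read_c0_config();	/* Config0 */

	while ((cfg & MIPS_CONF_M) && n < 5)
		cfg = read_c0_config_n(++n);	/* Config1..Config5 */

	return n + 1;	/* Config0..ConfigN implemented */
}

With the values written above, a 24kc-style guest sees six Config registers (Config0 through Config5) and stops, since Config5.M reads as 0.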
@@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	int ret = 0;
+	unsigned int cur, change;
 
 	switch (reg->id) {
 	case KVM_REG_MIPS_CP0_COUNT:
@@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 			kvm_write_c0_guest_cause(cop0, v);
 		}
 		break;
+	case KVM_REG_MIPS_CP0_CONFIG:
+		/* read-only for now */
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG1:
+		cur = kvm_read_c0_guest_config1(cop0);
+		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config1(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG2:
+		/* read-only for now */
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG3:
+		cur = kvm_read_c0_guest_config3(cop0);
+		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config3(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG4:
+		cur = kvm_read_c0_guest_config4(cop0);
+		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config4(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG5:
+		cur = kvm_read_c0_guest_config5(cop0);
+		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_c0_guest_config5(cop0, v);
+		}
+		break;
 	case KVM_REG_MIPS_COUNT_CTL:
 		ret = kvm_mips_set_count_ctl(vcpu, v);
 		break;
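Each writable CONFIGn case above uses the same masked read-modify-write: the first XOR finds the bits userspace asked to change, the per-register wrmask keeps only the changes the implementation permits, and a second XOR applies exactly those. In generic form, with a worked bit pattern (the helper name is illustrative):

static uint32_t apply_wrmask(uint32_t cur, uint32_t v, uint32_t wrmask)
{
	uint32_t change = (cur ^ v) & wrmask;	/* requested & permitted diffs */

	return cur ^ change;			/* flip only those bits */
}

/* Example: cur = 0b1010, v = 0b0101, wrmask = 0b0011
 *	cur ^ v      = 0b1111   all four bits differ
 *	change       = 0b0011   only the low two may change
 *	cur ^ change = 0b1001   low bits take v, high bits keep cur
 */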
@@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
+{
+	kvm_lose_fpu(vcpu);
+
+	return 0;
+}
+
+static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
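The new vcpu_get_regs callback above calls kvm_lose_fpu(), which saves any live FPU/MSA hardware context back into the vcpu and relinquishes the units, so the architectural register state in memory is current before anyone consumes it. For completeness, a minimal userspace read of one such register through the standard KVM_GET_ONE_REG ioctl; the KVM_REG_MIPS_CP0_CONFIG1 id is assumed to come from the MIPS uapi header touched elsewhere in this series:

/* Hedged sketch: error handling omitted, register id assumed. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t get_guest_config1(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_CP0_CONFIG1,	/* assumed uapi id */
		.addr = (uintptr_t)&val,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return val;
}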
 static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	/* exit handlers */
 	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
@@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.handle_syscall = kvm_trap_emul_handle_syscall,
 	.handle_res_inst = kvm_trap_emul_handle_res_inst,
 	.handle_break = kvm_trap_emul_handle_break,
+	.handle_trap = kvm_trap_emul_handle_trap,
+	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
+	.handle_fpe = kvm_trap_emul_handle_fpe,
+	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
 
 	.vm_init = kvm_trap_emul_vm_init,
 	.vcpu_init = kvm_trap_emul_vcpu_init,
@@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.irq_clear = kvm_mips_irq_clear_cb,
 	.get_one_reg = kvm_trap_emul_get_one_reg,
 	.set_one_reg = kvm_trap_emul_set_one_reg,
+	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
+	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
 };
 
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)