author		Linus Torvalds <torvalds@linux-foundation.org>		2013-09-04 21:15:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2013-09-04 21:15:06 -0400
commit		ae7a835cc546fc67df90edaaa0c48ae2b22a29fe
tree		b1235437fde066ab0f272f164d75dc1b98a244cf /arch/mips/kvm
parent		cf39c8e5352b4fb9efedfe7e9acb566a85ed847c
parent		6b9e4fa07443f5baf5bbd7ab043abd6976f8d7bc
Merge branch 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Gleb Natapov:
"The highlights of the release are nested EPT and pv-ticketlocks
support (hypervisor part, guest part, which is most of the code, goes
through tip tree). Apart of that there are many fixes for all arches"
Fix up semantic conflicts as discussed in the pull request thread.
* 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (88 commits)
ARM: KVM: Add newlines to panic strings
ARM: KVM: Work around older compiler bug
ARM: KVM: Simplify tracepoint text
ARM: KVM: Fix kvm_set_pte assignment
ARM: KVM: vgic: Bump VGIC_NR_IRQS to 256
ARM: KVM: Bugfix: vgic_bytemap_get_reg per cpu regs
ARM: KVM: vgic: fix GICD_ICFGRn access
ARM: KVM: vgic: simplify vgic_get_target_reg
KVM: MMU: remove unused parameter
KVM: PPC: Book3S PR: Rework kvmppc_mmu_book3s_64_xlate()
KVM: PPC: Book3S PR: Make instruction fetch fallback work for system calls
KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
KVM: x86: update masterclock when kvmclock_offset is calculated (v2)
KVM: PPC: Book3S: Fix compile error in XICS emulation
KVM: PPC: Book3S PR: return appropriate error when allocation fails
arch: powerpc: kvm: add signed type cast for comparation
KVM: x86: add comments where MMIO does not return to the emulator
KVM: vmx: count exits to userspace during invalid guest emulation
KVM: rename __kvm_io_bus_sort_cmp to kvm_io_bus_cmp
kvm: optimize away THP checks in kvm_is_mmio_pfn()
...
Diffstat (limited to 'arch/mips/kvm')

-rw-r--r--	arch/mips/kvm/kvm_locore.S	969
-rw-r--r--	arch/mips/kvm/kvm_mips.c	4

2 files changed, 486 insertions(+), 487 deletions(-)
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index dca2aa665993..bbace092ad0a 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -1,13 +1,13 @@
 /*
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Main entry point for the guest, exception handling.
  *
  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
  */
 
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
@@ -55,195 +55,193 @@
  * a0: run
  * a1: vcpu
  */
+	.set	noreorder
+	.set	noat
 
 FEXPORT(__kvm_mips_vcpu_run)
-	.set	push
-	.set	noreorder
-	.set	noat
-
 	/* k0/k1 not being used in host kernel context */
-	addiu	k1,sp, -PT_SIZE
+	INT_ADDIU k1, sp, -PT_SIZE
 	LONG_S	$0, PT_R0(k1)
 	LONG_S	$1, PT_R1(k1)
 	LONG_S	$2, PT_R2(k1)
 	LONG_S	$3, PT_R3(k1)
 
 	LONG_S	$4, PT_R4(k1)
 	LONG_S	$5, PT_R5(k1)
 	LONG_S	$6, PT_R6(k1)
 	LONG_S	$7, PT_R7(k1)
 
 	LONG_S	$8, PT_R8(k1)
 	LONG_S	$9, PT_R9(k1)
 	LONG_S	$10, PT_R10(k1)
 	LONG_S	$11, PT_R11(k1)
 	LONG_S	$12, PT_R12(k1)
 	LONG_S	$13, PT_R13(k1)
 	LONG_S	$14, PT_R14(k1)
 	LONG_S	$15, PT_R15(k1)
 	LONG_S	$16, PT_R16(k1)
 	LONG_S	$17, PT_R17(k1)
 
 	LONG_S	$18, PT_R18(k1)
 	LONG_S	$19, PT_R19(k1)
 	LONG_S	$20, PT_R20(k1)
 	LONG_S	$21, PT_R21(k1)
 	LONG_S	$22, PT_R22(k1)
 	LONG_S	$23, PT_R23(k1)
 	LONG_S	$24, PT_R24(k1)
 	LONG_S	$25, PT_R25(k1)
 
 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
 
 	LONG_S	$28, PT_R28(k1)
 	LONG_S	$29, PT_R29(k1)
 	LONG_S	$30, PT_R30(k1)
 	LONG_S	$31, PT_R31(k1)
 
 	/* Save hi/lo */
 	mflo	v0
 	LONG_S	v0, PT_LO(k1)
 	mfhi	v1
 	LONG_S	v1, PT_HI(k1)
 
 	/* Save host status */
 	mfc0	v0, CP0_STATUS
 	LONG_S	v0, PT_STATUS(k1)
 
 	/* Save host ASID, shove it into the BVADDR location */
-	mfc0	v1,CP0_ENTRYHI
+	mfc0	v1, CP0_ENTRYHI
 	andi	v1, 0xff
 	LONG_S	v1, PT_HOST_ASID(k1)
 
 	/* Save DDATA_LO, will be used to store pointer to vcpu */
 	mfc0	v1, CP0_DDATA_LO
 	LONG_S	v1, PT_HOST_USERLOCAL(k1)
 
 	/* DDATA_LO has pointer to vcpu */
-	mtc0	a1,CP0_DDATA_LO
+	mtc0	a1, CP0_DDATA_LO
 
 	/* Offset into vcpu->arch */
-	addiu	k1, a1, VCPU_HOST_ARCH
+	INT_ADDIU k1, a1, VCPU_HOST_ARCH
 
-	/* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
-	LONG_S	sp, VCPU_HOST_STACK(k1)
+	/*
+	 * Save the host stack to VCPU, used for exception processing
+	 * when we exit from the Guest
+	 */
+	LONG_S	sp, VCPU_HOST_STACK(k1)
 
 	/* Save the kernel gp as well */
 	LONG_S	gp, VCPU_HOST_GP(k1)
 
 	/* Setup status register for running the guest in UM, interrupts are disabled */
-	li	k0,(ST0_EXL | KSU_USER| ST0_BEV)
-	mtc0	k0,CP0_STATUS
+	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
+	mtc0	k0, CP0_STATUS
 	ehb
 
 	/* load up the new EBASE */
 	LONG_L	k0, VCPU_GUEST_EBASE(k1)
-	mtc0	k0,CP0_EBASE
+	mtc0	k0, CP0_EBASE
 
-	/* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
-	 * but make sure that timer interrupts are enabled
+	/*
+	 * Now that the new EBASE has been loaded, unset BEV, set
+	 * interrupt mask as it was but make sure that timer interrupts
+	 * are enabled
 	 */
-	li	k0,(ST0_EXL | KSU_USER | ST0_IE)
+	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
 	andi	v0, v0, ST0_IM
 	or	k0, k0, v0
-	mtc0	k0,CP0_STATUS
+	mtc0	k0, CP0_STATUS
 	ehb
 
 
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
 
 FEXPORT(__kvm_mips_load_asid)
 	/* Set the ASID for the Guest Kernel */
-	sll	t0, t0, 1			/* with kseg0 @ 0x40000000, kernel */
+	INT_SLL	t0, t0, 1			/* with kseg0 @ 0x40000000, kernel */
 						/* addresses shift to 0x80000000 */
 	bltz	t0, 1f				/* If kernel */
-	addiu	t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
-	addiu	t1, k1, VCPU_GUEST_USER_ASID	/* else user */
+	INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
+	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID		/* else user */
 1:
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	LONG_L	t2, TI_CPU($28)			/* smp_processor_id */
-	sll	t2, t2, 2			/* x4 */
-	addu	t3, t1, t2
+	INT_SLL	t2, t2, 2			/* x4 */
+	REG_ADDU t3, t1, t2
 	LONG_L	k0, (t3)
 	andi	k0, k0, 0xff
-	mtc0	k0,CP0_ENTRYHI
+	mtc0	k0, CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
 	/* Now load up the Guest Context from VCPU */
 	LONG_L	$1, VCPU_R1(k1)
 	LONG_L	$2, VCPU_R2(k1)
 	LONG_L	$3, VCPU_R3(k1)
 
 	LONG_L	$4, VCPU_R4(k1)
 	LONG_L	$5, VCPU_R5(k1)
 	LONG_L	$6, VCPU_R6(k1)
 	LONG_L	$7, VCPU_R7(k1)
 
 	LONG_L	$8, VCPU_R8(k1)
 	LONG_L	$9, VCPU_R9(k1)
 	LONG_L	$10, VCPU_R10(k1)
 	LONG_L	$11, VCPU_R11(k1)
 	LONG_L	$12, VCPU_R12(k1)
 	LONG_L	$13, VCPU_R13(k1)
 	LONG_L	$14, VCPU_R14(k1)
 	LONG_L	$15, VCPU_R15(k1)
 	LONG_L	$16, VCPU_R16(k1)
 	LONG_L	$17, VCPU_R17(k1)
 	LONG_L	$18, VCPU_R18(k1)
 	LONG_L	$19, VCPU_R19(k1)
 	LONG_L	$20, VCPU_R20(k1)
 	LONG_L	$21, VCPU_R21(k1)
 	LONG_L	$22, VCPU_R22(k1)
 	LONG_L	$23, VCPU_R23(k1)
 	LONG_L	$24, VCPU_R24(k1)
 	LONG_L	$25, VCPU_R25(k1)
 
 	/* k0/k1 loaded up later */
 
 	LONG_L	$28, VCPU_R28(k1)
 	LONG_L	$29, VCPU_R29(k1)
 	LONG_L	$30, VCPU_R30(k1)
 	LONG_L	$31, VCPU_R31(k1)
 
 	/* Restore hi/lo */
 	LONG_L	k0, VCPU_LO(k1)
 	mtlo	k0
 
 	LONG_L	k0, VCPU_HI(k1)
 	mthi	k0
 
 FEXPORT(__kvm_mips_load_k0k1)
 	/* Restore the guest's k0/k1 registers */
 	LONG_L	k0, VCPU_R26(k1)
 	LONG_L	k1, VCPU_R27(k1)
 
 	/* Jump to guest */
 	eret
-	.set	pop
 
 VECTOR(MIPSX(exception), unknown)
 /*
  * Find out what mode we came from and jump to the proper handler.
  */
-	.set	push
-	.set	noat
-	.set	noreorder
 	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
 	ehb				#02:
 
 	mfc0	k0, CP0_EBASE		#02: Get EBASE
-	srl	k0, k0, 10		#03: Get rid of CPUNum
-	sll	k0, k0, 10		#04
+	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
+	INT_SLL	k0, k0, 10		#04
 	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
-	addiu	k0, k0, 0x2000		#06: Exception handler is installed @ offset 0x2000
+	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is installed @ offset 0x2000
 	j	k0			#07: jump to the function
 	nop				#08: branch delay slot
-	.set	push
 VECTOR_END(MIPSX(exceptionEnd))
 .end MIPSX(exception)
 
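A note on the exception-vector arithmetic in the hunk above: the low 10 bits of EBASE carry the CPU number, so shifting right and back left by 10 isolates the base of the per-VCPU exception region; guest k1 is spilled at offset 0x3000 from that base and the installed handler sits at offset 0x2000. A minimal C sketch of the same computation (the helper name is invented for illustration; the offsets are the ones in the diff):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of "srl k0, k0, 10; sll k0, k0, 10; addiu k0, k0, 0x2000":
	 * mask off the CPUNum bits of EBASE, then add the handler offset. */
	static uint32_t guest_exception_handler(uint32_t ebase)
	{
		uint32_t base = (ebase >> 10) << 10;	/* drop CPUNum (bits 9..0) */
		return base + 0x2000;			/* handler @ base + 0x2000 */
	}

	int main(void)
	{
		/* e.g. EBASE of 0x8000003f -> handler at 0x80002000 */
		printf("handler @ 0x%08x\n", guest_exception_handler(0x8000003fu));
		return 0;
	}
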
@@ -253,329 +251,327 @@ VECTOR_END(MIPSX(exceptionEnd))
  *
  */
 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
-	.set	push
-	.set	noat
-	.set	noreorder
-
 	/* Get the VCPU pointer from DDTATA_LO */
 	mfc0	k1, CP0_DDATA_LO
-	addiu	k1, k1, VCPU_HOST_ARCH
+	INT_ADDIU k1, k1, VCPU_HOST_ARCH
 
 	/* Start saving Guest context to VCPU */
 	LONG_S	$0, VCPU_R0(k1)
 	LONG_S	$1, VCPU_R1(k1)
 	LONG_S	$2, VCPU_R2(k1)
 	LONG_S	$3, VCPU_R3(k1)
 	LONG_S	$4, VCPU_R4(k1)
 	LONG_S	$5, VCPU_R5(k1)
 	LONG_S	$6, VCPU_R6(k1)
 	LONG_S	$7, VCPU_R7(k1)
 	LONG_S	$8, VCPU_R8(k1)
 	LONG_S	$9, VCPU_R9(k1)
 	LONG_S	$10, VCPU_R10(k1)
 	LONG_S	$11, VCPU_R11(k1)
 	LONG_S	$12, VCPU_R12(k1)
 	LONG_S	$13, VCPU_R13(k1)
 	LONG_S	$14, VCPU_R14(k1)
 	LONG_S	$15, VCPU_R15(k1)
 	LONG_S	$16, VCPU_R16(k1)
-	LONG_S	$17,VCPU_R17(k1)
+	LONG_S	$17, VCPU_R17(k1)
 	LONG_S	$18, VCPU_R18(k1)
 	LONG_S	$19, VCPU_R19(k1)
 	LONG_S	$20, VCPU_R20(k1)
 	LONG_S	$21, VCPU_R21(k1)
 	LONG_S	$22, VCPU_R22(k1)
 	LONG_S	$23, VCPU_R23(k1)
 	LONG_S	$24, VCPU_R24(k1)
 	LONG_S	$25, VCPU_R25(k1)
 
 	/* Guest k0/k1 saved later */
 
 	LONG_S	$28, VCPU_R28(k1)
 	LONG_S	$29, VCPU_R29(k1)
 	LONG_S	$30, VCPU_R30(k1)
 	LONG_S	$31, VCPU_R31(k1)
 
 	/* We need to save hi/lo and restore them on
 	 * the way out
 	 */
 	mfhi	t0
 	LONG_S	t0, VCPU_HI(k1)
 
 	mflo	t0
 	LONG_S	t0, VCPU_LO(k1)
 
 	/* Finally save guest k0/k1 to VCPU */
 	mfc0	t0, CP0_ERROREPC
 	LONG_S	t0, VCPU_R26(k1)
 
 	/* Get GUEST k1 and save it in VCPU */
-	la	t1, ~0x2ff
+	PTR_LI	t1, ~0x2ff
 	mfc0	t0, CP0_EBASE
 	and	t0, t0, t1
 	LONG_L	t0, 0x3000(t0)
 	LONG_S	t0, VCPU_R27(k1)
 
 	/* Now that context has been saved, we can use other registers */
 
 	/* Restore vcpu */
 	mfc0	a1, CP0_DDATA_LO
 	move	s1, a1
 
 	/* Restore run (vcpu->run) */
 	LONG_L	a0, VCPU_RUN(a1)
 	/* Save pointer to run in s0, will be saved by the compiler */
 	move	s0, a0
 
-
-	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+	 * process the exception */
 	mfc0	k0,CP0_EPC
 	LONG_S	k0, VCPU_PC(k1)
 
 	mfc0	k0, CP0_BADVADDR
 	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)
 
 	mfc0	k0, CP0_CAUSE
 	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)
 
 	mfc0	k0, CP0_ENTRYHI
 	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)
 
 	/* Now restore the host state just enough to run the handlers */
 
 	/* Swtich EBASE to the one used by Linux */
 	/* load up the host EBASE */
 	mfc0	v0, CP0_STATUS
 
 	.set	at
 	or	k0, v0, ST0_BEV
 	.set	noat
 
 	mtc0	k0, CP0_STATUS
 	ehb
 
 	LONG_L	k0, VCPU_HOST_EBASE(k1)
 	mtc0	k0,CP0_EBASE
 
 
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
 	or	v0, v0, ST0_CU0
 	.set	noat
 	mtc0	v0, CP0_STATUS
 	ehb
 
 	/* Load up host GP */
 	LONG_L	gp, VCPU_HOST_GP(k1)
 
 	/* Need a stack before we can jump to "C" */
 	LONG_L	sp, VCPU_HOST_STACK(k1)
 
 	/* Saved host state */
-	addiu	sp,sp, -PT_SIZE
+	INT_ADDIU sp, sp, -PT_SIZE
 
 	/* XXXKYMA do we need to load the host ASID, maybe not because the
 	 * kernel entries are marked GLOBAL, need to verify
 	 */
 
 	/* Restore host DDATA_LO */
 	LONG_L	k0, PT_HOST_USERLOCAL(sp)
 	mtc0	k0, CP0_DDATA_LO
 
 	/* Restore RDHWR access */
-	la	k0, 0x2000000F
+	PTR_LI	k0, 0x2000000F
 	mtc0	k0, CP0_HWRENA
 
 	/* Jump to handler */
 FEXPORT(__kvm_mips_jump_to_handler)
-	/* XXXKYMA: not sure if this is safe, how large is the stack?? */
-	/* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
-	la	t9,kvm_mips_handle_exit
+	/* XXXKYMA: not sure if this is safe, how large is the stack??
+	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
+	 * with this in the kernel */
+	PTR_LA	t9, kvm_mips_handle_exit
 	jalr.hb	t9
-	addiu	sp,sp, -CALLFRAME_SIZ		/* BD Slot */
+	INT_ADDIU sp, sp, -CALLFRAME_SIZ	/* BD Slot */
 
 	/* Return from handler Make sure interrupts are disabled */
 	di
 	ehb
 
-	/* XXXKYMA: k0/k1 could have been blown away if we processed an exception
-	 * while we were handling the exception from the guest, reload k1
+	/* XXXKYMA: k0/k1 could have been blown away if we processed
+	 * an exception while we were handling the exception from the
+	 * guest, reload k1
 	 */
+
 	move	k1, s1
-	addiu	k1, k1, VCPU_HOST_ARCH
+	INT_ADDIU k1, k1, VCPU_HOST_ARCH
 
-	/* Check return value, should tell us if we are returning to the host (handle I/O etc)
-	 * or resuming the guest
+	/* Check return value, should tell us if we are returning to the
+	 * host (handle I/O etc)or resuming the guest
 	 */
 	andi	t0, v0, RESUME_HOST
 	bnez	t0, __kvm_mips_return_to_host
 	nop
 
 __kvm_mips_return_to_guest:
 	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
 	mtc0	s1, CP0_DDATA_LO
 
 	/* Load up the Guest EBASE to minimize the window where BEV is set */
 	LONG_L	t0, VCPU_GUEST_EBASE(k1)
 
 	/* Switch EBASE back to the one used by KVM */
 	mfc0	v1, CP0_STATUS
 	.set	at
 	or	k0, v1, ST0_BEV
 	.set	noat
 	mtc0	k0, CP0_STATUS
 	ehb
-	mtc0	t0,CP0_EBASE
+	mtc0	t0, CP0_EBASE
 
 	/* Setup status register for running guest in UM */
 	.set	at
 	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
 	and	v1, v1, ~ST0_CU0
 	.set	noat
 	mtc0	v1, CP0_STATUS
 	ehb
 
-
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
 
 	/* Set the ASID for the Guest Kernel */
-	sll	t0, t0, 1			/* with kseg0 @ 0x40000000, kernel */
+	INT_SLL	t0, t0, 1			/* with kseg0 @ 0x40000000, kernel */
 						/* addresses shift to 0x80000000 */
 	bltz	t0, 1f				/* If kernel */
-	addiu	t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
-	addiu	t1, k1, VCPU_GUEST_USER_ASID	/* else user */
+	INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
+	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID		/* else user */
 1:
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	LONG_L	t2, TI_CPU($28)			/* smp_processor_id */
-	sll	t2, t2, 2			/* x4 */
-	addu	t3, t1, t2
+	INT_SLL	t2, t2, 2			/* x4 */
+	REG_ADDU t3, t1, t2
 	LONG_L	k0, (t3)
 	andi	k0, k0, 0xff
 	mtc0	k0,CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
 	/* load the guest context from VCPU and return */
 	LONG_L	$0, VCPU_R0(k1)
 	LONG_L	$1, VCPU_R1(k1)
 	LONG_L	$2, VCPU_R2(k1)
 	LONG_L	$3, VCPU_R3(k1)
 	LONG_L	$4, VCPU_R4(k1)
 	LONG_L	$5, VCPU_R5(k1)
 	LONG_L	$6, VCPU_R6(k1)
 	LONG_L	$7, VCPU_R7(k1)
 	LONG_L	$8, VCPU_R8(k1)
 	LONG_L	$9, VCPU_R9(k1)
 	LONG_L	$10, VCPU_R10(k1)
 	LONG_L	$11, VCPU_R11(k1)
 	LONG_L	$12, VCPU_R12(k1)
 	LONG_L	$13, VCPU_R13(k1)
 	LONG_L	$14, VCPU_R14(k1)
 	LONG_L	$15, VCPU_R15(k1)
 	LONG_L	$16, VCPU_R16(k1)
 	LONG_L	$17, VCPU_R17(k1)
 	LONG_L	$18, VCPU_R18(k1)
 	LONG_L	$19, VCPU_R19(k1)
 	LONG_L	$20, VCPU_R20(k1)
 	LONG_L	$21, VCPU_R21(k1)
 	LONG_L	$22, VCPU_R22(k1)
 	LONG_L	$23, VCPU_R23(k1)
 	LONG_L	$24, VCPU_R24(k1)
 	LONG_L	$25, VCPU_R25(k1)
 
 	/* $/k1 loaded later */
 	LONG_L	$28, VCPU_R28(k1)
 	LONG_L	$29, VCPU_R29(k1)
 	LONG_L	$30, VCPU_R30(k1)
 	LONG_L	$31, VCPU_R31(k1)
 
 FEXPORT(__kvm_mips_skip_guest_restore)
 	LONG_L	k0, VCPU_HI(k1)
 	mthi	k0
 
 	LONG_L	k0, VCPU_LO(k1)
 	mtlo	k0
 
 	LONG_L	k0, VCPU_R26(k1)
 	LONG_L	k1, VCPU_R27(k1)
 
 	eret
 
 __kvm_mips_return_to_host:
 	/* EBASE is already pointing to Linux */
 	LONG_L	k1, VCPU_HOST_STACK(k1)
-	addiu	k1,k1, -PT_SIZE
+	INT_ADDIU k1,k1, -PT_SIZE
 
 	/* Restore host DDATA_LO */
 	LONG_L	k0, PT_HOST_USERLOCAL(k1)
 	mtc0	k0, CP0_DDATA_LO
 
 	/* Restore host ASID */
 	LONG_L	k0, PT_HOST_ASID(sp)
 	andi	k0, 0xff
 	mtc0	k0,CP0_ENTRYHI
 	ehb
 
 	/* Load context saved on the host stack */
 	LONG_L	$0, PT_R0(k1)
 	LONG_L	$1, PT_R1(k1)
 
-	/* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
-	sra	k0, v0, 2
+	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
+	 * to recover the err code */
+	INT_SRA	k0, v0, 2
 	move	$2, k0
 
 	LONG_L	$3, PT_R3(k1)
 	LONG_L	$4, PT_R4(k1)
 	LONG_L	$5, PT_R5(k1)
 	LONG_L	$6, PT_R6(k1)
 	LONG_L	$7, PT_R7(k1)
 	LONG_L	$8, PT_R8(k1)
 	LONG_L	$9, PT_R9(k1)
 	LONG_L	$10, PT_R10(k1)
 	LONG_L	$11, PT_R11(k1)
 	LONG_L	$12, PT_R12(k1)
 	LONG_L	$13, PT_R13(k1)
 	LONG_L	$14, PT_R14(k1)
 	LONG_L	$15, PT_R15(k1)
 	LONG_L	$16, PT_R16(k1)
 	LONG_L	$17, PT_R17(k1)
 	LONG_L	$18, PT_R18(k1)
 	LONG_L	$19, PT_R19(k1)
 	LONG_L	$20, PT_R20(k1)
 	LONG_L	$21, PT_R21(k1)
 	LONG_L	$22, PT_R22(k1)
 	LONG_L	$23, PT_R23(k1)
 	LONG_L	$24, PT_R24(k1)
 	LONG_L	$25, PT_R25(k1)
 
 	/* Host k0/k1 were not saved */
 
 	LONG_L	$28, PT_R28(k1)
 	LONG_L	$29, PT_R29(k1)
 	LONG_L	$30, PT_R30(k1)
 
 	LONG_L	k0, PT_HI(k1)
 	mthi	k0
 
 	LONG_L	k0, PT_LO(k1)
 	mtlo	k0
 
 	/* Restore RDHWR access */
-	la	k0, 0x2000000F
+	PTR_LI	k0, 0x2000000F
 	mtc0	k0, CP0_HWRENA
 
 
 	/* Restore RA, which is the address we will return to */
 	LONG_L	ra, PT_R31(k1)
 	j	ra
 	nop
 
-	.set	pop
 VECTOR_END(MIPSX(GuestExceptionEnd))
 .end MIPSX(GuestException)
 
@@ -627,24 +623,23 @@ MIPSX(exceptions):
 
 #define HW_SYNCI_Step	$1
 LEAF(MIPSX(SyncICache))
 	.set	push
 	.set	mips32r2
 	beq	a1, zero, 20f
 	nop
-	addu	a1, a0, a1
+	REG_ADDU a1, a0, a1
 	rdhwr	v0, HW_SYNCI_Step
 	beq	v0, zero, 20f
 	nop
-
 10:
 	synci	0(a0)
-	addu	a0, a0, v0
+	REG_ADDU a0, a0, v0
 	sltu	v1, a0, a1
 	bne	v1, zero, 10b
 	nop
 	sync
 20:
 	jr.hb	ra
 	nop
 	.set	pop
END(MIPSX(SyncICache))
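Both __kvm_mips_load_asid above and the re-entry path in MIPSX(GuestException) select between the guest kernel and guest user ASID with the same two-instruction idiom: with the guest's kseg0 placed at 0x40000000, shifting the PC left by one moves the kernel-segment bit into the sign bit, so a single bltz decides kernel versus user while its delay slot preloads one of the two array offsets. A small C illustration of that predicate (the function name is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of "sll t0, t0, 1; bltz t0, 1f": with kseg0 @ 0x40000000,
	 * kernel addresses shift to 0x80000000 and become negative. */
	static int guest_pc_is_kernel(uint32_t pc)
	{
		return (int32_t)(pc << 1) < 0;	/* bit 30 of pc set => kernel */
	}

	int main(void)
	{
		printf("0x40000123 -> %s\n", guest_pc_is_kernel(0x40000123u) ? "kernel" : "user");
		printf("0x00400123 -> %s\n", guest_pc_is_kernel(0x00400123u) ? "kernel" : "user");
		return 0;
	}
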
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index dd203e59e6fd..a7b044536de4 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -208,6 +208,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 	return 0;
 }
 
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_userspace_memory_region *mem,
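The kvm_mips.c hunk supplies an empty kvm_arch_memslots_updated() so MIPS links against generic code that now invokes this hook whenever the memslot array is republished; architectures that care can react to the update there. A rough, self-contained sketch of that calling relationship (install_new_memslots() here is a simplified stand-in for the real virt/kvm/kvm_main.c path, not a verbatim copy):

	#include <stdio.h>

	struct kvm { int slot_generation; };

	/* The stub added by this diff: MIPS has nothing to do on a memslot update. */
	static void kvm_arch_memslots_updated(struct kvm *kvm)
	{
		(void)kvm;
	}

	/* Simplified stand-in for the generic update path: after the new
	 * slot array is visible, the arch hook gets a chance to react. */
	static void install_new_memslots(struct kvm *kvm)
	{
		kvm->slot_generation++;		/* pretend we swapped in new slots */
		kvm_arch_memslots_updated(kvm);
	}

	int main(void)
	{
		struct kvm vm = { 0 };
		install_new_memslots(&vm);
		printf("generation: %d\n", vm.slot_generation);
		return 0;
	}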