 arch/mips/kvm/kvm_locore.S | 975 ++++++++++++++++++++++++++++----------------
 1 file changed, 491 insertions(+), 484 deletions(-)
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index dca2aa665993..301b9ad1a905 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -1,13 +1,13 @@
 /*
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Main entry point for the guest, exception handling.
  *
  * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
  */
 
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
@@ -57,172 +57,177 @@
  */
 
 FEXPORT(__kvm_mips_vcpu_run)
 	.set	push
 	.set	noreorder
 	.set	noat
 
 	/* k0/k1 not being used in host kernel context */
-	addiu	k1,sp, -PT_SIZE
+	addiu	k1, sp, -PT_SIZE
 	LONG_S	$0, PT_R0(k1)
 	LONG_S	$1, PT_R1(k1)
 	LONG_S	$2, PT_R2(k1)
 	LONG_S	$3, PT_R3(k1)
 
 	LONG_S	$4, PT_R4(k1)
 	LONG_S	$5, PT_R5(k1)
 	LONG_S	$6, PT_R6(k1)
 	LONG_S	$7, PT_R7(k1)
 
 	LONG_S	$8, PT_R8(k1)
 	LONG_S	$9, PT_R9(k1)
 	LONG_S	$10, PT_R10(k1)
 	LONG_S	$11, PT_R11(k1)
 	LONG_S	$12, PT_R12(k1)
 	LONG_S	$13, PT_R13(k1)
 	LONG_S	$14, PT_R14(k1)
 	LONG_S	$15, PT_R15(k1)
 	LONG_S	$16, PT_R16(k1)
 	LONG_S	$17, PT_R17(k1)
 
 	LONG_S	$18, PT_R18(k1)
 	LONG_S	$19, PT_R19(k1)
 	LONG_S	$20, PT_R20(k1)
 	LONG_S	$21, PT_R21(k1)
 	LONG_S	$22, PT_R22(k1)
 	LONG_S	$23, PT_R23(k1)
 	LONG_S	$24, PT_R24(k1)
 	LONG_S	$25, PT_R25(k1)
 
 	/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
 
 	LONG_S	$28, PT_R28(k1)
 	LONG_S	$29, PT_R29(k1)
 	LONG_S	$30, PT_R30(k1)
 	LONG_S	$31, PT_R31(k1)
 
 	/* Save hi/lo */
 	mflo	v0
 	LONG_S	v0, PT_LO(k1)
 	mfhi	v1
 	LONG_S	v1, PT_HI(k1)
 
 	/* Save host status */
 	mfc0	v0, CP0_STATUS
 	LONG_S	v0, PT_STATUS(k1)
 
 	/* Save host ASID, shove it into the BVADDR location */
-	mfc0	v1,CP0_ENTRYHI
+	mfc0	v1, CP0_ENTRYHI
 	andi	v1, 0xff
 	LONG_S	v1, PT_HOST_ASID(k1)
 
 	/* Save DDATA_LO, will be used to store pointer to vcpu */
 	mfc0	v1, CP0_DDATA_LO
 	LONG_S	v1, PT_HOST_USERLOCAL(k1)
 
 	/* DDATA_LO has pointer to vcpu */
-	mtc0	a1,CP0_DDATA_LO
+	mtc0	a1, CP0_DDATA_LO
 
 	/* Offset into vcpu->arch */
 	addiu	k1, a1, VCPU_HOST_ARCH
 
-	/* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+	/*
+	 * Save the host stack to VCPU, used for exception processing
+	 * when we exit from the Guest
+	 */
 	LONG_S	sp, VCPU_HOST_STACK(k1)
 
 	/* Save the kernel gp as well */
 	LONG_S	gp, VCPU_HOST_GP(k1)
 
 	/* Setup status register for running the guest in UM, interrupts are disabled */
-	li	k0,(ST0_EXL | KSU_USER| ST0_BEV)
-	mtc0	k0,CP0_STATUS
+	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
+	mtc0	k0, CP0_STATUS
 	ehb
 
 	/* load up the new EBASE */
 	LONG_L	k0, VCPU_GUEST_EBASE(k1)
-	mtc0	k0,CP0_EBASE
+	mtc0	k0, CP0_EBASE
 
-	/* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
-	 * but make sure that timer interrupts are enabled
+	/*
+	 * Now that the new EBASE has been loaded, unset BEV, set
+	 * interrupt mask as it was but make sure that timer interrupts
+	 * are enabled
 	 */
-	li	k0,(ST0_EXL | KSU_USER | ST0_IE)
+	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
 	andi	v0, v0, ST0_IM
 	or	k0, k0, v0
-	mtc0	k0,CP0_STATUS
+	mtc0	k0, CP0_STATUS
 	ehb
 
 
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
 
 FEXPORT(__kvm_mips_load_asid)
 	/* Set the ASID for the Guest Kernel */
 	sll	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
 				/* addresses shift to 0x80000000 */
 	bltz	t0, 1f		/* If kernel */
 	addiu	t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
 	addiu	t1, k1, VCPU_GUEST_USER_ASID	/* else user */
 1:
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	LONG_L	t2, TI_CPU($28)	/* smp_processor_id */
 	sll	t2, t2, 2	/* x4 */
 	addu	t3, t1, t2
 	LONG_L	k0, (t3)
 	andi	k0, k0, 0xff
-	mtc0	k0,CP0_ENTRYHI
+	mtc0	k0, CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
 	/* Now load up the Guest Context from VCPU */
 	LONG_L	$1, VCPU_R1(k1)
 	LONG_L	$2, VCPU_R2(k1)
 	LONG_L	$3, VCPU_R3(k1)
 
 	LONG_L	$4, VCPU_R4(k1)
 	LONG_L	$5, VCPU_R5(k1)
 	LONG_L	$6, VCPU_R6(k1)
 	LONG_L	$7, VCPU_R7(k1)
 
 	LONG_L	$8, VCPU_R8(k1)
 	LONG_L	$9, VCPU_R9(k1)
 	LONG_L	$10, VCPU_R10(k1)
 	LONG_L	$11, VCPU_R11(k1)
 	LONG_L	$12, VCPU_R12(k1)
 	LONG_L	$13, VCPU_R13(k1)
 	LONG_L	$14, VCPU_R14(k1)
 	LONG_L	$15, VCPU_R15(k1)
 	LONG_L	$16, VCPU_R16(k1)
 	LONG_L	$17, VCPU_R17(k1)
 	LONG_L	$18, VCPU_R18(k1)
 	LONG_L	$19, VCPU_R19(k1)
 	LONG_L	$20, VCPU_R20(k1)
 	LONG_L	$21, VCPU_R21(k1)
 	LONG_L	$22, VCPU_R22(k1)
 	LONG_L	$23, VCPU_R23(k1)
 	LONG_L	$24, VCPU_R24(k1)
 	LONG_L	$25, VCPU_R25(k1)
 
 	/* k0/k1 loaded up later */
 
 	LONG_L	$28, VCPU_R28(k1)
 	LONG_L	$29, VCPU_R29(k1)
 	LONG_L	$30, VCPU_R30(k1)
 	LONG_L	$31, VCPU_R31(k1)
 
 	/* Restore hi/lo */
 	LONG_L	k0, VCPU_LO(k1)
 	mtlo	k0
 
 	LONG_L	k0, VCPU_HI(k1)
 	mthi	k0
 
 FEXPORT(__kvm_mips_load_k0k1)
 	/* Restore the guest's k0/k1 registers */
 	LONG_L	k0, VCPU_R26(k1)
 	LONG_L	k1, VCPU_R27(k1)
 
 	/* Jump to guest */
 	eret
 	.set	pop
 
@@ -230,19 +235,19 @@ VECTOR(MIPSX(exception), unknown)
 	/*
 	 * Find out what mode we came from and jump to the proper handler.
 	 */
 	.set	push
 	.set	noat
 	.set	noreorder
 	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
 	ehb				#02:
 
 	mfc0	k0, CP0_EBASE		#02: Get EBASE
 	srl	k0, k0, 10		#03: Get rid of CPUNum
 	sll	k0, k0, 10		#04
 	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
 	addiu	k0, k0, 0x2000		#06: Exception handler is installed @ offset 0x2000
 	j	k0			#07: jump to the function
 	nop				#08: branch delay slot
 	.set	push
 VECTOR_END(MIPSX(exceptionEnd))
 .end MIPSX(exception)
@@ -253,329 +258,332 @@ VECTOR_END(MIPSX(exceptionEnd))
  *
  */
 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	.set	push
 	.set	noat
 	.set	noreorder
 
 	/* Get the VCPU pointer from DDTATA_LO */
 	mfc0	k1, CP0_DDATA_LO
 	addiu	k1, k1, VCPU_HOST_ARCH
 
 	/* Start saving Guest context to VCPU */
 	LONG_S	$0, VCPU_R0(k1)
 	LONG_S	$1, VCPU_R1(k1)
 	LONG_S	$2, VCPU_R2(k1)
 	LONG_S	$3, VCPU_R3(k1)
 	LONG_S	$4, VCPU_R4(k1)
 	LONG_S	$5, VCPU_R5(k1)
 	LONG_S	$6, VCPU_R6(k1)
 	LONG_S	$7, VCPU_R7(k1)
 	LONG_S	$8, VCPU_R8(k1)
 	LONG_S	$9, VCPU_R9(k1)
 	LONG_S	$10, VCPU_R10(k1)
 	LONG_S	$11, VCPU_R11(k1)
 	LONG_S	$12, VCPU_R12(k1)
 	LONG_S	$13, VCPU_R13(k1)
 	LONG_S	$14, VCPU_R14(k1)
 	LONG_S	$15, VCPU_R15(k1)
 	LONG_S	$16, VCPU_R16(k1)
-	LONG_S	$17,VCPU_R17(k1)
+	LONG_S	$17, VCPU_R17(k1)
 	LONG_S	$18, VCPU_R18(k1)
 	LONG_S	$19, VCPU_R19(k1)
 	LONG_S	$20, VCPU_R20(k1)
 	LONG_S	$21, VCPU_R21(k1)
 	LONG_S	$22, VCPU_R22(k1)
 	LONG_S	$23, VCPU_R23(k1)
 	LONG_S	$24, VCPU_R24(k1)
 	LONG_S	$25, VCPU_R25(k1)
 
 	/* Guest k0/k1 saved later */
 
 	LONG_S	$28, VCPU_R28(k1)
 	LONG_S	$29, VCPU_R29(k1)
 	LONG_S	$30, VCPU_R30(k1)
 	LONG_S	$31, VCPU_R31(k1)
 
 	/* We need to save hi/lo and restore them on
 	 * the way out
 	 */
 	mfhi	t0
 	LONG_S	t0, VCPU_HI(k1)
 
 	mflo	t0
 	LONG_S	t0, VCPU_LO(k1)
 
 	/* Finally save guest k0/k1 to VCPU */
 	mfc0	t0, CP0_ERROREPC
 	LONG_S	t0, VCPU_R26(k1)
 
 	/* Get GUEST k1 and save it in VCPU */
 	la	t1, ~0x2ff
 	mfc0	t0, CP0_EBASE
 	and	t0, t0, t1
 	LONG_L	t0, 0x3000(t0)
 	LONG_S	t0, VCPU_R27(k1)
 
 	/* Now that context has been saved, we can use other registers */
 
 	/* Restore vcpu */
 	mfc0	a1, CP0_DDATA_LO
 	move	s1, a1
 
 	/* Restore run (vcpu->run) */
 	LONG_L	a0, VCPU_RUN(a1)
 	/* Save pointer to run in s0, will be saved by the compiler */
 	move	s0, a0
 
-
-	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+	 * process the exception */
 	mfc0	k0,CP0_EPC
 	LONG_S	k0, VCPU_PC(k1)
 
 	mfc0	k0, CP0_BADVADDR
 	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)
 
 	mfc0	k0, CP0_CAUSE
 	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)
 
 	mfc0	k0, CP0_ENTRYHI
 	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)
 
 	/* Now restore the host state just enough to run the handlers */
 
 	/* Swtich EBASE to the one used by Linux */
 	/* load up the host EBASE */
 	mfc0	v0, CP0_STATUS
 
 	.set	at
 	or	k0, v0, ST0_BEV
 	.set	noat
 
 	mtc0	k0, CP0_STATUS
 	ehb
 
 	LONG_L	k0, VCPU_HOST_EBASE(k1)
 	mtc0	k0,CP0_EBASE
 
 
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
 	or	v0, v0, ST0_CU0
 	.set	noat
 	mtc0	v0, CP0_STATUS
 	ehb
 
 	/* Load up host GP */
 	LONG_L	gp, VCPU_HOST_GP(k1)
 
 	/* Need a stack before we can jump to "C" */
 	LONG_L	sp, VCPU_HOST_STACK(k1)
 
 	/* Saved host state */
-	addiu	sp,sp, -PT_SIZE
+	addiu	sp, sp, -PT_SIZE
 
 	/* XXXKYMA do we need to load the host ASID, maybe not because the
 	 * kernel entries are marked GLOBAL, need to verify
 	 */
 
 	/* Restore host DDATA_LO */
 	LONG_L	k0, PT_HOST_USERLOCAL(sp)
 	mtc0	k0, CP0_DDATA_LO
 
 	/* Restore RDHWR access */
 	la	k0, 0x2000000F
 	mtc0	k0, CP0_HWRENA
 
 	/* Jump to handler */
 FEXPORT(__kvm_mips_jump_to_handler)
-	/* XXXKYMA: not sure if this is safe, how large is the stack?? */
-	/* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
-	la	t9,kvm_mips_handle_exit
+	/* XXXKYMA: not sure if this is safe, how large is the stack??
+	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
+	 * with this in the kernel */
+	la	t9, kvm_mips_handle_exit
 	jalr.hb	t9
-	addiu	sp,sp, -CALLFRAME_SIZ	/* BD Slot */
+	addiu	sp, sp, -CALLFRAME_SIZ	/* BD Slot */
 
 	/* Return from handler Make sure interrupts are disabled */
 	di
 	ehb
 
-	/* XXXKYMA: k0/k1 could have been blown away if we processed an exception
-	 * while we were handling the exception from the guest, reload k1
+	/* XXXKYMA: k0/k1 could have been blown away if we processed
+	 * an exception while we were handling the exception from the
+	 * guest, reload k1
 	 */
+
 	move	k1, s1
 	addiu	k1, k1, VCPU_HOST_ARCH
 
-	/* Check return value, should tell us if we are returning to the host (handle I/O etc)
-	 * or resuming the guest
+	/* Check return value, should tell us if we are returning to the
+	 * host (handle I/O etc)or resuming the guest
 	 */
 	andi	t0, v0, RESUME_HOST
 	bnez	t0, __kvm_mips_return_to_host
 	nop
 
 __kvm_mips_return_to_guest:
 	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
 	mtc0	s1, CP0_DDATA_LO
 
 	/* Load up the Guest EBASE to minimize the window where BEV is set */
 	LONG_L	t0, VCPU_GUEST_EBASE(k1)
 
 	/* Switch EBASE back to the one used by KVM */
 	mfc0	v1, CP0_STATUS
 	.set	at
 	or	k0, v1, ST0_BEV
 	.set	noat
 	mtc0	k0, CP0_STATUS
 	ehb
-	mtc0	t0,CP0_EBASE
+	mtc0	t0, CP0_EBASE
 
 	/* Setup status register for running guest in UM */
 	.set	at
 	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
 	and	v1, v1, ~ST0_CU0
 	.set	noat
 	mtc0	v1, CP0_STATUS
 	ehb
-
 
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
 
 	/* Set the ASID for the Guest Kernel */
 	sll	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
 				/* addresses shift to 0x80000000 */
 	bltz	t0, 1f		/* If kernel */
 	addiu	t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
 	addiu	t1, k1, VCPU_GUEST_USER_ASID	/* else user */
 1:
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	LONG_L	t2, TI_CPU($28)	/* smp_processor_id */
 	sll	t2, t2, 2	/* x4 */
 	addu	t3, t1, t2
 	LONG_L	k0, (t3)
 	andi	k0, k0, 0xff
 	mtc0	k0,CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
 	/* load the guest context from VCPU and return */
 	LONG_L	$0, VCPU_R0(k1)
 	LONG_L	$1, VCPU_R1(k1)
 	LONG_L	$2, VCPU_R2(k1)
 	LONG_L	$3, VCPU_R3(k1)
 	LONG_L	$4, VCPU_R4(k1)
 	LONG_L	$5, VCPU_R5(k1)
 	LONG_L	$6, VCPU_R6(k1)
 	LONG_L	$7, VCPU_R7(k1)
 	LONG_L	$8, VCPU_R8(k1)
 	LONG_L	$9, VCPU_R9(k1)
 	LONG_L	$10, VCPU_R10(k1)
 	LONG_L	$11, VCPU_R11(k1)
 	LONG_L	$12, VCPU_R12(k1)
 	LONG_L	$13, VCPU_R13(k1)
 	LONG_L	$14, VCPU_R14(k1)
 	LONG_L	$15, VCPU_R15(k1)
 	LONG_L	$16, VCPU_R16(k1)
 	LONG_L	$17, VCPU_R17(k1)
 	LONG_L	$18, VCPU_R18(k1)
 	LONG_L	$19, VCPU_R19(k1)
 	LONG_L	$20, VCPU_R20(k1)
 	LONG_L	$21, VCPU_R21(k1)
 	LONG_L	$22, VCPU_R22(k1)
 	LONG_L	$23, VCPU_R23(k1)
 	LONG_L	$24, VCPU_R24(k1)
 	LONG_L	$25, VCPU_R25(k1)
 
 	/* $/k1 loaded later */
 	LONG_L	$28, VCPU_R28(k1)
 	LONG_L	$29, VCPU_R29(k1)
 	LONG_L	$30, VCPU_R30(k1)
 	LONG_L	$31, VCPU_R31(k1)
 
 FEXPORT(__kvm_mips_skip_guest_restore)
 	LONG_L	k0, VCPU_HI(k1)
 	mthi	k0
 
 	LONG_L	k0, VCPU_LO(k1)
 	mtlo	k0
 
 	LONG_L	k0, VCPU_R26(k1)
 	LONG_L	k1, VCPU_R27(k1)
 
 	eret
 
 __kvm_mips_return_to_host:
 	/* EBASE is already pointing to Linux */
 	LONG_L	k1, VCPU_HOST_STACK(k1)
 	addiu	k1,k1, -PT_SIZE
 
 	/* Restore host DDATA_LO */
 	LONG_L	k0, PT_HOST_USERLOCAL(k1)
 	mtc0	k0, CP0_DDATA_LO
 
 	/* Restore host ASID */
 	LONG_L	k0, PT_HOST_ASID(sp)
 	andi	k0, 0xff
 	mtc0	k0,CP0_ENTRYHI
 	ehb
 
 	/* Load context saved on the host stack */
 	LONG_L	$0, PT_R0(k1)
 	LONG_L	$1, PT_R1(k1)
 
-	/* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
+	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
+	 * to recover the err code */
 	sra	k0, v0, 2
 	move	$2, k0
 
 	LONG_L	$3, PT_R3(k1)
 	LONG_L	$4, PT_R4(k1)
 	LONG_L	$5, PT_R5(k1)
 	LONG_L	$6, PT_R6(k1)
 	LONG_L	$7, PT_R7(k1)
 	LONG_L	$8, PT_R8(k1)
 	LONG_L	$9, PT_R9(k1)
 	LONG_L	$10, PT_R10(k1)
 	LONG_L	$11, PT_R11(k1)
 	LONG_L	$12, PT_R12(k1)
 	LONG_L	$13, PT_R13(k1)
 	LONG_L	$14, PT_R14(k1)
 	LONG_L	$15, PT_R15(k1)
 	LONG_L	$16, PT_R16(k1)
 	LONG_L	$17, PT_R17(k1)
 	LONG_L	$18, PT_R18(k1)
 	LONG_L	$19, PT_R19(k1)
 	LONG_L	$20, PT_R20(k1)
 	LONG_L	$21, PT_R21(k1)
 	LONG_L	$22, PT_R22(k1)
 	LONG_L	$23, PT_R23(k1)
 	LONG_L	$24, PT_R24(k1)
 	LONG_L	$25, PT_R25(k1)
 
 	/* Host k0/k1 were not saved */
 
 	LONG_L	$28, PT_R28(k1)
 	LONG_L	$29, PT_R29(k1)
 	LONG_L	$30, PT_R30(k1)
 
 	LONG_L	k0, PT_HI(k1)
 	mthi	k0
 
 	LONG_L	k0, PT_LO(k1)
 	mtlo	k0
 
 	/* Restore RDHWR access */
 	la	k0, 0x2000000F
 	mtc0	k0, CP0_HWRENA
 
 
 	/* Restore RA, which is the address we will return to */
 	LONG_L	ra, PT_R31(k1)
 	j	ra
 	nop
 
 	.set	pop
 VECTOR_END(MIPSX(GuestExceptionEnd))
 .end MIPSX(GuestException)
 
@@ -627,24 +635,23 @@ MIPSX(exceptions):
 
 #define HW_SYNCI_Step	$1
 LEAF(MIPSX(SyncICache))
 	.set	push
 	.set	mips32r2
 	beq	a1, zero, 20f
 	nop
 	addu	a1, a0, a1
 	rdhwr	v0, HW_SYNCI_Step
 	beq	v0, zero, 20f
 	nop
-
 10:
 	synci	0(a0)
 	addu	a0, a0, v0
 	sltu	v1, a0, a1
 	bne	v1, zero, 10b
 	nop
 	sync
 20:
 	jr.hb	ra
 	nop
 	.set	pop
 END(MIPSX(SyncICache))