author     Michael Neuling <mikey@neuling.org>                 2012-06-25 09:33:10 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-07-10 05:17:55 -0400
commit     c75df6f96c59beed8632e3aced5fb4faabaa6c5b (patch)
tree       b21ce9394028ec4520a71d87391dad8ab29edd67 /arch/powerpc/kvm/bookehv_interrupts.S
parent     564aa5cfd3e33ef69a5ca6c170a0fe79c6805e52 (diff)
powerpc: Fix usage of register macros getting ready for %r0 change
Anything that uses a constructed instruction (i.e. from ppc-opcode.h) needs to use
the new R0 macro, as %r0 is not going to work.
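As a rough sketch of why a plain number is required there: the constructed-instruction helpers in ppc-opcode.h OR the register number into a field of the instruction word, along the following lines (names and shifts are an approximation, not the exact kernel definitions):

	/* Approximate ppc-opcode.h-style field helpers, for illustration. */
	#define ___PPC_RT(t)	(((t) & 0x1f) << 21)	/* destination register field */
	#define ___PPC_RA(a)	(((a) & 0x1f) << 16)	/* first operand register field */

	/* These build a raw instruction word, so the argument must be an
	 * integer like 14 (what the new R14 expands to), not the symbol %r14. */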
Also convert usages of macros where we are just determining an offset (usually for
a load/store), like:

	std	r14,STK_REG(r14)(r1)

We can't use STK_REG(r14), as %r14 does not work in the STK_REG macro, which is
just calculating an offset.
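A minimal sketch of that offset case, using a hypothetical VCPU_GPR definition (the real one comes from the kernel's generated offsets, so treat the layout as an assumption):

	/* Hypothetical definitions, for illustration only. */
	#define R14		14			/* the new register macros are plain numbers */
	#define VCPU_GPR(n)	(VCPU_GPRS + (n) * 8)	/* assumed 8-byte stride per saved GPR */

	/* VCPU_GPR(R14) folds to a constant offset at assembly time; if r14
	 * expanded to %r14, this arithmetic could not be evaluated at all. */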
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kvm/bookehv_interrupts.S')
-rw-r--r--   arch/powerpc/kvm/bookehv_interrupts.S | 222
1 file changed, 111 insertions, 111 deletions
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..a623b1d32d3e 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -67,15 +67,15 @@
  */
 .macro kvm_handler_common intno, srr0, flags
 	/* Restore host stack pointer */
-	PPC_STL	r1, VCPU_GPR(r1)(r4)
-	PPC_STL	r2, VCPU_GPR(r2)(r4)
+	PPC_STL	r1, VCPU_GPR(R1)(r4)
+	PPC_STL	r2, VCPU_GPR(R2)(r4)
 	PPC_LL	r1, VCPU_HOST_STACK(r4)
 	PPC_LL	r2, HOST_R2(r1)
 
 	mfspr	r10, SPRN_PID
 	lwz	r8, VCPU_HOST_PID(r4)
 	PPC_LL	r11, VCPU_SHARED(r4)
-	PPC_STL	r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+	PPC_STL	r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
 	li	r14, \intno
 
 	stw	r10, VCPU_GUEST_PID(r4)
@@ -137,27 +137,27 @@
 	 */
 
 	mfspr	r3, SPRN_EPLC	/* will already have correct ELPID and EGS */
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
 	mr	r8, r3
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
 	rlwimi	r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
 	rlwimi	r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
 	rlwimi	r8, r10, EPC_EPID_SHIFT, EPC_EPID
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 	mtspr	SPRN_EPLC, r8
 
 	/* disable preemption, so we are sure we hit the fixup handler */
@@ -211,24 +211,24 @@
 .macro kvm_handler intno srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	GET_VCPU(r11, r10)
-	PPC_STL	r3, VCPU_GPR(r3)(r11)
+	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r13, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	mfspr	r6, \srr1
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL	r8, VCPU_GPR(r8)(r11)
-	PPC_STL	r9, VCPU_GPR(r9)(r11)
-	PPC_STL	r3, VCPU_GPR(r13)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL	r8, VCPU_GPR(R8)(r11)
+	PPC_STL	r9, VCPU_GPR(R9)(r11)
+	PPC_STL	r3, VCPU_GPR(R13)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +238,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	mfspr	r10, SPRN_SPRG_THREAD
 	GET_VCPU(r11, r10)
-	PPC_STL	r3, VCPU_GPR(r3)(r11)
+	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, \scratch
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, GPR9(r8)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r9, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r8)(r11)
+	PPC_STL	r3, VCPU_GPR(R8)(r11)
 	PPC_LL	r3, GPR10(r8)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r9)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R9)(r11)
 	mfspr	r6, \srr1
 	PPC_LL	r4, GPR11(r8)
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL	r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
-	PPC_STL	r13, VCPU_GPR(r13)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
+	PPC_STL	r13, VCPU_GPR(R13)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -310,7 +310,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
 _GLOBAL(kvmppc_resume_host)
 	/* Save remaining volatile guest register state to vcpu. */
 	mfspr	r3, SPRN_VRSAVE
-	PPC_STL	r0, VCPU_GPR(r0)(r4)
+	PPC_STL	r0, VCPU_GPR(R0)(r4)
 	mflr	r5
 	mfspr	r6, SPRN_SPRG4
 	PPC_STL	r5, VCPU_LR(r4)
@@ -358,27 +358,27 @@ _GLOBAL(kvmppc_resume_host)
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr	r4, r14
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
 
 	andi.	r5, r3, RESUME_FLAG_NV
 	beq	skip_nv_load
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
 skip_nv_load:
 	/* Should we return to the guest? */
 	andi.	r5, r3, RESUME_FLAG_HOST
@@ -396,23 +396,23 @@ heavyweight_exit:
 	 * non-volatiles.
 	 */
 
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 
 	/* Load host non-volatile register state from host stack. */
 	PPC_LL	r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +478,24 @@ _GLOBAL(__kvmppc_vcpu_run)
 	PPC_STL	r31, HOST_NV_GPR(r31)(r1)
 
 	/* Load guest non-volatiles. */
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
 
 
 lightweight_exit:
@@ -554,13 +554,13 @@ lightweight_exit:
 	lwz	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
 	PPC_LD(r9, VCPU_SHARED_MSR, r11)
-	PPC_LL	r0, VCPU_GPR(r0)(r4)
-	PPC_LL	r1, VCPU_GPR(r1)(r4)
-	PPC_LL	r2, VCPU_GPR(r2)(r4)
-	PPC_LL	r10, VCPU_GPR(r10)(r4)
-	PPC_LL	r11, VCPU_GPR(r11)(r4)
-	PPC_LL	r12, VCPU_GPR(r12)(r4)
-	PPC_LL	r13, VCPU_GPR(r13)(r4)
+	PPC_LL	r0, VCPU_GPR(R0)(r4)
+	PPC_LL	r1, VCPU_GPR(R1)(r4)
+	PPC_LL	r2, VCPU_GPR(R2)(r4)
+	PPC_LL	r10, VCPU_GPR(R10)(r4)
+	PPC_LL	r11, VCPU_GPR(R11)(r4)
+	PPC_LL	r12, VCPU_GPR(R12)(r4)
+	PPC_LL	r13, VCPU_GPR(R13)(r4)
 	mtlr	r3
 	mtxer	r5
 	mtctr	r6
@@ -586,12 +586,12 @@ lightweight_exit:
 	mtcr	r7
 
 	/* Finish loading guest volatiles and jump to guest. */
-	PPC_LL	r5, VCPU_GPR(r5)(r4)
-	PPC_LL	r6, VCPU_GPR(r6)(r4)
-	PPC_LL	r7, VCPU_GPR(r7)(r4)
-	PPC_LL	r8, VCPU_GPR(r8)(r4)
-	PPC_LL	r9, VCPU_GPR(r9)(r4)
+	PPC_LL	r5, VCPU_GPR(R5)(r4)
+	PPC_LL	r6, VCPU_GPR(R6)(r4)
+	PPC_LL	r7, VCPU_GPR(R7)(r4)
+	PPC_LL	r8, VCPU_GPR(R8)(r4)
+	PPC_LL	r9, VCPU_GPR(R9)(r4)
 
-	PPC_LL	r3, VCPU_GPR(r3)(r4)
-	PPC_LL	r4, VCPU_GPR(r4)(r4)
+	PPC_LL	r3, VCPU_GPR(R3)(r4)
+	PPC_LL	r4, VCPU_GPR(R4)(r4)
 	rfi