Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 220
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S    |  72
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S     | 272
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S   | 222
4 files changed, 393 insertions, 393 deletions
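
Every hunk below makes the same mechanical substitution: the macros that turn a register number into a byte offset or an opcode field -- VCPU_GPR(), HOST_NV_GPR(), and the instruction helpers PPC_SLBFEE_DOT(), PPC_POPCNTW(), STXVD2X() and LXVD2X() -- are now passed the uppercase symbolic names R0..R31 instead of the lowercase mnemonics r0..r31, presumably so the lowercase names can later be reserved for real register operands rather than bare numbers. A minimal sketch of why the macro argument must stay a plain integer; the R14 and ULONG_SIZE values are illustrative assumptions, only the VCPU_GPR() definition is quoted from the diff (book3s_interrupts.S):

	/* Assumed for illustration -- mirrors the convention the hunks rely on. */
	#define R14		14				/* uppercase name: always a bare number */
	#define ULONG_SIZE	8				/* GPR slot size, 64-bit build assumed  */
	#define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))	/* as defined in book3s_interrupts.S    */

		/* preprocesses to:  ld  r14, (VCPU_GPRS + (14 * 8))(r4) */
		ld	r14, VCPU_GPR(R14)(r4)

		/* A lowercase argument only worked while "r14" itself was a #define
		 * for 14; once r14 stands for the register operand instead of the
		 * number, VCPU_GPR(r14) no longer expands to valid address arithmetic. */
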
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a1044f43becd..bc99015030c3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
206 | /* Load up FP, VMX and VSX registers */ | 206 | /* Load up FP, VMX and VSX registers */ |
207 | bl kvmppc_load_fp | 207 | bl kvmppc_load_fp |
208 | 208 | ||
209 | ld r14, VCPU_GPR(r14)(r4) | 209 | ld r14, VCPU_GPR(R14)(r4) |
210 | ld r15, VCPU_GPR(r15)(r4) | 210 | ld r15, VCPU_GPR(R15)(r4) |
211 | ld r16, VCPU_GPR(r16)(r4) | 211 | ld r16, VCPU_GPR(R16)(r4) |
212 | ld r17, VCPU_GPR(r17)(r4) | 212 | ld r17, VCPU_GPR(R17)(r4) |
213 | ld r18, VCPU_GPR(r18)(r4) | 213 | ld r18, VCPU_GPR(R18)(r4) |
214 | ld r19, VCPU_GPR(r19)(r4) | 214 | ld r19, VCPU_GPR(R19)(r4) |
215 | ld r20, VCPU_GPR(r20)(r4) | 215 | ld r20, VCPU_GPR(R20)(r4) |
216 | ld r21, VCPU_GPR(r21)(r4) | 216 | ld r21, VCPU_GPR(R21)(r4) |
217 | ld r22, VCPU_GPR(r22)(r4) | 217 | ld r22, VCPU_GPR(R22)(r4) |
218 | ld r23, VCPU_GPR(r23)(r4) | 218 | ld r23, VCPU_GPR(R23)(r4) |
219 | ld r24, VCPU_GPR(r24)(r4) | 219 | ld r24, VCPU_GPR(R24)(r4) |
220 | ld r25, VCPU_GPR(r25)(r4) | 220 | ld r25, VCPU_GPR(R25)(r4) |
221 | ld r26, VCPU_GPR(r26)(r4) | 221 | ld r26, VCPU_GPR(R26)(r4) |
222 | ld r27, VCPU_GPR(r27)(r4) | 222 | ld r27, VCPU_GPR(R27)(r4) |
223 | ld r28, VCPU_GPR(r28)(r4) | 223 | ld r28, VCPU_GPR(R28)(r4) |
224 | ld r29, VCPU_GPR(r29)(r4) | 224 | ld r29, VCPU_GPR(R29)(r4) |
225 | ld r30, VCPU_GPR(r30)(r4) | 225 | ld r30, VCPU_GPR(R30)(r4) |
226 | ld r31, VCPU_GPR(r31)(r4) | 226 | ld r31, VCPU_GPR(R31)(r4) |
227 | 227 | ||
228 | BEGIN_FTR_SECTION | 228 | BEGIN_FTR_SECTION |
229 | /* Switch DSCR to guest value */ | 229 | /* Switch DSCR to guest value */ |
@@ -547,21 +547,21 @@ fast_guest_return:
547 | mtlr r5 | 547 | mtlr r5 |
548 | mtcr r6 | 548 | mtcr r6 |
549 | 549 | ||
550 | ld r0, VCPU_GPR(r0)(r4) | 550 | ld r0, VCPU_GPR(R0)(r4) |
551 | ld r1, VCPU_GPR(r1)(r4) | 551 | ld r1, VCPU_GPR(R1)(r4) |
552 | ld r2, VCPU_GPR(r2)(r4) | 552 | ld r2, VCPU_GPR(R2)(r4) |
553 | ld r3, VCPU_GPR(r3)(r4) | 553 | ld r3, VCPU_GPR(R3)(r4) |
554 | ld r5, VCPU_GPR(r5)(r4) | 554 | ld r5, VCPU_GPR(R5)(r4) |
555 | ld r6, VCPU_GPR(r6)(r4) | 555 | ld r6, VCPU_GPR(R6)(r4) |
556 | ld r7, VCPU_GPR(r7)(r4) | 556 | ld r7, VCPU_GPR(R7)(r4) |
557 | ld r8, VCPU_GPR(r8)(r4) | 557 | ld r8, VCPU_GPR(R8)(r4) |
558 | ld r9, VCPU_GPR(r9)(r4) | 558 | ld r9, VCPU_GPR(R9)(r4) |
559 | ld r10, VCPU_GPR(r10)(r4) | 559 | ld r10, VCPU_GPR(R10)(r4) |
560 | ld r11, VCPU_GPR(r11)(r4) | 560 | ld r11, VCPU_GPR(R11)(r4) |
561 | ld r12, VCPU_GPR(r12)(r4) | 561 | ld r12, VCPU_GPR(R12)(r4) |
562 | ld r13, VCPU_GPR(r13)(r4) | 562 | ld r13, VCPU_GPR(R13)(r4) |
563 | 563 | ||
564 | ld r4, VCPU_GPR(r4)(r4) | 564 | ld r4, VCPU_GPR(R4)(r4) |
565 | 565 | ||
566 | hrfid | 566 | hrfid |
567 | b . | 567 | b . |
@@ -590,22 +590,22 @@ kvmppc_interrupt:
590 | 590 | ||
591 | /* Save registers */ | 591 | /* Save registers */ |
592 | 592 | ||
593 | std r0, VCPU_GPR(r0)(r9) | 593 | std r0, VCPU_GPR(R0)(r9) |
594 | std r1, VCPU_GPR(r1)(r9) | 594 | std r1, VCPU_GPR(R1)(r9) |
595 | std r2, VCPU_GPR(r2)(r9) | 595 | std r2, VCPU_GPR(R2)(r9) |
596 | std r3, VCPU_GPR(r3)(r9) | 596 | std r3, VCPU_GPR(R3)(r9) |
597 | std r4, VCPU_GPR(r4)(r9) | 597 | std r4, VCPU_GPR(R4)(r9) |
598 | std r5, VCPU_GPR(r5)(r9) | 598 | std r5, VCPU_GPR(R5)(r9) |
599 | std r6, VCPU_GPR(r6)(r9) | 599 | std r6, VCPU_GPR(R6)(r9) |
600 | std r7, VCPU_GPR(r7)(r9) | 600 | std r7, VCPU_GPR(R7)(r9) |
601 | std r8, VCPU_GPR(r8)(r9) | 601 | std r8, VCPU_GPR(R8)(r9) |
602 | ld r0, HSTATE_HOST_R2(r13) | 602 | ld r0, HSTATE_HOST_R2(r13) |
603 | std r0, VCPU_GPR(r9)(r9) | 603 | std r0, VCPU_GPR(R9)(r9) |
604 | std r10, VCPU_GPR(r10)(r9) | 604 | std r10, VCPU_GPR(R10)(r9) |
605 | std r11, VCPU_GPR(r11)(r9) | 605 | std r11, VCPU_GPR(R11)(r9) |
606 | ld r3, HSTATE_SCRATCH0(r13) | 606 | ld r3, HSTATE_SCRATCH0(r13) |
607 | lwz r4, HSTATE_SCRATCH1(r13) | 607 | lwz r4, HSTATE_SCRATCH1(r13) |
608 | std r3, VCPU_GPR(r12)(r9) | 608 | std r3, VCPU_GPR(R12)(r9) |
609 | stw r4, VCPU_CR(r9) | 609 | stw r4, VCPU_CR(r9) |
610 | 610 | ||
611 | /* Restore R1/R2 so we can handle faults */ | 611 | /* Restore R1/R2 so we can handle faults */ |
@@ -626,7 +626,7 @@ kvmppc_interrupt:
626 | 626 | ||
627 | GET_SCRATCH0(r3) | 627 | GET_SCRATCH0(r3) |
628 | mflr r4 | 628 | mflr r4 |
629 | std r3, VCPU_GPR(r13)(r9) | 629 | std r3, VCPU_GPR(R13)(r9) |
630 | std r4, VCPU_LR(r9) | 630 | std r4, VCPU_LR(r9) |
631 | 631 | ||
632 | /* Unset guest mode */ | 632 | /* Unset guest mode */ |
@@ -968,24 +968,24 @@ BEGIN_FTR_SECTION
968 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 968 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
969 | 969 | ||
970 | /* Save non-volatile GPRs */ | 970 | /* Save non-volatile GPRs */ |
971 | std r14, VCPU_GPR(r14)(r9) | 971 | std r14, VCPU_GPR(R14)(r9) |
972 | std r15, VCPU_GPR(r15)(r9) | 972 | std r15, VCPU_GPR(R15)(r9) |
973 | std r16, VCPU_GPR(r16)(r9) | 973 | std r16, VCPU_GPR(R16)(r9) |
974 | std r17, VCPU_GPR(r17)(r9) | 974 | std r17, VCPU_GPR(R17)(r9) |
975 | std r18, VCPU_GPR(r18)(r9) | 975 | std r18, VCPU_GPR(R18)(r9) |
976 | std r19, VCPU_GPR(r19)(r9) | 976 | std r19, VCPU_GPR(R19)(r9) |
977 | std r20, VCPU_GPR(r20)(r9) | 977 | std r20, VCPU_GPR(R20)(r9) |
978 | std r21, VCPU_GPR(r21)(r9) | 978 | std r21, VCPU_GPR(R21)(r9) |
979 | std r22, VCPU_GPR(r22)(r9) | 979 | std r22, VCPU_GPR(R22)(r9) |
980 | std r23, VCPU_GPR(r23)(r9) | 980 | std r23, VCPU_GPR(R23)(r9) |
981 | std r24, VCPU_GPR(r24)(r9) | 981 | std r24, VCPU_GPR(R24)(r9) |
982 | std r25, VCPU_GPR(r25)(r9) | 982 | std r25, VCPU_GPR(R25)(r9) |
983 | std r26, VCPU_GPR(r26)(r9) | 983 | std r26, VCPU_GPR(R26)(r9) |
984 | std r27, VCPU_GPR(r27)(r9) | 984 | std r27, VCPU_GPR(R27)(r9) |
985 | std r28, VCPU_GPR(r28)(r9) | 985 | std r28, VCPU_GPR(R28)(r9) |
986 | std r29, VCPU_GPR(r29)(r9) | 986 | std r29, VCPU_GPR(R29)(r9) |
987 | std r30, VCPU_GPR(r30)(r9) | 987 | std r30, VCPU_GPR(R30)(r9) |
988 | std r31, VCPU_GPR(r31)(r9) | 988 | std r31, VCPU_GPR(R31)(r9) |
989 | 989 | ||
990 | /* Save SPRGs */ | 990 | /* Save SPRGs */ |
991 | mfspr r3, SPRN_SPRG0 | 991 | mfspr r3, SPRN_SPRG0 |
@@ -1160,7 +1160,7 @@ kvmppc_hdsi:
1160 | andi. r0, r11, MSR_DR /* data relocation enabled? */ | 1160 | andi. r0, r11, MSR_DR /* data relocation enabled? */ |
1161 | beq 3f | 1161 | beq 3f |
1162 | clrrdi r0, r4, 28 | 1162 | clrrdi r0, r4, 28 |
1163 | PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ | 1163 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
1164 | bne 1f /* if no SLB entry found */ | 1164 | bne 1f /* if no SLB entry found */ |
1165 | 4: std r4, VCPU_FAULT_DAR(r9) | 1165 | 4: std r4, VCPU_FAULT_DAR(r9) |
1166 | stw r6, VCPU_FAULT_DSISR(r9) | 1166 | stw r6, VCPU_FAULT_DSISR(r9) |
@@ -1234,7 +1234,7 @@ kvmppc_hisi:
1234 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ | 1234 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ |
1235 | beq 3f | 1235 | beq 3f |
1236 | clrrdi r0, r10, 28 | 1236 | clrrdi r0, r10, 28 |
1237 | PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ | 1237 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
1238 | bne 1f /* if no SLB entry found */ | 1238 | bne 1f /* if no SLB entry found */ |
1239 | 4: | 1239 | 4: |
1240 | /* Search the hash table. */ | 1240 | /* Search the hash table. */ |
@@ -1278,7 +1278,7 @@ kvmppc_hisi:
1278 | */ | 1278 | */ |
1279 | .globl hcall_try_real_mode | 1279 | .globl hcall_try_real_mode |
1280 | hcall_try_real_mode: | 1280 | hcall_try_real_mode: |
1281 | ld r3,VCPU_GPR(r3)(r9) | 1281 | ld r3,VCPU_GPR(R3)(r9) |
1282 | andi. r0,r11,MSR_PR | 1282 | andi. r0,r11,MSR_PR |
1283 | bne hcall_real_cont | 1283 | bne hcall_real_cont |
1284 | clrrdi r3,r3,2 | 1284 | clrrdi r3,r3,2 |
@@ -1291,12 +1291,12 @@ hcall_try_real_mode:
1291 | add r3,r3,r4 | 1291 | add r3,r3,r4 |
1292 | mtctr r3 | 1292 | mtctr r3 |
1293 | mr r3,r9 /* get vcpu pointer */ | 1293 | mr r3,r9 /* get vcpu pointer */ |
1294 | ld r4,VCPU_GPR(r4)(r9) | 1294 | ld r4,VCPU_GPR(R4)(r9) |
1295 | bctrl | 1295 | bctrl |
1296 | cmpdi r3,H_TOO_HARD | 1296 | cmpdi r3,H_TOO_HARD |
1297 | beq hcall_real_fallback | 1297 | beq hcall_real_fallback |
1298 | ld r4,HSTATE_KVM_VCPU(r13) | 1298 | ld r4,HSTATE_KVM_VCPU(r13) |
1299 | std r3,VCPU_GPR(r3)(r4) | 1299 | std r3,VCPU_GPR(R3)(r4) |
1300 | ld r10,VCPU_PC(r4) | 1300 | ld r10,VCPU_PC(r4) |
1301 | ld r11,VCPU_MSR(r4) | 1301 | ld r11,VCPU_MSR(r4) |
1302 | b fast_guest_return | 1302 | b fast_guest_return |
@@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede)
1424 | li r0,0 /* set trap to 0 to say hcall is handled */ | 1424 | li r0,0 /* set trap to 0 to say hcall is handled */ |
1425 | stw r0,VCPU_TRAP(r3) | 1425 | stw r0,VCPU_TRAP(r3) |
1426 | li r0,H_SUCCESS | 1426 | li r0,H_SUCCESS |
1427 | std r0,VCPU_GPR(r3)(r3) | 1427 | std r0,VCPU_GPR(R3)(r3) |
1428 | BEGIN_FTR_SECTION | 1428 | BEGIN_FTR_SECTION |
1429 | b 2f /* just send it up to host on 970 */ | 1429 | b 2f /* just send it up to host on 970 */ |
1430 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | 1430 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) |
@@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1443 | addi r6,r5,VCORE_NAPPING_THREADS | 1443 | addi r6,r5,VCORE_NAPPING_THREADS |
1444 | 31: lwarx r4,0,r6 | 1444 | 31: lwarx r4,0,r6 |
1445 | or r4,r4,r0 | 1445 | or r4,r4,r0 |
1446 | PPC_POPCNTW(r7,r4) | 1446 | PPC_POPCNTW(R7,R4) |
1447 | cmpw r7,r8 | 1447 | cmpw r7,r8 |
1448 | bge 2f | 1448 | bge 2f |
1449 | stwcx. r4,0,r6 | 1449 | stwcx. r4,0,r6 |
@@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1464 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. | 1464 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. |
1465 | */ | 1465 | */ |
1466 | /* Save non-volatile GPRs */ | 1466 | /* Save non-volatile GPRs */ |
1467 | std r14, VCPU_GPR(r14)(r3) | 1467 | std r14, VCPU_GPR(R14)(r3) |
1468 | std r15, VCPU_GPR(r15)(r3) | 1468 | std r15, VCPU_GPR(R15)(r3) |
1469 | std r16, VCPU_GPR(r16)(r3) | 1469 | std r16, VCPU_GPR(R16)(r3) |
1470 | std r17, VCPU_GPR(r17)(r3) | 1470 | std r17, VCPU_GPR(R17)(r3) |
1471 | std r18, VCPU_GPR(r18)(r3) | 1471 | std r18, VCPU_GPR(R18)(r3) |
1472 | std r19, VCPU_GPR(r19)(r3) | 1472 | std r19, VCPU_GPR(R19)(r3) |
1473 | std r20, VCPU_GPR(r20)(r3) | 1473 | std r20, VCPU_GPR(R20)(r3) |
1474 | std r21, VCPU_GPR(r21)(r3) | 1474 | std r21, VCPU_GPR(R21)(r3) |
1475 | std r22, VCPU_GPR(r22)(r3) | 1475 | std r22, VCPU_GPR(R22)(r3) |
1476 | std r23, VCPU_GPR(r23)(r3) | 1476 | std r23, VCPU_GPR(R23)(r3) |
1477 | std r24, VCPU_GPR(r24)(r3) | 1477 | std r24, VCPU_GPR(R24)(r3) |
1478 | std r25, VCPU_GPR(r25)(r3) | 1478 | std r25, VCPU_GPR(R25)(r3) |
1479 | std r26, VCPU_GPR(r26)(r3) | 1479 | std r26, VCPU_GPR(R26)(r3) |
1480 | std r27, VCPU_GPR(r27)(r3) | 1480 | std r27, VCPU_GPR(R27)(r3) |
1481 | std r28, VCPU_GPR(r28)(r3) | 1481 | std r28, VCPU_GPR(R28)(r3) |
1482 | std r29, VCPU_GPR(r29)(r3) | 1482 | std r29, VCPU_GPR(R29)(r3) |
1483 | std r30, VCPU_GPR(r30)(r3) | 1483 | std r30, VCPU_GPR(R30)(r3) |
1484 | std r31, VCPU_GPR(r31)(r3) | 1484 | std r31, VCPU_GPR(R31)(r3) |
1485 | 1485 | ||
1486 | /* save FP state */ | 1486 | /* save FP state */ |
1487 | bl .kvmppc_save_fp | 1487 | bl .kvmppc_save_fp |
@@ -1513,24 +1513,24 @@ kvm_end_cede:
1513 | bl kvmppc_load_fp | 1513 | bl kvmppc_load_fp |
1514 | 1514 | ||
1515 | /* Load NV GPRS */ | 1515 | /* Load NV GPRS */ |
1516 | ld r14, VCPU_GPR(r14)(r4) | 1516 | ld r14, VCPU_GPR(R14)(r4) |
1517 | ld r15, VCPU_GPR(r15)(r4) | 1517 | ld r15, VCPU_GPR(R15)(r4) |
1518 | ld r16, VCPU_GPR(r16)(r4) | 1518 | ld r16, VCPU_GPR(R16)(r4) |
1519 | ld r17, VCPU_GPR(r17)(r4) | 1519 | ld r17, VCPU_GPR(R17)(r4) |
1520 | ld r18, VCPU_GPR(r18)(r4) | 1520 | ld r18, VCPU_GPR(R18)(r4) |
1521 | ld r19, VCPU_GPR(r19)(r4) | 1521 | ld r19, VCPU_GPR(R19)(r4) |
1522 | ld r20, VCPU_GPR(r20)(r4) | 1522 | ld r20, VCPU_GPR(R20)(r4) |
1523 | ld r21, VCPU_GPR(r21)(r4) | 1523 | ld r21, VCPU_GPR(R21)(r4) |
1524 | ld r22, VCPU_GPR(r22)(r4) | 1524 | ld r22, VCPU_GPR(R22)(r4) |
1525 | ld r23, VCPU_GPR(r23)(r4) | 1525 | ld r23, VCPU_GPR(R23)(r4) |
1526 | ld r24, VCPU_GPR(r24)(r4) | 1526 | ld r24, VCPU_GPR(R24)(r4) |
1527 | ld r25, VCPU_GPR(r25)(r4) | 1527 | ld r25, VCPU_GPR(R25)(r4) |
1528 | ld r26, VCPU_GPR(r26)(r4) | 1528 | ld r26, VCPU_GPR(R26)(r4) |
1529 | ld r27, VCPU_GPR(r27)(r4) | 1529 | ld r27, VCPU_GPR(R27)(r4) |
1530 | ld r28, VCPU_GPR(r28)(r4) | 1530 | ld r28, VCPU_GPR(R28)(r4) |
1531 | ld r29, VCPU_GPR(r29)(r4) | 1531 | ld r29, VCPU_GPR(R29)(r4) |
1532 | ld r30, VCPU_GPR(r30)(r4) | 1532 | ld r30, VCPU_GPR(R30)(r4) |
1533 | ld r31, VCPU_GPR(r31)(r4) | 1533 | ld r31, VCPU_GPR(R31)(r4) |
1534 | 1534 | ||
1535 | /* clear our bit in vcore->napping_threads */ | 1535 | /* clear our bit in vcore->napping_threads */ |
1536 | 33: ld r5,HSTATE_KVM_VCORE(r13) | 1536 | 33: ld r5,HSTATE_KVM_VCORE(r13) |
@@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION
1649 | reg = 0 | 1649 | reg = 0 |
1650 | .rept 32 | 1650 | .rept 32 |
1651 | li r6,reg*16+VCPU_VSRS | 1651 | li r6,reg*16+VCPU_VSRS |
1652 | STXVD2X(reg,r6,r3) | 1652 | STXVD2X(reg,R6,R3) |
1653 | reg = reg + 1 | 1653 | reg = reg + 1 |
1654 | .endr | 1654 | .endr |
1655 | FTR_SECTION_ELSE | 1655 | FTR_SECTION_ELSE |
@@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION
1711 | reg = 0 | 1711 | reg = 0 |
1712 | .rept 32 | 1712 | .rept 32 |
1713 | li r7,reg*16+VCPU_VSRS | 1713 | li r7,reg*16+VCPU_VSRS |
1714 | LXVD2X(reg,r7,r4) | 1714 | LXVD2X(reg,R7,R4) |
1715 | reg = reg + 1 | 1715 | reg = reg + 1 |
1716 | .endr | 1716 | .endr |
1717 | FTR_SECTION_ELSE | 1717 | FTR_SECTION_ELSE |
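
The same rule covers the instruction-encoding helpers touched in this file (PPC_SLBFEE_DOT, PPC_POPCNTW, STXVD2X, LXVD2X): their arguments are spliced into an instruction word as 5-bit register fields, so they too need plain numbers. A rough sketch in the style of asm/ppc-opcode.h -- the field helpers and opcode value below are simplified assumptions for illustration, not quotes from the header:

	/* Simplified, for illustration only (see asm/ppc-opcode.h for the real helpers). */
	#define ___PPC_RT(t)		(((t) & 0x1f) << 21)	/* RT field, bits 6..10  */
	#define ___PPC_RB(b)		(((b) & 0x1f) << 11)	/* RB field, bits 16..20 */
	#define PPC_INST_SLBFEE		0x7c0007a7		/* slbfee. with Rc = 1   */
	#define PPC_SLBFEE_DOT(t, b)	.long (PPC_INST_SLBFEE | ___PPC_RT(t) | ___PPC_RB(b))

		/* R5 and R0 must be the integers 5 and 0 for the shifts and masks to work: */
		PPC_SLBFEE_DOT(R5, R0)		/* emits  slbfee. r5, r0 */
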
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3e35383bdb21..2ddab0f90a81 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -39,24 +39,24 @@
39 | 39 | ||
40 | #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) | 40 | #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) |
41 | #define VCPU_LOAD_NVGPRS(vcpu) \ | 41 | #define VCPU_LOAD_NVGPRS(vcpu) \ |
42 | PPC_LL r14, VCPU_GPR(r14)(vcpu); \ | 42 | PPC_LL r14, VCPU_GPR(R14)(vcpu); \ |
43 | PPC_LL r15, VCPU_GPR(r15)(vcpu); \ | 43 | PPC_LL r15, VCPU_GPR(R15)(vcpu); \ |
44 | PPC_LL r16, VCPU_GPR(r16)(vcpu); \ | 44 | PPC_LL r16, VCPU_GPR(R16)(vcpu); \ |
45 | PPC_LL r17, VCPU_GPR(r17)(vcpu); \ | 45 | PPC_LL r17, VCPU_GPR(R17)(vcpu); \ |
46 | PPC_LL r18, VCPU_GPR(r18)(vcpu); \ | 46 | PPC_LL r18, VCPU_GPR(R18)(vcpu); \ |
47 | PPC_LL r19, VCPU_GPR(r19)(vcpu); \ | 47 | PPC_LL r19, VCPU_GPR(R19)(vcpu); \ |
48 | PPC_LL r20, VCPU_GPR(r20)(vcpu); \ | 48 | PPC_LL r20, VCPU_GPR(R20)(vcpu); \ |
49 | PPC_LL r21, VCPU_GPR(r21)(vcpu); \ | 49 | PPC_LL r21, VCPU_GPR(R21)(vcpu); \ |
50 | PPC_LL r22, VCPU_GPR(r22)(vcpu); \ | 50 | PPC_LL r22, VCPU_GPR(R22)(vcpu); \ |
51 | PPC_LL r23, VCPU_GPR(r23)(vcpu); \ | 51 | PPC_LL r23, VCPU_GPR(R23)(vcpu); \ |
52 | PPC_LL r24, VCPU_GPR(r24)(vcpu); \ | 52 | PPC_LL r24, VCPU_GPR(R24)(vcpu); \ |
53 | PPC_LL r25, VCPU_GPR(r25)(vcpu); \ | 53 | PPC_LL r25, VCPU_GPR(R25)(vcpu); \ |
54 | PPC_LL r26, VCPU_GPR(r26)(vcpu); \ | 54 | PPC_LL r26, VCPU_GPR(R26)(vcpu); \ |
55 | PPC_LL r27, VCPU_GPR(r27)(vcpu); \ | 55 | PPC_LL r27, VCPU_GPR(R27)(vcpu); \ |
56 | PPC_LL r28, VCPU_GPR(r28)(vcpu); \ | 56 | PPC_LL r28, VCPU_GPR(R28)(vcpu); \ |
57 | PPC_LL r29, VCPU_GPR(r29)(vcpu); \ | 57 | PPC_LL r29, VCPU_GPR(R29)(vcpu); \ |
58 | PPC_LL r30, VCPU_GPR(r30)(vcpu); \ | 58 | PPC_LL r30, VCPU_GPR(R30)(vcpu); \ |
59 | PPC_LL r31, VCPU_GPR(r31)(vcpu); \ | 59 | PPC_LL r31, VCPU_GPR(R31)(vcpu); \ |
60 | 60 | ||
61 | /***************************************************************************** | 61 | /***************************************************************************** |
62 | * * | 62 | * * |
@@ -131,24 +131,24 @@ kvmppc_handler_highmem:
131 | /* R7 = vcpu */ | 131 | /* R7 = vcpu */ |
132 | PPC_LL r7, GPR4(r1) | 132 | PPC_LL r7, GPR4(r1) |
133 | 133 | ||
134 | PPC_STL r14, VCPU_GPR(r14)(r7) | 134 | PPC_STL r14, VCPU_GPR(R14)(r7) |
135 | PPC_STL r15, VCPU_GPR(r15)(r7) | 135 | PPC_STL r15, VCPU_GPR(R15)(r7) |
136 | PPC_STL r16, VCPU_GPR(r16)(r7) | 136 | PPC_STL r16, VCPU_GPR(R16)(r7) |
137 | PPC_STL r17, VCPU_GPR(r17)(r7) | 137 | PPC_STL r17, VCPU_GPR(R17)(r7) |
138 | PPC_STL r18, VCPU_GPR(r18)(r7) | 138 | PPC_STL r18, VCPU_GPR(R18)(r7) |
139 | PPC_STL r19, VCPU_GPR(r19)(r7) | 139 | PPC_STL r19, VCPU_GPR(R19)(r7) |
140 | PPC_STL r20, VCPU_GPR(r20)(r7) | 140 | PPC_STL r20, VCPU_GPR(R20)(r7) |
141 | PPC_STL r21, VCPU_GPR(r21)(r7) | 141 | PPC_STL r21, VCPU_GPR(R21)(r7) |
142 | PPC_STL r22, VCPU_GPR(r22)(r7) | 142 | PPC_STL r22, VCPU_GPR(R22)(r7) |
143 | PPC_STL r23, VCPU_GPR(r23)(r7) | 143 | PPC_STL r23, VCPU_GPR(R23)(r7) |
144 | PPC_STL r24, VCPU_GPR(r24)(r7) | 144 | PPC_STL r24, VCPU_GPR(R24)(r7) |
145 | PPC_STL r25, VCPU_GPR(r25)(r7) | 145 | PPC_STL r25, VCPU_GPR(R25)(r7) |
146 | PPC_STL r26, VCPU_GPR(r26)(r7) | 146 | PPC_STL r26, VCPU_GPR(R26)(r7) |
147 | PPC_STL r27, VCPU_GPR(r27)(r7) | 147 | PPC_STL r27, VCPU_GPR(R27)(r7) |
148 | PPC_STL r28, VCPU_GPR(r28)(r7) | 148 | PPC_STL r28, VCPU_GPR(R28)(r7) |
149 | PPC_STL r29, VCPU_GPR(r29)(r7) | 149 | PPC_STL r29, VCPU_GPR(R29)(r7) |
150 | PPC_STL r30, VCPU_GPR(r30)(r7) | 150 | PPC_STL r30, VCPU_GPR(R30)(r7) |
151 | PPC_STL r31, VCPU_GPR(r31)(r7) | 151 | PPC_STL r31, VCPU_GPR(R31)(r7) |
152 | 152 | ||
153 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | 153 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ |
154 | mr r5, r12 | 154 | mr r5, r12 |
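
This file is where the byte-offset arithmetic is visible in the patch itself: VCPU_GPR(n) is (VCPU_GPRS + (n * ULONG_SIZE)), and VCPU_LOAD_NVGPRS() simply applies it to r14..r31. One line of that macro preprocesses roughly as below; a 64-bit build with ULONG_SIZE of 8 is assumed, and the concrete VCPU_GPRS offset is generated elsewhere in the powerpc build rather than shown in this diff:

	/* One line of VCPU_LOAD_NVGPRS(r7), before and after preprocessing: */
	PPC_LL	r14, VCPU_GPR(R14)(r7)
	/* -->  ld  r14, (VCPU_GPRS + (14 * 8))(r7)     -- R14 is just the number 14 */
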
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8feec2ff3928..e598a5a0d5e4 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -37,7 +37,7 @@
37 | #define HOST_CR 16 | 37 | #define HOST_CR 16 |
38 | #define HOST_NV_GPRS 20 | 38 | #define HOST_NV_GPRS 20 |
39 | #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) | 39 | #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) |
40 | #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4) | 40 | #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4) |
41 | #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ | 41 | #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ |
42 | #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ | 42 | #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ |
43 | 43 | ||
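
The hunk above is the one place where the macro argument feeds a subtraction rather than a multiplication: HOST_NV_GPR(n) is (HOST_NV_GPRS + ((n - 14) * 4)), so switching HOST_MIN_STACK_SIZE from HOST_NV_GPR(31) to HOST_NV_GPR(R31) leaves the stack-frame layout unchanged as long as R31 expands to 31. Working through the constants shown in the hunk (an illustrative check, not part of the patch):

	/* HOST_NV_GPRS = 20, 4-byte slots for r14..r31 on this 32-bit path:
	 *   HOST_NV_GPR(R31)    = 20 + (31 - 14) * 4     =  88
	 *   HOST_MIN_STACK_SIZE = 88 + 4                 =  92
	 *   HOST_STACK_SIZE     = ((92 + 15) / 16) * 16  =  96   (16-byte aligned)
	 *   HOST_STACK_LR       = 96 + 4                 = 100   (in caller frame)
	 */
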
@@ -58,8 +58,8 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
58 | /* Get pointer to vcpu and record exit number. */ | 58 | /* Get pointer to vcpu and record exit number. */ |
59 | mtspr SPRN_SPRG_WSCRATCH0, r4 | 59 | mtspr SPRN_SPRG_WSCRATCH0, r4 |
60 | mfspr r4, SPRN_SPRG_RVCPU | 60 | mfspr r4, SPRN_SPRG_RVCPU |
61 | stw r5, VCPU_GPR(r5)(r4) | 61 | stw r5, VCPU_GPR(R5)(r4) |
62 | stw r6, VCPU_GPR(r6)(r4) | 62 | stw r6, VCPU_GPR(R6)(r4) |
63 | mfctr r5 | 63 | mfctr r5 |
64 | lis r6, kvmppc_resume_host@h | 64 | lis r6, kvmppc_resume_host@h |
65 | stw r5, VCPU_CTR(r4) | 65 | stw r5, VCPU_CTR(r4) |
@@ -100,12 +100,12 @@ _GLOBAL(kvmppc_handler_len)
100 | * r5: KVM exit number | 100 | * r5: KVM exit number |
101 | */ | 101 | */ |
102 | _GLOBAL(kvmppc_resume_host) | 102 | _GLOBAL(kvmppc_resume_host) |
103 | stw r3, VCPU_GPR(r3)(r4) | 103 | stw r3, VCPU_GPR(R3)(r4) |
104 | mfcr r3 | 104 | mfcr r3 |
105 | stw r3, VCPU_CR(r4) | 105 | stw r3, VCPU_CR(r4) |
106 | stw r7, VCPU_GPR(r7)(r4) | 106 | stw r7, VCPU_GPR(R7)(r4) |
107 | stw r8, VCPU_GPR(r8)(r4) | 107 | stw r8, VCPU_GPR(R8)(r4) |
108 | stw r9, VCPU_GPR(r9)(r4) | 108 | stw r9, VCPU_GPR(R9)(r4) |
109 | 109 | ||
110 | li r6, 1 | 110 | li r6, 1 |
111 | slw r6, r6, r5 | 111 | slw r6, r6, r5 |
@@ -135,23 +135,23 @@ _GLOBAL(kvmppc_resume_host)
135 | isync | 135 | isync |
136 | stw r9, VCPU_LAST_INST(r4) | 136 | stw r9, VCPU_LAST_INST(r4) |
137 | 137 | ||
138 | stw r15, VCPU_GPR(r15)(r4) | 138 | stw r15, VCPU_GPR(R15)(r4) |
139 | stw r16, VCPU_GPR(r16)(r4) | 139 | stw r16, VCPU_GPR(R16)(r4) |
140 | stw r17, VCPU_GPR(r17)(r4) | 140 | stw r17, VCPU_GPR(R17)(r4) |
141 | stw r18, VCPU_GPR(r18)(r4) | 141 | stw r18, VCPU_GPR(R18)(r4) |
142 | stw r19, VCPU_GPR(r19)(r4) | 142 | stw r19, VCPU_GPR(R19)(r4) |
143 | stw r20, VCPU_GPR(r20)(r4) | 143 | stw r20, VCPU_GPR(R20)(r4) |
144 | stw r21, VCPU_GPR(r21)(r4) | 144 | stw r21, VCPU_GPR(R21)(r4) |
145 | stw r22, VCPU_GPR(r22)(r4) | 145 | stw r22, VCPU_GPR(R22)(r4) |
146 | stw r23, VCPU_GPR(r23)(r4) | 146 | stw r23, VCPU_GPR(R23)(r4) |
147 | stw r24, VCPU_GPR(r24)(r4) | 147 | stw r24, VCPU_GPR(R24)(r4) |
148 | stw r25, VCPU_GPR(r25)(r4) | 148 | stw r25, VCPU_GPR(R25)(r4) |
149 | stw r26, VCPU_GPR(r26)(r4) | 149 | stw r26, VCPU_GPR(R26)(r4) |
150 | stw r27, VCPU_GPR(r27)(r4) | 150 | stw r27, VCPU_GPR(R27)(r4) |
151 | stw r28, VCPU_GPR(r28)(r4) | 151 | stw r28, VCPU_GPR(R28)(r4) |
152 | stw r29, VCPU_GPR(r29)(r4) | 152 | stw r29, VCPU_GPR(R29)(r4) |
153 | stw r30, VCPU_GPR(r30)(r4) | 153 | stw r30, VCPU_GPR(R30)(r4) |
154 | stw r31, VCPU_GPR(r31)(r4) | 154 | stw r31, VCPU_GPR(R31)(r4) |
155 | ..skip_inst_copy: | 155 | ..skip_inst_copy: |
156 | 156 | ||
157 | /* Also grab DEAR and ESR before the host can clobber them. */ | 157 | /* Also grab DEAR and ESR before the host can clobber them. */ |
@@ -169,20 +169,20 @@ _GLOBAL(kvmppc_resume_host)
169 | ..skip_esr: | 169 | ..skip_esr: |
170 | 170 | ||
171 | /* Save remaining volatile guest register state to vcpu. */ | 171 | /* Save remaining volatile guest register state to vcpu. */ |
172 | stw r0, VCPU_GPR(r0)(r4) | 172 | stw r0, VCPU_GPR(R0)(r4) |
173 | stw r1, VCPU_GPR(r1)(r4) | 173 | stw r1, VCPU_GPR(R1)(r4) |
174 | stw r2, VCPU_GPR(r2)(r4) | 174 | stw r2, VCPU_GPR(R2)(r4) |
175 | stw r10, VCPU_GPR(r10)(r4) | 175 | stw r10, VCPU_GPR(R10)(r4) |
176 | stw r11, VCPU_GPR(r11)(r4) | 176 | stw r11, VCPU_GPR(R11)(r4) |
177 | stw r12, VCPU_GPR(r12)(r4) | 177 | stw r12, VCPU_GPR(R12)(r4) |
178 | stw r13, VCPU_GPR(r13)(r4) | 178 | stw r13, VCPU_GPR(R13)(r4) |
179 | stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */ | 179 | stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */ |
180 | mflr r3 | 180 | mflr r3 |
181 | stw r3, VCPU_LR(r4) | 181 | stw r3, VCPU_LR(r4) |
182 | mfxer r3 | 182 | mfxer r3 |
183 | stw r3, VCPU_XER(r4) | 183 | stw r3, VCPU_XER(r4) |
184 | mfspr r3, SPRN_SPRG_RSCRATCH0 | 184 | mfspr r3, SPRN_SPRG_RSCRATCH0 |
185 | stw r3, VCPU_GPR(r4)(r4) | 185 | stw r3, VCPU_GPR(R4)(r4) |
186 | mfspr r3, SPRN_SRR0 | 186 | mfspr r3, SPRN_SRR0 |
187 | stw r3, VCPU_PC(r4) | 187 | stw r3, VCPU_PC(r4) |
188 | 188 | ||
@@ -214,28 +214,28 @@ _GLOBAL(kvmppc_resume_host)
214 | 214 | ||
215 | /* Restore vcpu pointer and the nonvolatiles we used. */ | 215 | /* Restore vcpu pointer and the nonvolatiles we used. */ |
216 | mr r4, r14 | 216 | mr r4, r14 |
217 | lwz r14, VCPU_GPR(r14)(r4) | 217 | lwz r14, VCPU_GPR(R14)(r4) |
218 | 218 | ||
219 | /* Sometimes instruction emulation must restore complete GPR state. */ | 219 | /* Sometimes instruction emulation must restore complete GPR state. */ |
220 | andi. r5, r3, RESUME_FLAG_NV | 220 | andi. r5, r3, RESUME_FLAG_NV |
221 | beq ..skip_nv_load | 221 | beq ..skip_nv_load |
222 | lwz r15, VCPU_GPR(r15)(r4) | 222 | lwz r15, VCPU_GPR(R15)(r4) |
223 | lwz r16, VCPU_GPR(r16)(r4) | 223 | lwz r16, VCPU_GPR(R16)(r4) |
224 | lwz r17, VCPU_GPR(r17)(r4) | 224 | lwz r17, VCPU_GPR(R17)(r4) |
225 | lwz r18, VCPU_GPR(r18)(r4) | 225 | lwz r18, VCPU_GPR(R18)(r4) |
226 | lwz r19, VCPU_GPR(r19)(r4) | 226 | lwz r19, VCPU_GPR(R19)(r4) |
227 | lwz r20, VCPU_GPR(r20)(r4) | 227 | lwz r20, VCPU_GPR(R20)(r4) |
228 | lwz r21, VCPU_GPR(r21)(r4) | 228 | lwz r21, VCPU_GPR(R21)(r4) |
229 | lwz r22, VCPU_GPR(r22)(r4) | 229 | lwz r22, VCPU_GPR(R22)(r4) |
230 | lwz r23, VCPU_GPR(r23)(r4) | 230 | lwz r23, VCPU_GPR(R23)(r4) |
231 | lwz r24, VCPU_GPR(r24)(r4) | 231 | lwz r24, VCPU_GPR(R24)(r4) |
232 | lwz r25, VCPU_GPR(r25)(r4) | 232 | lwz r25, VCPU_GPR(R25)(r4) |
233 | lwz r26, VCPU_GPR(r26)(r4) | 233 | lwz r26, VCPU_GPR(R26)(r4) |
234 | lwz r27, VCPU_GPR(r27)(r4) | 234 | lwz r27, VCPU_GPR(R27)(r4) |
235 | lwz r28, VCPU_GPR(r28)(r4) | 235 | lwz r28, VCPU_GPR(R28)(r4) |
236 | lwz r29, VCPU_GPR(r29)(r4) | 236 | lwz r29, VCPU_GPR(R29)(r4) |
237 | lwz r30, VCPU_GPR(r30)(r4) | 237 | lwz r30, VCPU_GPR(R30)(r4) |
238 | lwz r31, VCPU_GPR(r31)(r4) | 238 | lwz r31, VCPU_GPR(R31)(r4) |
239 | ..skip_nv_load: | 239 | ..skip_nv_load: |
240 | 240 | ||
241 | /* Should we return to the guest? */ | 241 | /* Should we return to the guest? */ |
@@ -257,43 +257,43 @@ heavyweight_exit:
257 | 257 | ||
258 | /* We already saved guest volatile register state; now save the | 258 | /* We already saved guest volatile register state; now save the |
259 | * non-volatiles. */ | 259 | * non-volatiles. */ |
260 | stw r15, VCPU_GPR(r15)(r4) | 260 | stw r15, VCPU_GPR(R15)(r4) |
261 | stw r16, VCPU_GPR(r16)(r4) | 261 | stw r16, VCPU_GPR(R16)(r4) |
262 | stw r17, VCPU_GPR(r17)(r4) | 262 | stw r17, VCPU_GPR(R17)(r4) |
263 | stw r18, VCPU_GPR(r18)(r4) | 263 | stw r18, VCPU_GPR(R18)(r4) |
264 | stw r19, VCPU_GPR(r19)(r4) | 264 | stw r19, VCPU_GPR(R19)(r4) |
265 | stw r20, VCPU_GPR(r20)(r4) | 265 | stw r20, VCPU_GPR(R20)(r4) |
266 | stw r21, VCPU_GPR(r21)(r4) | 266 | stw r21, VCPU_GPR(R21)(r4) |
267 | stw r22, VCPU_GPR(r22)(r4) | 267 | stw r22, VCPU_GPR(R22)(r4) |
268 | stw r23, VCPU_GPR(r23)(r4) | 268 | stw r23, VCPU_GPR(R23)(r4) |
269 | stw r24, VCPU_GPR(r24)(r4) | 269 | stw r24, VCPU_GPR(R24)(r4) |
270 | stw r25, VCPU_GPR(r25)(r4) | 270 | stw r25, VCPU_GPR(R25)(r4) |
271 | stw r26, VCPU_GPR(r26)(r4) | 271 | stw r26, VCPU_GPR(R26)(r4) |
272 | stw r27, VCPU_GPR(r27)(r4) | 272 | stw r27, VCPU_GPR(R27)(r4) |
273 | stw r28, VCPU_GPR(r28)(r4) | 273 | stw r28, VCPU_GPR(R28)(r4) |
274 | stw r29, VCPU_GPR(r29)(r4) | 274 | stw r29, VCPU_GPR(R29)(r4) |
275 | stw r30, VCPU_GPR(r30)(r4) | 275 | stw r30, VCPU_GPR(R30)(r4) |
276 | stw r31, VCPU_GPR(r31)(r4) | 276 | stw r31, VCPU_GPR(R31)(r4) |
277 | 277 | ||
278 | /* Load host non-volatile register state from host stack. */ | 278 | /* Load host non-volatile register state from host stack. */ |
279 | lwz r14, HOST_NV_GPR(r14)(r1) | 279 | lwz r14, HOST_NV_GPR(R14)(r1) |
280 | lwz r15, HOST_NV_GPR(r15)(r1) | 280 | lwz r15, HOST_NV_GPR(R15)(r1) |
281 | lwz r16, HOST_NV_GPR(r16)(r1) | 281 | lwz r16, HOST_NV_GPR(R16)(r1) |
282 | lwz r17, HOST_NV_GPR(r17)(r1) | 282 | lwz r17, HOST_NV_GPR(R17)(r1) |
283 | lwz r18, HOST_NV_GPR(r18)(r1) | 283 | lwz r18, HOST_NV_GPR(R18)(r1) |
284 | lwz r19, HOST_NV_GPR(r19)(r1) | 284 | lwz r19, HOST_NV_GPR(R19)(r1) |
285 | lwz r20, HOST_NV_GPR(r20)(r1) | 285 | lwz r20, HOST_NV_GPR(R20)(r1) |
286 | lwz r21, HOST_NV_GPR(r21)(r1) | 286 | lwz r21, HOST_NV_GPR(R21)(r1) |
287 | lwz r22, HOST_NV_GPR(r22)(r1) | 287 | lwz r22, HOST_NV_GPR(R22)(r1) |
288 | lwz r23, HOST_NV_GPR(r23)(r1) | 288 | lwz r23, HOST_NV_GPR(R23)(r1) |
289 | lwz r24, HOST_NV_GPR(r24)(r1) | 289 | lwz r24, HOST_NV_GPR(R24)(r1) |
290 | lwz r25, HOST_NV_GPR(r25)(r1) | 290 | lwz r25, HOST_NV_GPR(R25)(r1) |
291 | lwz r26, HOST_NV_GPR(r26)(r1) | 291 | lwz r26, HOST_NV_GPR(R26)(r1) |
292 | lwz r27, HOST_NV_GPR(r27)(r1) | 292 | lwz r27, HOST_NV_GPR(R27)(r1) |
293 | lwz r28, HOST_NV_GPR(r28)(r1) | 293 | lwz r28, HOST_NV_GPR(R28)(r1) |
294 | lwz r29, HOST_NV_GPR(r29)(r1) | 294 | lwz r29, HOST_NV_GPR(R29)(r1) |
295 | lwz r30, HOST_NV_GPR(r30)(r1) | 295 | lwz r30, HOST_NV_GPR(R30)(r1) |
296 | lwz r31, HOST_NV_GPR(r31)(r1) | 296 | lwz r31, HOST_NV_GPR(R31)(r1) |
297 | 297 | ||
298 | /* Return to kvm_vcpu_run(). */ | 298 | /* Return to kvm_vcpu_run(). */ |
299 | lwz r4, HOST_STACK_LR(r1) | 299 | lwz r4, HOST_STACK_LR(r1) |
@@ -321,44 +321,44 @@ _GLOBAL(__kvmppc_vcpu_run)
321 | stw r5, HOST_CR(r1) | 321 | stw r5, HOST_CR(r1) |
322 | 322 | ||
323 | /* Save host non-volatile register state to stack. */ | 323 | /* Save host non-volatile register state to stack. */ |
324 | stw r14, HOST_NV_GPR(r14)(r1) | 324 | stw r14, HOST_NV_GPR(R14)(r1) |
325 | stw r15, HOST_NV_GPR(r15)(r1) | 325 | stw r15, HOST_NV_GPR(R15)(r1) |
326 | stw r16, HOST_NV_GPR(r16)(r1) | 326 | stw r16, HOST_NV_GPR(R16)(r1) |
327 | stw r17, HOST_NV_GPR(r17)(r1) | 327 | stw r17, HOST_NV_GPR(R17)(r1) |
328 | stw r18, HOST_NV_GPR(r18)(r1) | 328 | stw r18, HOST_NV_GPR(R18)(r1) |
329 | stw r19, HOST_NV_GPR(r19)(r1) | 329 | stw r19, HOST_NV_GPR(R19)(r1) |
330 | stw r20, HOST_NV_GPR(r20)(r1) | 330 | stw r20, HOST_NV_GPR(R20)(r1) |
331 | stw r21, HOST_NV_GPR(r21)(r1) | 331 | stw r21, HOST_NV_GPR(R21)(r1) |
332 | stw r22, HOST_NV_GPR(r22)(r1) | 332 | stw r22, HOST_NV_GPR(R22)(r1) |
333 | stw r23, HOST_NV_GPR(r23)(r1) | 333 | stw r23, HOST_NV_GPR(R23)(r1) |
334 | stw r24, HOST_NV_GPR(r24)(r1) | 334 | stw r24, HOST_NV_GPR(R24)(r1) |
335 | stw r25, HOST_NV_GPR(r25)(r1) | 335 | stw r25, HOST_NV_GPR(R25)(r1) |
336 | stw r26, HOST_NV_GPR(r26)(r1) | 336 | stw r26, HOST_NV_GPR(R26)(r1) |
337 | stw r27, HOST_NV_GPR(r27)(r1) | 337 | stw r27, HOST_NV_GPR(R27)(r1) |
338 | stw r28, HOST_NV_GPR(r28)(r1) | 338 | stw r28, HOST_NV_GPR(R28)(r1) |
339 | stw r29, HOST_NV_GPR(r29)(r1) | 339 | stw r29, HOST_NV_GPR(R29)(r1) |
340 | stw r30, HOST_NV_GPR(r30)(r1) | 340 | stw r30, HOST_NV_GPR(R30)(r1) |
341 | stw r31, HOST_NV_GPR(r31)(r1) | 341 | stw r31, HOST_NV_GPR(R31)(r1) |
342 | 342 | ||
343 | /* Load guest non-volatiles. */ | 343 | /* Load guest non-volatiles. */ |
344 | lwz r14, VCPU_GPR(r14)(r4) | 344 | lwz r14, VCPU_GPR(R14)(r4) |
345 | lwz r15, VCPU_GPR(r15)(r4) | 345 | lwz r15, VCPU_GPR(R15)(r4) |
346 | lwz r16, VCPU_GPR(r16)(r4) | 346 | lwz r16, VCPU_GPR(R16)(r4) |
347 | lwz r17, VCPU_GPR(r17)(r4) | 347 | lwz r17, VCPU_GPR(R17)(r4) |
348 | lwz r18, VCPU_GPR(r18)(r4) | 348 | lwz r18, VCPU_GPR(R18)(r4) |
349 | lwz r19, VCPU_GPR(r19)(r4) | 349 | lwz r19, VCPU_GPR(R19)(r4) |
350 | lwz r20, VCPU_GPR(r20)(r4) | 350 | lwz r20, VCPU_GPR(R20)(r4) |
351 | lwz r21, VCPU_GPR(r21)(r4) | 351 | lwz r21, VCPU_GPR(R21)(r4) |
352 | lwz r22, VCPU_GPR(r22)(r4) | 352 | lwz r22, VCPU_GPR(R22)(r4) |
353 | lwz r23, VCPU_GPR(r23)(r4) | 353 | lwz r23, VCPU_GPR(R23)(r4) |
354 | lwz r24, VCPU_GPR(r24)(r4) | 354 | lwz r24, VCPU_GPR(R24)(r4) |
355 | lwz r25, VCPU_GPR(r25)(r4) | 355 | lwz r25, VCPU_GPR(R25)(r4) |
356 | lwz r26, VCPU_GPR(r26)(r4) | 356 | lwz r26, VCPU_GPR(R26)(r4) |
357 | lwz r27, VCPU_GPR(r27)(r4) | 357 | lwz r27, VCPU_GPR(R27)(r4) |
358 | lwz r28, VCPU_GPR(r28)(r4) | 358 | lwz r28, VCPU_GPR(R28)(r4) |
359 | lwz r29, VCPU_GPR(r29)(r4) | 359 | lwz r29, VCPU_GPR(R29)(r4) |
360 | lwz r30, VCPU_GPR(r30)(r4) | 360 | lwz r30, VCPU_GPR(R30)(r4) |
361 | lwz r31, VCPU_GPR(r31)(r4) | 361 | lwz r31, VCPU_GPR(R31)(r4) |
362 | 362 | ||
363 | #ifdef CONFIG_SPE | 363 | #ifdef CONFIG_SPE |
364 | /* save host SPEFSCR and load guest SPEFSCR */ | 364 | /* save host SPEFSCR and load guest SPEFSCR */ |
@@ -386,13 +386,13 @@ lightweight_exit:
386 | #endif | 386 | #endif |
387 | 387 | ||
388 | /* Load some guest volatiles. */ | 388 | /* Load some guest volatiles. */ |
389 | lwz r0, VCPU_GPR(r0)(r4) | 389 | lwz r0, VCPU_GPR(R0)(r4) |
390 | lwz r2, VCPU_GPR(r2)(r4) | 390 | lwz r2, VCPU_GPR(R2)(r4) |
391 | lwz r9, VCPU_GPR(r9)(r4) | 391 | lwz r9, VCPU_GPR(R9)(r4) |
392 | lwz r10, VCPU_GPR(r10)(r4) | 392 | lwz r10, VCPU_GPR(R10)(r4) |
393 | lwz r11, VCPU_GPR(r11)(r4) | 393 | lwz r11, VCPU_GPR(R11)(r4) |
394 | lwz r12, VCPU_GPR(r12)(r4) | 394 | lwz r12, VCPU_GPR(R12)(r4) |
395 | lwz r13, VCPU_GPR(r13)(r4) | 395 | lwz r13, VCPU_GPR(R13)(r4) |
396 | lwz r3, VCPU_LR(r4) | 396 | lwz r3, VCPU_LR(r4) |
397 | mtlr r3 | 397 | mtlr r3 |
398 | lwz r3, VCPU_XER(r4) | 398 | lwz r3, VCPU_XER(r4) |
@@ -411,7 +411,7 @@ lightweight_exit:
411 | 411 | ||
412 | /* Can't switch the stack pointer until after IVPR is switched, | 412 | /* Can't switch the stack pointer until after IVPR is switched, |
413 | * because host interrupt handlers would get confused. */ | 413 | * because host interrupt handlers would get confused. */ |
414 | lwz r1, VCPU_GPR(r1)(r4) | 414 | lwz r1, VCPU_GPR(R1)(r4) |
415 | 415 | ||
416 | /* | 416 | /* |
417 | * Host interrupt handlers may have clobbered these | 417 | * Host interrupt handlers may have clobbered these |
@@ -449,10 +449,10 @@ lightweight_exit:
449 | mtcr r5 | 449 | mtcr r5 |
450 | mtsrr0 r6 | 450 | mtsrr0 r6 |
451 | mtsrr1 r7 | 451 | mtsrr1 r7 |
452 | lwz r5, VCPU_GPR(r5)(r4) | 452 | lwz r5, VCPU_GPR(R5)(r4) |
453 | lwz r6, VCPU_GPR(r6)(r4) | 453 | lwz r6, VCPU_GPR(R6)(r4) |
454 | lwz r7, VCPU_GPR(r7)(r4) | 454 | lwz r7, VCPU_GPR(R7)(r4) |
455 | lwz r8, VCPU_GPR(r8)(r4) | 455 | lwz r8, VCPU_GPR(R8)(r4) |
456 | 456 | ||
457 | /* Clear any debug events which occurred since we disabled MSR[DE]. | 457 | /* Clear any debug events which occurred since we disabled MSR[DE]. |
458 | * XXX This gives us a 3-instruction window in which a breakpoint | 458 | * XXX This gives us a 3-instruction window in which a breakpoint |
@@ -461,8 +461,8 @@ lightweight_exit:
461 | ori r3, r3, 0xffff | 461 | ori r3, r3, 0xffff |
462 | mtspr SPRN_DBSR, r3 | 462 | mtspr SPRN_DBSR, r3 |
463 | 463 | ||
464 | lwz r3, VCPU_GPR(r3)(r4) | 464 | lwz r3, VCPU_GPR(R3)(r4) |
465 | lwz r4, VCPU_GPR(r4)(r4) | 465 | lwz r4, VCPU_GPR(R4)(r4) |
466 | rfi | 466 | rfi |
467 | 467 | ||
468 | #ifdef CONFIG_SPE | 468 | #ifdef CONFIG_SPE |
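
One ordering detail worth noting in the lightweight_exit hunks above: r4 is both the vcpu pointer and a guest GPR, so every other volatile is reloaded through r4 first and VCPU_GPR(R4) is the very last load before entering the guest. The tail of that sequence, repeated here with explanatory comments (same instructions as in the hunk above):

	lwz	r3, VCPU_GPR(R3)(r4)	/* last guest volatile that still needs r4 */
	lwz	r4, VCPU_GPR(R4)(r4)	/* overwrite the vcpu pointer with guest r4 */
	rfi				/* enter the guest */
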
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..a623b1d32d3e 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -67,15 +67,15 @@
67 | */ | 67 | */ |
68 | .macro kvm_handler_common intno, srr0, flags | 68 | .macro kvm_handler_common intno, srr0, flags |
69 | /* Restore host stack pointer */ | 69 | /* Restore host stack pointer */ |
70 | PPC_STL r1, VCPU_GPR(r1)(r4) | 70 | PPC_STL r1, VCPU_GPR(R1)(r4) |
71 | PPC_STL r2, VCPU_GPR(r2)(r4) | 71 | PPC_STL r2, VCPU_GPR(R2)(r4) |
72 | PPC_LL r1, VCPU_HOST_STACK(r4) | 72 | PPC_LL r1, VCPU_HOST_STACK(r4) |
73 | PPC_LL r2, HOST_R2(r1) | 73 | PPC_LL r2, HOST_R2(r1) |
74 | 74 | ||
75 | mfspr r10, SPRN_PID | 75 | mfspr r10, SPRN_PID |
76 | lwz r8, VCPU_HOST_PID(r4) | 76 | lwz r8, VCPU_HOST_PID(r4) |
77 | PPC_LL r11, VCPU_SHARED(r4) | 77 | PPC_LL r11, VCPU_SHARED(r4) |
78 | PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ | 78 | PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */ |
79 | li r14, \intno | 79 | li r14, \intno |
80 | 80 | ||
81 | stw r10, VCPU_GUEST_PID(r4) | 81 | stw r10, VCPU_GUEST_PID(r4) |
@@ -137,27 +137,27 @@
137 | */ | 137 | */ |
138 | 138 | ||
139 | mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ | 139 | mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ |
140 | PPC_STL r15, VCPU_GPR(r15)(r4) | 140 | PPC_STL r15, VCPU_GPR(R15)(r4) |
141 | PPC_STL r16, VCPU_GPR(r16)(r4) | 141 | PPC_STL r16, VCPU_GPR(R16)(r4) |
142 | PPC_STL r17, VCPU_GPR(r17)(r4) | 142 | PPC_STL r17, VCPU_GPR(R17)(r4) |
143 | PPC_STL r18, VCPU_GPR(r18)(r4) | 143 | PPC_STL r18, VCPU_GPR(R18)(r4) |
144 | PPC_STL r19, VCPU_GPR(r19)(r4) | 144 | PPC_STL r19, VCPU_GPR(R19)(r4) |
145 | mr r8, r3 | 145 | mr r8, r3 |
146 | PPC_STL r20, VCPU_GPR(r20)(r4) | 146 | PPC_STL r20, VCPU_GPR(R20)(r4) |
147 | rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS | 147 | rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS |
148 | PPC_STL r21, VCPU_GPR(r21)(r4) | 148 | PPC_STL r21, VCPU_GPR(R21)(r4) |
149 | rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR | 149 | rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR |
150 | PPC_STL r22, VCPU_GPR(r22)(r4) | 150 | PPC_STL r22, VCPU_GPR(R22)(r4) |
151 | rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID | 151 | rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID |
152 | PPC_STL r23, VCPU_GPR(r23)(r4) | 152 | PPC_STL r23, VCPU_GPR(R23)(r4) |
153 | PPC_STL r24, VCPU_GPR(r24)(r4) | 153 | PPC_STL r24, VCPU_GPR(R24)(r4) |
154 | PPC_STL r25, VCPU_GPR(r25)(r4) | 154 | PPC_STL r25, VCPU_GPR(R25)(r4) |
155 | PPC_STL r26, VCPU_GPR(r26)(r4) | 155 | PPC_STL r26, VCPU_GPR(R26)(r4) |
156 | PPC_STL r27, VCPU_GPR(r27)(r4) | 156 | PPC_STL r27, VCPU_GPR(R27)(r4) |
157 | PPC_STL r28, VCPU_GPR(r28)(r4) | 157 | PPC_STL r28, VCPU_GPR(R28)(r4) |
158 | PPC_STL r29, VCPU_GPR(r29)(r4) | 158 | PPC_STL r29, VCPU_GPR(R29)(r4) |
159 | PPC_STL r30, VCPU_GPR(r30)(r4) | 159 | PPC_STL r30, VCPU_GPR(R30)(r4) |
160 | PPC_STL r31, VCPU_GPR(r31)(r4) | 160 | PPC_STL r31, VCPU_GPR(R31)(r4) |
161 | mtspr SPRN_EPLC, r8 | 161 | mtspr SPRN_EPLC, r8 |
162 | 162 | ||
163 | /* disable preemption, so we are sure we hit the fixup handler */ | 163 | /* disable preemption, so we are sure we hit the fixup handler */ |
@@ -211,24 +211,24 @@
211 | .macro kvm_handler intno srr0, srr1, flags | 211 | .macro kvm_handler intno srr0, srr1, flags |
212 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | 212 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) |
213 | GET_VCPU(r11, r10) | 213 | GET_VCPU(r11, r10) |
214 | PPC_STL r3, VCPU_GPR(r3)(r11) | 214 | PPC_STL r3, VCPU_GPR(R3)(r11) |
215 | mfspr r3, SPRN_SPRG_RSCRATCH0 | 215 | mfspr r3, SPRN_SPRG_RSCRATCH0 |
216 | PPC_STL r4, VCPU_GPR(r4)(r11) | 216 | PPC_STL r4, VCPU_GPR(R4)(r11) |
217 | PPC_LL r4, THREAD_NORMSAVE(0)(r10) | 217 | PPC_LL r4, THREAD_NORMSAVE(0)(r10) |
218 | PPC_STL r5, VCPU_GPR(r5)(r11) | 218 | PPC_STL r5, VCPU_GPR(R5)(r11) |
219 | stw r13, VCPU_CR(r11) | 219 | stw r13, VCPU_CR(r11) |
220 | mfspr r5, \srr0 | 220 | mfspr r5, \srr0 |
221 | PPC_STL r3, VCPU_GPR(r10)(r11) | 221 | PPC_STL r3, VCPU_GPR(R10)(r11) |
222 | PPC_LL r3, THREAD_NORMSAVE(2)(r10) | 222 | PPC_LL r3, THREAD_NORMSAVE(2)(r10) |
223 | PPC_STL r6, VCPU_GPR(r6)(r11) | 223 | PPC_STL r6, VCPU_GPR(R6)(r11) |
224 | PPC_STL r4, VCPU_GPR(r11)(r11) | 224 | PPC_STL r4, VCPU_GPR(R11)(r11) |
225 | mfspr r6, \srr1 | 225 | mfspr r6, \srr1 |
226 | PPC_STL r7, VCPU_GPR(r7)(r11) | 226 | PPC_STL r7, VCPU_GPR(R7)(r11) |
227 | PPC_STL r8, VCPU_GPR(r8)(r11) | 227 | PPC_STL r8, VCPU_GPR(R8)(r11) |
228 | PPC_STL r9, VCPU_GPR(r9)(r11) | 228 | PPC_STL r9, VCPU_GPR(R9)(r11) |
229 | PPC_STL r3, VCPU_GPR(r13)(r11) | 229 | PPC_STL r3, VCPU_GPR(R13)(r11) |
230 | mfctr r7 | 230 | mfctr r7 |
231 | PPC_STL r12, VCPU_GPR(r12)(r11) | 231 | PPC_STL r12, VCPU_GPR(R12)(r11) |
232 | PPC_STL r7, VCPU_CTR(r11) | 232 | PPC_STL r7, VCPU_CTR(r11) |
233 | mr r4, r11 | 233 | mr r4, r11 |
234 | kvm_handler_common \intno, \srr0, \flags | 234 | kvm_handler_common \intno, \srr0, \flags |
@@ -238,25 +238,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
238 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | 238 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) |
239 | mfspr r10, SPRN_SPRG_THREAD | 239 | mfspr r10, SPRN_SPRG_THREAD |
240 | GET_VCPU(r11, r10) | 240 | GET_VCPU(r11, r10) |
241 | PPC_STL r3, VCPU_GPR(r3)(r11) | 241 | PPC_STL r3, VCPU_GPR(R3)(r11) |
242 | mfspr r3, \scratch | 242 | mfspr r3, \scratch |
243 | PPC_STL r4, VCPU_GPR(r4)(r11) | 243 | PPC_STL r4, VCPU_GPR(R4)(r11) |
244 | PPC_LL r4, GPR9(r8) | 244 | PPC_LL r4, GPR9(r8) |
245 | PPC_STL r5, VCPU_GPR(r5)(r11) | 245 | PPC_STL r5, VCPU_GPR(R5)(r11) |
246 | stw r9, VCPU_CR(r11) | 246 | stw r9, VCPU_CR(r11) |
247 | mfspr r5, \srr0 | 247 | mfspr r5, \srr0 |
248 | PPC_STL r3, VCPU_GPR(r8)(r11) | 248 | PPC_STL r3, VCPU_GPR(R8)(r11) |
249 | PPC_LL r3, GPR10(r8) | 249 | PPC_LL r3, GPR10(r8) |
250 | PPC_STL r6, VCPU_GPR(r6)(r11) | 250 | PPC_STL r6, VCPU_GPR(R6)(r11) |
251 | PPC_STL r4, VCPU_GPR(r9)(r11) | 251 | PPC_STL r4, VCPU_GPR(R9)(r11) |
252 | mfspr r6, \srr1 | 252 | mfspr r6, \srr1 |
253 | PPC_LL r4, GPR11(r8) | 253 | PPC_LL r4, GPR11(r8) |
254 | PPC_STL r7, VCPU_GPR(r7)(r11) | 254 | PPC_STL r7, VCPU_GPR(R7)(r11) |
255 | PPC_STL r3, VCPU_GPR(r10)(r11) | 255 | PPC_STL r3, VCPU_GPR(R10)(r11) |
256 | mfctr r7 | 256 | mfctr r7 |
257 | PPC_STL r12, VCPU_GPR(r12)(r11) | 257 | PPC_STL r12, VCPU_GPR(R12)(r11) |
258 | PPC_STL r13, VCPU_GPR(r13)(r11) | 258 | PPC_STL r13, VCPU_GPR(R13)(r11) |
259 | PPC_STL r4, VCPU_GPR(r11)(r11) | 259 | PPC_STL r4, VCPU_GPR(R11)(r11) |
260 | PPC_STL r7, VCPU_CTR(r11) | 260 | PPC_STL r7, VCPU_CTR(r11) |
261 | mr r4, r11 | 261 | mr r4, r11 |
262 | kvm_handler_common \intno, \srr0, \flags | 262 | kvm_handler_common \intno, \srr0, \flags |
@@ -310,7 +310,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
310 | _GLOBAL(kvmppc_resume_host) | 310 | _GLOBAL(kvmppc_resume_host) |
311 | /* Save remaining volatile guest register state to vcpu. */ | 311 | /* Save remaining volatile guest register state to vcpu. */ |
312 | mfspr r3, SPRN_VRSAVE | 312 | mfspr r3, SPRN_VRSAVE |
313 | PPC_STL r0, VCPU_GPR(r0)(r4) | 313 | PPC_STL r0, VCPU_GPR(R0)(r4) |
314 | mflr r5 | 314 | mflr r5 |
315 | mfspr r6, SPRN_SPRG4 | 315 | mfspr r6, SPRN_SPRG4 |
316 | PPC_STL r5, VCPU_LR(r4) | 316 | PPC_STL r5, VCPU_LR(r4) |
@@ -358,27 +358,27 @@ _GLOBAL(kvmppc_resume_host)
358 | 358 | ||
359 | /* Restore vcpu pointer and the nonvolatiles we used. */ | 359 | /* Restore vcpu pointer and the nonvolatiles we used. */ |
360 | mr r4, r14 | 360 | mr r4, r14 |
361 | PPC_LL r14, VCPU_GPR(r14)(r4) | 361 | PPC_LL r14, VCPU_GPR(R14)(r4) |
362 | 362 | ||
363 | andi. r5, r3, RESUME_FLAG_NV | 363 | andi. r5, r3, RESUME_FLAG_NV |
364 | beq skip_nv_load | 364 | beq skip_nv_load |
365 | PPC_LL r15, VCPU_GPR(r15)(r4) | 365 | PPC_LL r15, VCPU_GPR(R15)(r4) |
366 | PPC_LL r16, VCPU_GPR(r16)(r4) | 366 | PPC_LL r16, VCPU_GPR(R16)(r4) |
367 | PPC_LL r17, VCPU_GPR(r17)(r4) | 367 | PPC_LL r17, VCPU_GPR(R17)(r4) |
368 | PPC_LL r18, VCPU_GPR(r18)(r4) | 368 | PPC_LL r18, VCPU_GPR(R18)(r4) |
369 | PPC_LL r19, VCPU_GPR(r19)(r4) | 369 | PPC_LL r19, VCPU_GPR(R19)(r4) |
370 | PPC_LL r20, VCPU_GPR(r20)(r4) | 370 | PPC_LL r20, VCPU_GPR(R20)(r4) |
371 | PPC_LL r21, VCPU_GPR(r21)(r4) | 371 | PPC_LL r21, VCPU_GPR(R21)(r4) |
372 | PPC_LL r22, VCPU_GPR(r22)(r4) | 372 | PPC_LL r22, VCPU_GPR(R22)(r4) |
373 | PPC_LL r23, VCPU_GPR(r23)(r4) | 373 | PPC_LL r23, VCPU_GPR(R23)(r4) |
374 | PPC_LL r24, VCPU_GPR(r24)(r4) | 374 | PPC_LL r24, VCPU_GPR(R24)(r4) |
375 | PPC_LL r25, VCPU_GPR(r25)(r4) | 375 | PPC_LL r25, VCPU_GPR(R25)(r4) |
376 | PPC_LL r26, VCPU_GPR(r26)(r4) | 376 | PPC_LL r26, VCPU_GPR(R26)(r4) |
377 | PPC_LL r27, VCPU_GPR(r27)(r4) | 377 | PPC_LL r27, VCPU_GPR(R27)(r4) |
378 | PPC_LL r28, VCPU_GPR(r28)(r4) | 378 | PPC_LL r28, VCPU_GPR(R28)(r4) |
379 | PPC_LL r29, VCPU_GPR(r29)(r4) | 379 | PPC_LL r29, VCPU_GPR(R29)(r4) |
380 | PPC_LL r30, VCPU_GPR(r30)(r4) | 380 | PPC_LL r30, VCPU_GPR(R30)(r4) |
381 | PPC_LL r31, VCPU_GPR(r31)(r4) | 381 | PPC_LL r31, VCPU_GPR(R31)(r4) |
382 | skip_nv_load: | 382 | skip_nv_load: |
383 | /* Should we return to the guest? */ | 383 | /* Should we return to the guest? */ |
384 | andi. r5, r3, RESUME_FLAG_HOST | 384 | andi. r5, r3, RESUME_FLAG_HOST |
@@ -396,23 +396,23 @@ heavyweight_exit:
396 | * non-volatiles. | 396 | * non-volatiles. |
397 | */ | 397 | */ |
398 | 398 | ||
399 | PPC_STL r15, VCPU_GPR(r15)(r4) | 399 | PPC_STL r15, VCPU_GPR(R15)(r4) |
400 | PPC_STL r16, VCPU_GPR(r16)(r4) | 400 | PPC_STL r16, VCPU_GPR(R16)(r4) |
401 | PPC_STL r17, VCPU_GPR(r17)(r4) | 401 | PPC_STL r17, VCPU_GPR(R17)(r4) |
402 | PPC_STL r18, VCPU_GPR(r18)(r4) | 402 | PPC_STL r18, VCPU_GPR(R18)(r4) |
403 | PPC_STL r19, VCPU_GPR(r19)(r4) | 403 | PPC_STL r19, VCPU_GPR(R19)(r4) |
404 | PPC_STL r20, VCPU_GPR(r20)(r4) | 404 | PPC_STL r20, VCPU_GPR(R20)(r4) |
405 | PPC_STL r21, VCPU_GPR(r21)(r4) | 405 | PPC_STL r21, VCPU_GPR(R21)(r4) |
406 | PPC_STL r22, VCPU_GPR(r22)(r4) | 406 | PPC_STL r22, VCPU_GPR(R22)(r4) |
407 | PPC_STL r23, VCPU_GPR(r23)(r4) | 407 | PPC_STL r23, VCPU_GPR(R23)(r4) |
408 | PPC_STL r24, VCPU_GPR(r24)(r4) | 408 | PPC_STL r24, VCPU_GPR(R24)(r4) |
409 | PPC_STL r25, VCPU_GPR(r25)(r4) | 409 | PPC_STL r25, VCPU_GPR(R25)(r4) |
410 | PPC_STL r26, VCPU_GPR(r26)(r4) | 410 | PPC_STL r26, VCPU_GPR(R26)(r4) |
411 | PPC_STL r27, VCPU_GPR(r27)(r4) | 411 | PPC_STL r27, VCPU_GPR(R27)(r4) |
412 | PPC_STL r28, VCPU_GPR(r28)(r4) | 412 | PPC_STL r28, VCPU_GPR(R28)(r4) |
413 | PPC_STL r29, VCPU_GPR(r29)(r4) | 413 | PPC_STL r29, VCPU_GPR(R29)(r4) |
414 | PPC_STL r30, VCPU_GPR(r30)(r4) | 414 | PPC_STL r30, VCPU_GPR(R30)(r4) |
415 | PPC_STL r31, VCPU_GPR(r31)(r4) | 415 | PPC_STL r31, VCPU_GPR(R31)(r4) |
416 | 416 | ||
417 | /* Load host non-volatile register state from host stack. */ | 417 | /* Load host non-volatile register state from host stack. */ |
418 | PPC_LL r14, HOST_NV_GPR(r14)(r1) | 418 | PPC_LL r14, HOST_NV_GPR(r14)(r1) |
@@ -478,24 +478,24 @@ _GLOBAL(__kvmppc_vcpu_run)
478 | PPC_STL r31, HOST_NV_GPR(r31)(r1) | 478 | PPC_STL r31, HOST_NV_GPR(r31)(r1) |
479 | 479 | ||
480 | /* Load guest non-volatiles. */ | 480 | /* Load guest non-volatiles. */ |
481 | PPC_LL r14, VCPU_GPR(r14)(r4) | 481 | PPC_LL r14, VCPU_GPR(R14)(r4) |
482 | PPC_LL r15, VCPU_GPR(r15)(r4) | 482 | PPC_LL r15, VCPU_GPR(R15)(r4) |
483 | PPC_LL r16, VCPU_GPR(r16)(r4) | 483 | PPC_LL r16, VCPU_GPR(R16)(r4) |
484 | PPC_LL r17, VCPU_GPR(r17)(r4) | 484 | PPC_LL r17, VCPU_GPR(R17)(r4) |
485 | PPC_LL r18, VCPU_GPR(r18)(r4) | 485 | PPC_LL r18, VCPU_GPR(R18)(r4) |
486 | PPC_LL r19, VCPU_GPR(r19)(r4) | 486 | PPC_LL r19, VCPU_GPR(R19)(r4) |
487 | PPC_LL r20, VCPU_GPR(r20)(r4) | 487 | PPC_LL r20, VCPU_GPR(R20)(r4) |
488 | PPC_LL r21, VCPU_GPR(r21)(r4) | 488 | PPC_LL r21, VCPU_GPR(R21)(r4) |
489 | PPC_LL r22, VCPU_GPR(r22)(r4) | 489 | PPC_LL r22, VCPU_GPR(R22)(r4) |
490 | PPC_LL r23, VCPU_GPR(r23)(r4) | 490 | PPC_LL r23, VCPU_GPR(R23)(r4) |
491 | PPC_LL r24, VCPU_GPR(r24)(r4) | 491 | PPC_LL r24, VCPU_GPR(R24)(r4) |
492 | PPC_LL r25, VCPU_GPR(r25)(r4) | 492 | PPC_LL r25, VCPU_GPR(R25)(r4) |
493 | PPC_LL r26, VCPU_GPR(r26)(r4) | 493 | PPC_LL r26, VCPU_GPR(R26)(r4) |
494 | PPC_LL r27, VCPU_GPR(r27)(r4) | 494 | PPC_LL r27, VCPU_GPR(R27)(r4) |
495 | PPC_LL r28, VCPU_GPR(r28)(r4) | 495 | PPC_LL r28, VCPU_GPR(R28)(r4) |
496 | PPC_LL r29, VCPU_GPR(r29)(r4) | 496 | PPC_LL r29, VCPU_GPR(R29)(r4) |
497 | PPC_LL r30, VCPU_GPR(r30)(r4) | 497 | PPC_LL r30, VCPU_GPR(R30)(r4) |
498 | PPC_LL r31, VCPU_GPR(r31)(r4) | 498 | PPC_LL r31, VCPU_GPR(R31)(r4) |
499 | 499 | ||
500 | 500 | ||
501 | lightweight_exit: | 501 | lightweight_exit: |
@@ -554,13 +554,13 @@ lightweight_exit:
554 | lwz r7, VCPU_CR(r4) | 554 | lwz r7, VCPU_CR(r4) |
555 | PPC_LL r8, VCPU_PC(r4) | 555 | PPC_LL r8, VCPU_PC(r4) |
556 | PPC_LD(r9, VCPU_SHARED_MSR, r11) | 556 | PPC_LD(r9, VCPU_SHARED_MSR, r11) |
557 | PPC_LL r0, VCPU_GPR(r0)(r4) | 557 | PPC_LL r0, VCPU_GPR(R0)(r4) |
558 | PPC_LL r1, VCPU_GPR(r1)(r4) | 558 | PPC_LL r1, VCPU_GPR(R1)(r4) |
559 | PPC_LL r2, VCPU_GPR(r2)(r4) | 559 | PPC_LL r2, VCPU_GPR(R2)(r4) |
560 | PPC_LL r10, VCPU_GPR(r10)(r4) | 560 | PPC_LL r10, VCPU_GPR(R10)(r4) |
561 | PPC_LL r11, VCPU_GPR(r11)(r4) | 561 | PPC_LL r11, VCPU_GPR(R11)(r4) |
562 | PPC_LL r12, VCPU_GPR(r12)(r4) | 562 | PPC_LL r12, VCPU_GPR(R12)(r4) |
563 | PPC_LL r13, VCPU_GPR(r13)(r4) | 563 | PPC_LL r13, VCPU_GPR(R13)(r4) |
564 | mtlr r3 | 564 | mtlr r3 |
565 | mtxer r5 | 565 | mtxer r5 |
566 | mtctr r6 | 566 | mtctr r6 |
@@ -586,12 +586,12 @@ lightweight_exit:
586 | mtcr r7 | 586 | mtcr r7 |
587 | 587 | ||
588 | /* Finish loading guest volatiles and jump to guest. */ | 588 | /* Finish loading guest volatiles and jump to guest. */ |
589 | PPC_LL r5, VCPU_GPR(r5)(r4) | 589 | PPC_LL r5, VCPU_GPR(R5)(r4) |
590 | PPC_LL r6, VCPU_GPR(r6)(r4) | 590 | PPC_LL r6, VCPU_GPR(R6)(r4) |
591 | PPC_LL r7, VCPU_GPR(r7)(r4) | 591 | PPC_LL r7, VCPU_GPR(R7)(r4) |
592 | PPC_LL r8, VCPU_GPR(r8)(r4) | 592 | PPC_LL r8, VCPU_GPR(R8)(r4) |
593 | PPC_LL r9, VCPU_GPR(r9)(r4) | 593 | PPC_LL r9, VCPU_GPR(R9)(r4) |
594 | 594 | ||
595 | PPC_LL r3, VCPU_GPR(r3)(r4) | 595 | PPC_LL r3, VCPU_GPR(R3)(r4) |
596 | PPC_LL r4, VCPU_GPR(r4)(r4) | 596 | PPC_LL r4, VCPU_GPR(R4)(r4) |
597 | rfi | 597 | rfi |
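
A final note on why the spelling of the loads and stores differs between the four files: book3s_hv_rmhandlers.S is 64-bit only and uses ld/std directly, booke_interrupts.S is 32-bit only and uses lwz/stw, while book3s_interrupts.S and bookehv_interrupts.S build for both widths and go through the PPC_LL/PPC_STL wrappers. Roughly, and as a simplified sketch rather than the real definitions (those live in asm/asm-compat.h):

	/* Simplified width-agnostic helpers, modelled on asm/asm-compat.h: */
	#ifdef __powerpc64__
	#define PPC_LL	ld		/* 8-byte load  */
	#define PPC_STL	std		/* 8-byte store */
	#else
	#define PPC_LL	lwz		/* 4-byte load  */
	#define PPC_STL	stw		/* 4-byte store */
	#endif

		/* the same source line serves 32-bit and 64-bit builds: */
		PPC_STL	r14, VCPU_GPR(R14)(r4)
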