path: root/arch/powerpc/kvm
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-23 21:54:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-23 21:54:23 -0400
commit	83c7f72259ea4bd0561e2f2762d97ee2888126ce (patch)
tree	c8b181e9f9a0bb061f5ab63fabb98197d7aee19a /arch/powerpc/kvm
parent	e05644e17e744315bce12b0948cdc36910b9a76e (diff)
parent	574ce79cea9d3fda109ffcc82f81733de4740e5c (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Notable highlights:

   - iommu improvements from Anton removing the per-iommu global lock
     in favor of dividing the DMA space into pools, each with its own
     lock, and hashed on the CPU number.  Along with making the locking
     more fine grained, this gives significant improvements in
     multiqueue networking scalability.

   - Still from Anton, we now provide a vdso based variant of getcpu
     which makes sched_getcpu with the appropriate glibc patch something
     like 18 times faster.

   - More Anton goodness (he's been busy!) in other areas such as a
     faster __clear_user and copy_page on P7, various perf fixes to
     improve sampling quality, etc...

   - One more step toward removing legacy i2c interfaces by using new
     device-tree based probing of platform devices for the AOA audio
     drivers.

   - A nice series of patches from Michael Neuling that helps avoid
     confusion between register numbers and literals in assembly code,
     trying to enforce the use of "%rN" register names in gas rather
     than plain numbers.

   - A pile of FSL updates.

   - The usual bunch of small fixes, cleanups etc...

  You may spot a change to drivers/char/mem.  The patch got no comment
  or ack from outside; it's a trivial patch to allow the architecture
  to skip creating /dev/port, which we use to disable it on ppc64
  machines that don't have a legacy bridge.  On those, IO ports 0...64K
  are not mapped in kernel space at all, so accesses to /dev/port cause
  oopses (and yes, distros -still- ship userspace that bangs hard coded
  ports such as kbdrate)."

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (106 commits)
  powerpc/mpic: Create a revmap with enough entries for IPIs and timers
  Remove stale .rej file
  powerpc/iommu: Fix iommu pool initialization
  powerpc/eeh: Check handle_eeh_events() return value
  powerpc/85xx: Add phy nodes in SGMII mode for MPC8536/44/72DS & P2020DS
  powerpc/e500: add paravirt QEMU platform
  powerpc/mpc85xx_ds: convert to unified PCI init
  powerpc/fsl-pci: get PCI init out of board files
  powerpc/85xx: Update corenet64_smp_defconfig
  powerpc/85xx: Update corenet32_smp_defconfig
  powerpc/85xx: Rename P1021RDB-PC device trees to be consistent
  powerpc/watchdog: move booke watchdog param related code to setup-common.c
  sound/aoa: Adapt to new i2c probing scheme
  i2c/powermac: Improve detection of devices from device-tree
  powerpc: Disable /dev/port interface on systems without an ISA bridge
  of: Improve prom_update_property() function
  powerpc: Add "memory" attribute for mfmsr()
  powerpc/ftrace: Fix assembly trampoline register usage
  powerpc/hw_breakpoints: Fix incorrect pointer access
  powerpc: Put the gpr save/restore functions in their own section
  ...
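The register-naming series mentioned above is what drives most of the churn in the diff below: byte-offset macros such as VCPU_GPR() and HOST_NV_GPR() are routed through a token-pasting indirection (visible in the booke_interrupts.S hunk that introduces __HOST_NV_GPR(__REG_##n)), so only the new upper-case register names expand to a number and a stray bare literal or lower-case rN no longer slips through as an offset. A minimal, self-contained sketch of the idea follows; the offset values and the per-register defines here are illustrative assumptions, not the kernel's exact headers.

	/* Illustrative sketch of the __REG_##n token-pasting trick. */
	#define VCPU_GPRS	0x100		/* hypothetical offset, not the real asm-offsets value */
	#define ULONG_SIZE	8

	#define __REG_R14	14		/* in the real series there is one such define per register */
	#define __REG_R15	15

	#define __VCPU_GPR(n)	(VCPU_GPRS + ((n) * ULONG_SIZE))
	#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)

		/* expands via __REG_R14 -> 14, i.e. offset VCPU_GPRS + 112: */
		ld	r14, VCPU_GPR(R14)(r4)
		/* the old spelling VCPU_GPR(r14) no longer builds, because
		 * __REG_r14 is undefined -- that is the enforcement. */

In the hunks below, the same pattern shows up simply as every VCPU_GPR(rN)/HOST_NV_GPR(rN) argument being rewritten to the upper-case RN name.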
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	227
-rw-r--r--	arch/powerpc/kvm/book3s_interrupts.S	80
-rw-r--r--	arch/powerpc/kvm/book3s_rmhandlers.S	1
-rw-r--r--	arch/powerpc/kvm/book3s_segment.S	2
-rw-r--r--	arch/powerpc/kvm/booke_interrupts.S	277
-rw-r--r--	arch/powerpc/kvm/bookehv_interrupts.S	229
6 files changed, 400 insertions, 416 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a1044f43becd..5a84c8d3d040 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -72,9 +72,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
 	mtsrr1	r6
 	RFI
 
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
-
 /******************************************************************************
  *                                                                            *
  *                               Entry code                                   *
@@ -206,24 +203,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/* Load up FP, VMX and VSX registers */
 	bl	kvmppc_load_fp
 
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
 
 BEGIN_FTR_SECTION
 	/* Switch DSCR to guest value */
@@ -547,21 +544,21 @@ fast_guest_return:
 	mtlr	r5
 	mtcr	r6
 
-	ld	r0, VCPU_GPR(r0)(r4)
-	ld	r1, VCPU_GPR(r1)(r4)
-	ld	r2, VCPU_GPR(r2)(r4)
-	ld	r3, VCPU_GPR(r3)(r4)
-	ld	r5, VCPU_GPR(r5)(r4)
-	ld	r6, VCPU_GPR(r6)(r4)
-	ld	r7, VCPU_GPR(r7)(r4)
-	ld	r8, VCPU_GPR(r8)(r4)
-	ld	r9, VCPU_GPR(r9)(r4)
-	ld	r10, VCPU_GPR(r10)(r4)
-	ld	r11, VCPU_GPR(r11)(r4)
-	ld	r12, VCPU_GPR(r12)(r4)
-	ld	r13, VCPU_GPR(r13)(r4)
+	ld	r0, VCPU_GPR(R0)(r4)
+	ld	r1, VCPU_GPR(R1)(r4)
+	ld	r2, VCPU_GPR(R2)(r4)
+	ld	r3, VCPU_GPR(R3)(r4)
+	ld	r5, VCPU_GPR(R5)(r4)
+	ld	r6, VCPU_GPR(R6)(r4)
+	ld	r7, VCPU_GPR(R7)(r4)
+	ld	r8, VCPU_GPR(R8)(r4)
+	ld	r9, VCPU_GPR(R9)(r4)
+	ld	r10, VCPU_GPR(R10)(r4)
+	ld	r11, VCPU_GPR(R11)(r4)
+	ld	r12, VCPU_GPR(R12)(r4)
+	ld	r13, VCPU_GPR(R13)(r4)
 
-	ld	r4, VCPU_GPR(r4)(r4)
+	ld	r4, VCPU_GPR(R4)(r4)
 
 	hrfid
 	b	.
@@ -590,22 +587,22 @@ kvmppc_interrupt:
 
 	/* Save registers */
 
-	std	r0, VCPU_GPR(r0)(r9)
-	std	r1, VCPU_GPR(r1)(r9)
-	std	r2, VCPU_GPR(r2)(r9)
-	std	r3, VCPU_GPR(r3)(r9)
-	std	r4, VCPU_GPR(r4)(r9)
-	std	r5, VCPU_GPR(r5)(r9)
-	std	r6, VCPU_GPR(r6)(r9)
-	std	r7, VCPU_GPR(r7)(r9)
-	std	r8, VCPU_GPR(r8)(r9)
+	std	r0, VCPU_GPR(R0)(r9)
+	std	r1, VCPU_GPR(R1)(r9)
+	std	r2, VCPU_GPR(R2)(r9)
+	std	r3, VCPU_GPR(R3)(r9)
+	std	r4, VCPU_GPR(R4)(r9)
+	std	r5, VCPU_GPR(R5)(r9)
+	std	r6, VCPU_GPR(R6)(r9)
+	std	r7, VCPU_GPR(R7)(r9)
+	std	r8, VCPU_GPR(R8)(r9)
 	ld	r0, HSTATE_HOST_R2(r13)
-	std	r0, VCPU_GPR(r9)(r9)
-	std	r10, VCPU_GPR(r10)(r9)
-	std	r11, VCPU_GPR(r11)(r9)
+	std	r0, VCPU_GPR(R9)(r9)
+	std	r10, VCPU_GPR(R10)(r9)
+	std	r11, VCPU_GPR(R11)(r9)
 	ld	r3, HSTATE_SCRATCH0(r13)
 	lwz	r4, HSTATE_SCRATCH1(r13)
-	std	r3, VCPU_GPR(r12)(r9)
+	std	r3, VCPU_GPR(R12)(r9)
 	stw	r4, VCPU_CR(r9)
 
 	/* Restore R1/R2 so we can handle faults */
@@ -626,7 +623,7 @@ kvmppc_interrupt:
 
 	GET_SCRATCH0(r3)
 	mflr	r4
-	std	r3, VCPU_GPR(r13)(r9)
+	std	r3, VCPU_GPR(R13)(r9)
 	std	r4, VCPU_LR(r9)
 
 	/* Unset guest mode */
@@ -968,24 +965,24 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(r14)(r9)
-	std	r15, VCPU_GPR(r15)(r9)
-	std	r16, VCPU_GPR(r16)(r9)
-	std	r17, VCPU_GPR(r17)(r9)
-	std	r18, VCPU_GPR(r18)(r9)
-	std	r19, VCPU_GPR(r19)(r9)
-	std	r20, VCPU_GPR(r20)(r9)
-	std	r21, VCPU_GPR(r21)(r9)
-	std	r22, VCPU_GPR(r22)(r9)
-	std	r23, VCPU_GPR(r23)(r9)
-	std	r24, VCPU_GPR(r24)(r9)
-	std	r25, VCPU_GPR(r25)(r9)
-	std	r26, VCPU_GPR(r26)(r9)
-	std	r27, VCPU_GPR(r27)(r9)
-	std	r28, VCPU_GPR(r28)(r9)
-	std	r29, VCPU_GPR(r29)(r9)
-	std	r30, VCPU_GPR(r30)(r9)
-	std	r31, VCPU_GPR(r31)(r9)
+	std	r14, VCPU_GPR(R14)(r9)
+	std	r15, VCPU_GPR(R15)(r9)
+	std	r16, VCPU_GPR(R16)(r9)
+	std	r17, VCPU_GPR(R17)(r9)
+	std	r18, VCPU_GPR(R18)(r9)
+	std	r19, VCPU_GPR(R19)(r9)
+	std	r20, VCPU_GPR(R20)(r9)
+	std	r21, VCPU_GPR(R21)(r9)
+	std	r22, VCPU_GPR(R22)(r9)
+	std	r23, VCPU_GPR(R23)(r9)
+	std	r24, VCPU_GPR(R24)(r9)
+	std	r25, VCPU_GPR(R25)(r9)
+	std	r26, VCPU_GPR(R26)(r9)
+	std	r27, VCPU_GPR(R27)(r9)
+	std	r28, VCPU_GPR(R28)(r9)
+	std	r29, VCPU_GPR(R29)(r9)
+	std	r30, VCPU_GPR(R30)(r9)
+	std	r31, VCPU_GPR(R31)(r9)
 
 	/* Save SPRGs */
 	mfspr	r3, SPRN_SPRG0
@@ -1067,6 +1064,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mtspr	SPRN_DABR,r5
 	mtspr	SPRN_DABRX,r6
 
+	/* Restore SPRG3 */
+	ld	r3,HSTATE_SPRG3(r13)
+	mtspr	SPRN_SPRG3,r3
+
 	/*
 	 * Reload DEC.  HDEC interrupts were disabled when
 	 * we reloaded the host's LPCR value.
@@ -1160,7 +1161,7 @@ kvmppc_hdsi:
 	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
 	beq	3f
 	clrrdi	r0, r4, 28
-	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	bne	1f			/* if no SLB entry found */
 4:	std	r4, VCPU_FAULT_DAR(r9)
 	stw	r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1235,7 @@ kvmppc_hisi:
 	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
 	beq	3f
 	clrrdi	r0, r10, 28
-	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	bne	1f			/* if no SLB entry found */
 4:
 	/* Search the hash table. */
@@ -1278,7 +1279,7 @@ kvmppc_hisi:
  */
 	.globl	hcall_try_real_mode
 hcall_try_real_mode:
-	ld	r3,VCPU_GPR(r3)(r9)
+	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
 	bne	hcall_real_cont
 	clrrdi	r3,r3,2
@@ -1291,12 +1292,12 @@ hcall_try_real_mode:
 	add	r3,r3,r4
 	mtctr	r3
 	mr	r3,r9		/* get vcpu pointer */
-	ld	r4,VCPU_GPR(r4)(r9)
+	ld	r4,VCPU_GPR(R4)(r9)
 	bctrl
 	cmpdi	r3,H_TOO_HARD
 	beq	hcall_real_fallback
 	ld	r4,HSTATE_KVM_VCPU(r13)
-	std	r3,VCPU_GPR(r3)(r4)
+	std	r3,VCPU_GPR(R3)(r4)
 	ld	r10,VCPU_PC(r4)
 	ld	r11,VCPU_MSR(r4)
 	b	fast_guest_return
@@ -1424,7 +1425,7 @@ _GLOBAL(kvmppc_h_cede)
 	li	r0,0		/* set trap to 0 to say hcall is handled */
 	stw	r0,VCPU_TRAP(r3)
 	li	r0,H_SUCCESS
-	std	r0,VCPU_GPR(r3)(r3)
+	std	r0,VCPU_GPR(R3)(r3)
 BEGIN_FTR_SECTION
 	b	2f		/* just send it up to host on 970 */
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1444,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	addi	r6,r5,VCORE_NAPPING_THREADS
 31:	lwarx	r4,0,r6
 	or	r4,r4,r0
-	PPC_POPCNTW(r7,r4)
+	PPC_POPCNTW(R7,R4)
 	cmpw	r7,r8
 	bge	2f
 	stwcx.	r4,0,r6
@@ -1464,24 +1465,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 	 */
 	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(r14)(r3)
-	std	r15, VCPU_GPR(r15)(r3)
-	std	r16, VCPU_GPR(r16)(r3)
-	std	r17, VCPU_GPR(r17)(r3)
-	std	r18, VCPU_GPR(r18)(r3)
-	std	r19, VCPU_GPR(r19)(r3)
-	std	r20, VCPU_GPR(r20)(r3)
-	std	r21, VCPU_GPR(r21)(r3)
-	std	r22, VCPU_GPR(r22)(r3)
-	std	r23, VCPU_GPR(r23)(r3)
-	std	r24, VCPU_GPR(r24)(r3)
-	std	r25, VCPU_GPR(r25)(r3)
-	std	r26, VCPU_GPR(r26)(r3)
-	std	r27, VCPU_GPR(r27)(r3)
-	std	r28, VCPU_GPR(r28)(r3)
-	std	r29, VCPU_GPR(r29)(r3)
-	std	r30, VCPU_GPR(r30)(r3)
-	std	r31, VCPU_GPR(r31)(r3)
+	std	r14, VCPU_GPR(R14)(r3)
+	std	r15, VCPU_GPR(R15)(r3)
+	std	r16, VCPU_GPR(R16)(r3)
+	std	r17, VCPU_GPR(R17)(r3)
+	std	r18, VCPU_GPR(R18)(r3)
+	std	r19, VCPU_GPR(R19)(r3)
+	std	r20, VCPU_GPR(R20)(r3)
+	std	r21, VCPU_GPR(R21)(r3)
+	std	r22, VCPU_GPR(R22)(r3)
+	std	r23, VCPU_GPR(R23)(r3)
+	std	r24, VCPU_GPR(R24)(r3)
+	std	r25, VCPU_GPR(R25)(r3)
+	std	r26, VCPU_GPR(R26)(r3)
+	std	r27, VCPU_GPR(R27)(r3)
+	std	r28, VCPU_GPR(R28)(r3)
+	std	r29, VCPU_GPR(R29)(r3)
+	std	r30, VCPU_GPR(R30)(r3)
+	std	r31, VCPU_GPR(R31)(r3)
 
 	/* save FP state */
 	bl	.kvmppc_save_fp
@@ -1513,24 +1514,24 @@ kvm_end_cede:
 	bl	kvmppc_load_fp
 
 	/* Load NV GPRS */
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
 
 	/* clear our bit in vcore->napping_threads */
 33:	ld	r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1650,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
 	li	r6,reg*16+VCPU_VSRS
-	STXVD2X(reg,r6,r3)
+	STXVD2X(reg,R6,R3)
 	reg = reg + 1
 	.endr
 FTR_SECTION_ELSE
@@ -1711,7 +1712,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
 	li	r7,reg*16+VCPU_VSRS
-	LXVD2X(reg,r7,r4)
+	LXVD2X(reg,R7,R4)
 	reg = reg + 1
 	.endr
 FTR_SECTION_ELSE
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3e35383bdb21..48cbbf862958 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,38 +25,30 @@
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
-
-#define ULONG_SIZE 8
 #define FUNC(name) GLUE(.,name)
-
 #elif defined(CONFIG_PPC_BOOK3S_32)
-
-#define ULONG_SIZE 4
 #define FUNC(name) name
-
 #endif /* CONFIG_PPC_BOOK3S_XX */
 
-
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
 #define VCPU_LOAD_NVGPRS(vcpu) \
-	PPC_LL	r14, VCPU_GPR(r14)(vcpu); \
-	PPC_LL	r15, VCPU_GPR(r15)(vcpu); \
-	PPC_LL	r16, VCPU_GPR(r16)(vcpu); \
-	PPC_LL	r17, VCPU_GPR(r17)(vcpu); \
-	PPC_LL	r18, VCPU_GPR(r18)(vcpu); \
-	PPC_LL	r19, VCPU_GPR(r19)(vcpu); \
-	PPC_LL	r20, VCPU_GPR(r20)(vcpu); \
-	PPC_LL	r21, VCPU_GPR(r21)(vcpu); \
-	PPC_LL	r22, VCPU_GPR(r22)(vcpu); \
-	PPC_LL	r23, VCPU_GPR(r23)(vcpu); \
-	PPC_LL	r24, VCPU_GPR(r24)(vcpu); \
-	PPC_LL	r25, VCPU_GPR(r25)(vcpu); \
-	PPC_LL	r26, VCPU_GPR(r26)(vcpu); \
-	PPC_LL	r27, VCPU_GPR(r27)(vcpu); \
-	PPC_LL	r28, VCPU_GPR(r28)(vcpu); \
-	PPC_LL	r29, VCPU_GPR(r29)(vcpu); \
-	PPC_LL	r30, VCPU_GPR(r30)(vcpu); \
-	PPC_LL	r31, VCPU_GPR(r31)(vcpu); \
+	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
+	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
+	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
+	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
+	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
+	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
+	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
+	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
+	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
+	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
+	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
+	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
+	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
+	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
+	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
+	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
+	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
+	PPC_LL	r31, VCPU_GPR(R31)(vcpu); \
 
 /*****************************************************************************
  *                                                                            *
@@ -131,24 +123,24 @@ kvmppc_handler_highmem:
 	/* R7 = vcpu */
 	PPC_LL	r7, GPR4(r1)
 
-	PPC_STL	r14, VCPU_GPR(r14)(r7)
-	PPC_STL	r15, VCPU_GPR(r15)(r7)
-	PPC_STL	r16, VCPU_GPR(r16)(r7)
-	PPC_STL	r17, VCPU_GPR(r17)(r7)
-	PPC_STL	r18, VCPU_GPR(r18)(r7)
-	PPC_STL	r19, VCPU_GPR(r19)(r7)
-	PPC_STL	r20, VCPU_GPR(r20)(r7)
-	PPC_STL	r21, VCPU_GPR(r21)(r7)
-	PPC_STL	r22, VCPU_GPR(r22)(r7)
-	PPC_STL	r23, VCPU_GPR(r23)(r7)
-	PPC_STL	r24, VCPU_GPR(r24)(r7)
-	PPC_STL	r25, VCPU_GPR(r25)(r7)
-	PPC_STL	r26, VCPU_GPR(r26)(r7)
-	PPC_STL	r27, VCPU_GPR(r27)(r7)
-	PPC_STL	r28, VCPU_GPR(r28)(r7)
-	PPC_STL	r29, VCPU_GPR(r29)(r7)
-	PPC_STL	r30, VCPU_GPR(r30)(r7)
-	PPC_STL	r31, VCPU_GPR(r31)(r7)
+	PPC_STL	r14, VCPU_GPR(R14)(r7)
+	PPC_STL	r15, VCPU_GPR(R15)(r7)
+	PPC_STL	r16, VCPU_GPR(R16)(r7)
+	PPC_STL	r17, VCPU_GPR(R17)(r7)
+	PPC_STL	r18, VCPU_GPR(R18)(r7)
+	PPC_STL	r19, VCPU_GPR(R19)(r7)
+	PPC_STL	r20, VCPU_GPR(R20)(r7)
+	PPC_STL	r21, VCPU_GPR(R21)(r7)
+	PPC_STL	r22, VCPU_GPR(R22)(r7)
+	PPC_STL	r23, VCPU_GPR(R23)(r7)
+	PPC_STL	r24, VCPU_GPR(R24)(r7)
+	PPC_STL	r25, VCPU_GPR(R25)(r7)
+	PPC_STL	r26, VCPU_GPR(R26)(r7)
+	PPC_STL	r27, VCPU_GPR(R27)(r7)
+	PPC_STL	r28, VCPU_GPR(R28)(r7)
+	PPC_STL	r29, VCPU_GPR(R29)(r7)
+	PPC_STL	r30, VCPU_GPR(R30)(r7)
+	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	mr	r5, r12
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34187585c507..ab523f3c1731 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -37,7 +37,6 @@
 #if defined(CONFIG_PPC_BOOK3S_64)
 
 #define FUNC(name) GLUE(.,name)
-#define MTMSR_EERI(reg) mtmsrd (reg),1
 
 	.globl	kvmppc_skip_interrupt
 kvmppc_skip_interrupt:
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 798491a268b3..1abe4788191a 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -23,7 +23,6 @@
 
 #define GET_SHADOW_VCPU(reg) \
 	mr	reg, r13
-#define MTMSR_EERI(reg) mtmsrd (reg),1
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -31,7 +30,6 @@
 	tophys(reg, r2); \
 	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
 	tophys(reg, reg)
-#define MTMSR_EERI(reg) mtmsr (reg)
 
 #endif
 
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8feec2ff3928..8fd4b2a0911b 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -25,8 +25,6 @@
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
-#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
-
 /* The host stack layout: */
 #define HOST_R1 0 /* Implied by stwu. */
 #define HOST_CALLEE_LR 4
@@ -36,8 +34,9 @@
 #define HOST_R2 12
 #define HOST_CR 16
 #define HOST_NV_GPRS 20
-#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
-#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
 #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
@@ -58,8 +57,8 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
 	/* Get pointer to vcpu and record exit number. */
 	mtspr	SPRN_SPRG_WSCRATCH0, r4
 	mfspr	r4, SPRN_SPRG_RVCPU
-	stw	r5, VCPU_GPR(r5)(r4)
-	stw	r6, VCPU_GPR(r6)(r4)
+	stw	r5, VCPU_GPR(R5)(r4)
+	stw	r6, VCPU_GPR(R6)(r4)
 	mfctr	r5
 	lis	r6, kvmppc_resume_host@h
 	stw	r5, VCPU_CTR(r4)
@@ -100,12 +99,12 @@ _GLOBAL(kvmppc_handler_len)
  * r5: KVM exit number
  */
 _GLOBAL(kvmppc_resume_host)
-	stw	r3, VCPU_GPR(r3)(r4)
+	stw	r3, VCPU_GPR(R3)(r4)
 	mfcr	r3
 	stw	r3, VCPU_CR(r4)
-	stw	r7, VCPU_GPR(r7)(r4)
-	stw	r8, VCPU_GPR(r8)(r4)
-	stw	r9, VCPU_GPR(r9)(r4)
+	stw	r7, VCPU_GPR(R7)(r4)
+	stw	r8, VCPU_GPR(R8)(r4)
+	stw	r9, VCPU_GPR(R9)(r4)
 
 	li	r6, 1
 	slw	r6, r6, r5
@@ -135,23 +134,23 @@ _GLOBAL(kvmppc_resume_host)
 	isync
 	stw	r9, VCPU_LAST_INST(r4)
 
-	stw	r15, VCPU_GPR(r15)(r4)
-	stw	r16, VCPU_GPR(r16)(r4)
-	stw	r17, VCPU_GPR(r17)(r4)
-	stw	r18, VCPU_GPR(r18)(r4)
-	stw	r19, VCPU_GPR(r19)(r4)
-	stw	r20, VCPU_GPR(r20)(r4)
-	stw	r21, VCPU_GPR(r21)(r4)
-	stw	r22, VCPU_GPR(r22)(r4)
-	stw	r23, VCPU_GPR(r23)(r4)
-	stw	r24, VCPU_GPR(r24)(r4)
-	stw	r25, VCPU_GPR(r25)(r4)
-	stw	r26, VCPU_GPR(r26)(r4)
-	stw	r27, VCPU_GPR(r27)(r4)
-	stw	r28, VCPU_GPR(r28)(r4)
-	stw	r29, VCPU_GPR(r29)(r4)
-	stw	r30, VCPU_GPR(r30)(r4)
-	stw	r31, VCPU_GPR(r31)(r4)
+	stw	r15, VCPU_GPR(R15)(r4)
+	stw	r16, VCPU_GPR(R16)(r4)
+	stw	r17, VCPU_GPR(R17)(r4)
+	stw	r18, VCPU_GPR(R18)(r4)
+	stw	r19, VCPU_GPR(R19)(r4)
+	stw	r20, VCPU_GPR(R20)(r4)
+	stw	r21, VCPU_GPR(R21)(r4)
+	stw	r22, VCPU_GPR(R22)(r4)
+	stw	r23, VCPU_GPR(R23)(r4)
+	stw	r24, VCPU_GPR(R24)(r4)
+	stw	r25, VCPU_GPR(R25)(r4)
+	stw	r26, VCPU_GPR(R26)(r4)
+	stw	r27, VCPU_GPR(R27)(r4)
+	stw	r28, VCPU_GPR(R28)(r4)
+	stw	r29, VCPU_GPR(R29)(r4)
+	stw	r30, VCPU_GPR(R30)(r4)
+	stw	r31, VCPU_GPR(R31)(r4)
 ..skip_inst_copy:
 
 	/* Also grab DEAR and ESR before the host can clobber them. */
@@ -169,20 +168,20 @@ _GLOBAL(kvmppc_resume_host)
 ..skip_esr:
 
 	/* Save remaining volatile guest register state to vcpu. */
-	stw	r0, VCPU_GPR(r0)(r4)
-	stw	r1, VCPU_GPR(r1)(r4)
-	stw	r2, VCPU_GPR(r2)(r4)
-	stw	r10, VCPU_GPR(r10)(r4)
-	stw	r11, VCPU_GPR(r11)(r4)
-	stw	r12, VCPU_GPR(r12)(r4)
-	stw	r13, VCPU_GPR(r13)(r4)
-	stw	r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+	stw	r0, VCPU_GPR(R0)(r4)
+	stw	r1, VCPU_GPR(R1)(r4)
+	stw	r2, VCPU_GPR(R2)(r4)
+	stw	r10, VCPU_GPR(R10)(r4)
+	stw	r11, VCPU_GPR(R11)(r4)
+	stw	r12, VCPU_GPR(R12)(r4)
+	stw	r13, VCPU_GPR(R13)(r4)
+	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
 	mflr	r3
 	stw	r3, VCPU_LR(r4)
 	mfxer	r3
 	stw	r3, VCPU_XER(r4)
 	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	stw	r3, VCPU_GPR(r4)(r4)
+	stw	r3, VCPU_GPR(R4)(r4)
 	mfspr	r3, SPRN_SRR0
 	stw	r3, VCPU_PC(r4)
 
@@ -214,28 +213,28 @@ _GLOBAL(kvmppc_resume_host)
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr	r4, r14
-	lwz	r14, VCPU_GPR(r14)(r4)
+	lwz	r14, VCPU_GPR(R14)(r4)
 
 	/* Sometimes instruction emulation must restore complete GPR state. */
 	andi.	r5, r3, RESUME_FLAG_NV
 	beq	..skip_nv_load
-	lwz	r15, VCPU_GPR(r15)(r4)
-	lwz	r16, VCPU_GPR(r16)(r4)
-	lwz	r17, VCPU_GPR(r17)(r4)
-	lwz	r18, VCPU_GPR(r18)(r4)
-	lwz	r19, VCPU_GPR(r19)(r4)
-	lwz	r20, VCPU_GPR(r20)(r4)
-	lwz	r21, VCPU_GPR(r21)(r4)
-	lwz	r22, VCPU_GPR(r22)(r4)
-	lwz	r23, VCPU_GPR(r23)(r4)
-	lwz	r24, VCPU_GPR(r24)(r4)
-	lwz	r25, VCPU_GPR(r25)(r4)
-	lwz	r26, VCPU_GPR(r26)(r4)
-	lwz	r27, VCPU_GPR(r27)(r4)
-	lwz	r28, VCPU_GPR(r28)(r4)
-	lwz	r29, VCPU_GPR(r29)(r4)
-	lwz	r30, VCPU_GPR(r30)(r4)
-	lwz	r31, VCPU_GPR(r31)(r4)
+	lwz	r15, VCPU_GPR(R15)(r4)
+	lwz	r16, VCPU_GPR(R16)(r4)
+	lwz	r17, VCPU_GPR(R17)(r4)
+	lwz	r18, VCPU_GPR(R18)(r4)
+	lwz	r19, VCPU_GPR(R19)(r4)
+	lwz	r20, VCPU_GPR(R20)(r4)
+	lwz	r21, VCPU_GPR(R21)(r4)
+	lwz	r22, VCPU_GPR(R22)(r4)
+	lwz	r23, VCPU_GPR(R23)(r4)
+	lwz	r24, VCPU_GPR(R24)(r4)
+	lwz	r25, VCPU_GPR(R25)(r4)
+	lwz	r26, VCPU_GPR(R26)(r4)
+	lwz	r27, VCPU_GPR(R27)(r4)
+	lwz	r28, VCPU_GPR(R28)(r4)
+	lwz	r29, VCPU_GPR(R29)(r4)
+	lwz	r30, VCPU_GPR(R30)(r4)
+	lwz	r31, VCPU_GPR(R31)(r4)
 ..skip_nv_load:
 
 	/* Should we return to the guest? */
@@ -257,43 +256,43 @@ heavyweight_exit:
 
 	/* We already saved guest volatile register state; now save the
 	 * non-volatiles. */
-	stw	r15, VCPU_GPR(r15)(r4)
-	stw	r16, VCPU_GPR(r16)(r4)
-	stw	r17, VCPU_GPR(r17)(r4)
-	stw	r18, VCPU_GPR(r18)(r4)
-	stw	r19, VCPU_GPR(r19)(r4)
-	stw	r20, VCPU_GPR(r20)(r4)
-	stw	r21, VCPU_GPR(r21)(r4)
-	stw	r22, VCPU_GPR(r22)(r4)
-	stw	r23, VCPU_GPR(r23)(r4)
-	stw	r24, VCPU_GPR(r24)(r4)
-	stw	r25, VCPU_GPR(r25)(r4)
-	stw	r26, VCPU_GPR(r26)(r4)
-	stw	r27, VCPU_GPR(r27)(r4)
-	stw	r28, VCPU_GPR(r28)(r4)
-	stw	r29, VCPU_GPR(r29)(r4)
-	stw	r30, VCPU_GPR(r30)(r4)
-	stw	r31, VCPU_GPR(r31)(r4)
+	stw	r15, VCPU_GPR(R15)(r4)
+	stw	r16, VCPU_GPR(R16)(r4)
+	stw	r17, VCPU_GPR(R17)(r4)
+	stw	r18, VCPU_GPR(R18)(r4)
+	stw	r19, VCPU_GPR(R19)(r4)
+	stw	r20, VCPU_GPR(R20)(r4)
+	stw	r21, VCPU_GPR(R21)(r4)
+	stw	r22, VCPU_GPR(R22)(r4)
+	stw	r23, VCPU_GPR(R23)(r4)
+	stw	r24, VCPU_GPR(R24)(r4)
+	stw	r25, VCPU_GPR(R25)(r4)
+	stw	r26, VCPU_GPR(R26)(r4)
+	stw	r27, VCPU_GPR(R27)(r4)
+	stw	r28, VCPU_GPR(R28)(r4)
+	stw	r29, VCPU_GPR(R29)(r4)
+	stw	r30, VCPU_GPR(R30)(r4)
+	stw	r31, VCPU_GPR(R31)(r4)
 
 	/* Load host non-volatile register state from host stack. */
-	lwz	r14, HOST_NV_GPR(r14)(r1)
-	lwz	r15, HOST_NV_GPR(r15)(r1)
-	lwz	r16, HOST_NV_GPR(r16)(r1)
-	lwz	r17, HOST_NV_GPR(r17)(r1)
-	lwz	r18, HOST_NV_GPR(r18)(r1)
-	lwz	r19, HOST_NV_GPR(r19)(r1)
-	lwz	r20, HOST_NV_GPR(r20)(r1)
-	lwz	r21, HOST_NV_GPR(r21)(r1)
-	lwz	r22, HOST_NV_GPR(r22)(r1)
-	lwz	r23, HOST_NV_GPR(r23)(r1)
-	lwz	r24, HOST_NV_GPR(r24)(r1)
-	lwz	r25, HOST_NV_GPR(r25)(r1)
-	lwz	r26, HOST_NV_GPR(r26)(r1)
-	lwz	r27, HOST_NV_GPR(r27)(r1)
-	lwz	r28, HOST_NV_GPR(r28)(r1)
-	lwz	r29, HOST_NV_GPR(r29)(r1)
-	lwz	r30, HOST_NV_GPR(r30)(r1)
-	lwz	r31, HOST_NV_GPR(r31)(r1)
+	lwz	r14, HOST_NV_GPR(R14)(r1)
+	lwz	r15, HOST_NV_GPR(R15)(r1)
+	lwz	r16, HOST_NV_GPR(R16)(r1)
+	lwz	r17, HOST_NV_GPR(R17)(r1)
+	lwz	r18, HOST_NV_GPR(R18)(r1)
+	lwz	r19, HOST_NV_GPR(R19)(r1)
+	lwz	r20, HOST_NV_GPR(R20)(r1)
+	lwz	r21, HOST_NV_GPR(R21)(r1)
+	lwz	r22, HOST_NV_GPR(R22)(r1)
+	lwz	r23, HOST_NV_GPR(R23)(r1)
+	lwz	r24, HOST_NV_GPR(R24)(r1)
+	lwz	r25, HOST_NV_GPR(R25)(r1)
+	lwz	r26, HOST_NV_GPR(R26)(r1)
+	lwz	r27, HOST_NV_GPR(R27)(r1)
+	lwz	r28, HOST_NV_GPR(R28)(r1)
+	lwz	r29, HOST_NV_GPR(R29)(r1)
+	lwz	r30, HOST_NV_GPR(R30)(r1)
+	lwz	r31, HOST_NV_GPR(R31)(r1)
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
@@ -321,44 +320,44 @@ _GLOBAL(__kvmppc_vcpu_run)
 	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
-	stw	r14, HOST_NV_GPR(r14)(r1)
-	stw	r15, HOST_NV_GPR(r15)(r1)
-	stw	r16, HOST_NV_GPR(r16)(r1)
-	stw	r17, HOST_NV_GPR(r17)(r1)
-	stw	r18, HOST_NV_GPR(r18)(r1)
-	stw	r19, HOST_NV_GPR(r19)(r1)
-	stw	r20, HOST_NV_GPR(r20)(r1)
-	stw	r21, HOST_NV_GPR(r21)(r1)
-	stw	r22, HOST_NV_GPR(r22)(r1)
-	stw	r23, HOST_NV_GPR(r23)(r1)
-	stw	r24, HOST_NV_GPR(r24)(r1)
-	stw	r25, HOST_NV_GPR(r25)(r1)
-	stw	r26, HOST_NV_GPR(r26)(r1)
-	stw	r27, HOST_NV_GPR(r27)(r1)
-	stw	r28, HOST_NV_GPR(r28)(r1)
-	stw	r29, HOST_NV_GPR(r29)(r1)
-	stw	r30, HOST_NV_GPR(r30)(r1)
-	stw	r31, HOST_NV_GPR(r31)(r1)
+	stw	r14, HOST_NV_GPR(R14)(r1)
+	stw	r15, HOST_NV_GPR(R15)(r1)
+	stw	r16, HOST_NV_GPR(R16)(r1)
+	stw	r17, HOST_NV_GPR(R17)(r1)
+	stw	r18, HOST_NV_GPR(R18)(r1)
+	stw	r19, HOST_NV_GPR(R19)(r1)
+	stw	r20, HOST_NV_GPR(R20)(r1)
+	stw	r21, HOST_NV_GPR(R21)(r1)
+	stw	r22, HOST_NV_GPR(R22)(r1)
+	stw	r23, HOST_NV_GPR(R23)(r1)
+	stw	r24, HOST_NV_GPR(R24)(r1)
+	stw	r25, HOST_NV_GPR(R25)(r1)
+	stw	r26, HOST_NV_GPR(R26)(r1)
+	stw	r27, HOST_NV_GPR(R27)(r1)
+	stw	r28, HOST_NV_GPR(R28)(r1)
+	stw	r29, HOST_NV_GPR(R29)(r1)
+	stw	r30, HOST_NV_GPR(R30)(r1)
+	stw	r31, HOST_NV_GPR(R31)(r1)
 
 	/* Load guest non-volatiles. */
-	lwz	r14, VCPU_GPR(r14)(r4)
-	lwz	r15, VCPU_GPR(r15)(r4)
-	lwz	r16, VCPU_GPR(r16)(r4)
-	lwz	r17, VCPU_GPR(r17)(r4)
-	lwz	r18, VCPU_GPR(r18)(r4)
-	lwz	r19, VCPU_GPR(r19)(r4)
-	lwz	r20, VCPU_GPR(r20)(r4)
-	lwz	r21, VCPU_GPR(r21)(r4)
-	lwz	r22, VCPU_GPR(r22)(r4)
-	lwz	r23, VCPU_GPR(r23)(r4)
-	lwz	r24, VCPU_GPR(r24)(r4)
-	lwz	r25, VCPU_GPR(r25)(r4)
-	lwz	r26, VCPU_GPR(r26)(r4)
-	lwz	r27, VCPU_GPR(r27)(r4)
-	lwz	r28, VCPU_GPR(r28)(r4)
-	lwz	r29, VCPU_GPR(r29)(r4)
-	lwz	r30, VCPU_GPR(r30)(r4)
-	lwz	r31, VCPU_GPR(r31)(r4)
+	lwz	r14, VCPU_GPR(R14)(r4)
+	lwz	r15, VCPU_GPR(R15)(r4)
+	lwz	r16, VCPU_GPR(R16)(r4)
+	lwz	r17, VCPU_GPR(R17)(r4)
+	lwz	r18, VCPU_GPR(R18)(r4)
+	lwz	r19, VCPU_GPR(R19)(r4)
+	lwz	r20, VCPU_GPR(R20)(r4)
+	lwz	r21, VCPU_GPR(R21)(r4)
+	lwz	r22, VCPU_GPR(R22)(r4)
+	lwz	r23, VCPU_GPR(R23)(r4)
+	lwz	r24, VCPU_GPR(R24)(r4)
+	lwz	r25, VCPU_GPR(R25)(r4)
+	lwz	r26, VCPU_GPR(R26)(r4)
+	lwz	r27, VCPU_GPR(R27)(r4)
+	lwz	r28, VCPU_GPR(R28)(r4)
+	lwz	r29, VCPU_GPR(R29)(r4)
+	lwz	r30, VCPU_GPR(R30)(r4)
+	lwz	r31, VCPU_GPR(R31)(r4)
 
 #ifdef CONFIG_SPE
 	/* save host SPEFSCR and load guest SPEFSCR */
@@ -386,13 +385,13 @@ lightweight_exit:
 #endif
 
 	/* Load some guest volatiles. */
-	lwz	r0, VCPU_GPR(r0)(r4)
-	lwz	r2, VCPU_GPR(r2)(r4)
-	lwz	r9, VCPU_GPR(r9)(r4)
-	lwz	r10, VCPU_GPR(r10)(r4)
-	lwz	r11, VCPU_GPR(r11)(r4)
-	lwz	r12, VCPU_GPR(r12)(r4)
-	lwz	r13, VCPU_GPR(r13)(r4)
+	lwz	r0, VCPU_GPR(R0)(r4)
+	lwz	r2, VCPU_GPR(R2)(r4)
+	lwz	r9, VCPU_GPR(R9)(r4)
+	lwz	r10, VCPU_GPR(R10)(r4)
+	lwz	r11, VCPU_GPR(R11)(r4)
+	lwz	r12, VCPU_GPR(R12)(r4)
+	lwz	r13, VCPU_GPR(R13)(r4)
 	lwz	r3, VCPU_LR(r4)
 	mtlr	r3
 	lwz	r3, VCPU_XER(r4)
@@ -411,7 +410,7 @@ lightweight_exit:
 
 	/* Can't switch the stack pointer until after IVPR is switched,
 	 * because host interrupt handlers would get confused. */
-	lwz	r1, VCPU_GPR(r1)(r4)
+	lwz	r1, VCPU_GPR(R1)(r4)
 
 	/*
 	 * Host interrupt handlers may have clobbered these
@@ -449,10 +448,10 @@ lightweight_exit:
 	mtcr	r5
 	mtsrr0	r6
 	mtsrr1	r7
-	lwz	r5, VCPU_GPR(r5)(r4)
-	lwz	r6, VCPU_GPR(r6)(r4)
-	lwz	r7, VCPU_GPR(r7)(r4)
-	lwz	r8, VCPU_GPR(r8)(r4)
+	lwz	r5, VCPU_GPR(R5)(r4)
+	lwz	r6, VCPU_GPR(R6)(r4)
+	lwz	r7, VCPU_GPR(R7)(r4)
+	lwz	r8, VCPU_GPR(R8)(r4)
 
 	/* Clear any debug events which occurred since we disabled MSR[DE].
 	 * XXX This gives us a 3-instruction window in which a breakpoint
@@ -461,8 +460,8 @@ lightweight_exit:
 	ori	r3, r3, 0xffff
 	mtspr	SPRN_DBSR, r3
 
-	lwz	r3, VCPU_GPR(r3)(r4)
-	lwz	r4, VCPU_GPR(r4)(r4)
+	lwz	r3, VCPU_GPR(R3)(r4)
+	lwz	r4, VCPU_GPR(R4)(r4)
 	rfi
 
 #ifdef CONFIG_SPE
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..1685dc43bcf2 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -37,7 +37,6 @@
 
 #define LONGBYTES (BITS_PER_LONG / 8)
 
-#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
 #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
 
 /* The host stack layout: */
@@ -67,15 +66,15 @@
  */
 .macro kvm_handler_common intno, srr0, flags
 	/* Restore host stack pointer */
-	PPC_STL	r1, VCPU_GPR(r1)(r4)
-	PPC_STL	r2, VCPU_GPR(r2)(r4)
+	PPC_STL	r1, VCPU_GPR(R1)(r4)
+	PPC_STL	r2, VCPU_GPR(R2)(r4)
 	PPC_LL	r1, VCPU_HOST_STACK(r4)
 	PPC_LL	r2, HOST_R2(r1)
 
 	mfspr	r10, SPRN_PID
 	lwz	r8, VCPU_HOST_PID(r4)
 	PPC_LL	r11, VCPU_SHARED(r4)
-	PPC_STL	r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+	PPC_STL	r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
 	li	r14, \intno
 
 	stw	r10, VCPU_GUEST_PID(r4)
@@ -137,35 +136,31 @@
  */
 
 	mfspr	r3, SPRN_EPLC	/* will already have correct ELPID and EGS */
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
 	mr	r8, r3
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
 	rlwimi	r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
 	rlwimi	r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
 	rlwimi	r8, r10, EPC_EPID_SHIFT, EPC_EPID
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 	mtspr	SPRN_EPLC, r8
 
 	/* disable preemption, so we are sure we hit the fixup handler */
-#ifdef CONFIG_PPC64
-	clrrdi	r8,r1,THREAD_SHIFT
-#else
-	rlwinm	r8,r1,0,0,31-THREAD_SHIFT	/* current thread_info */
-#endif
+	CURRENT_THREAD_INFO(r8, r1)
 	li	r7, 1
 	stw	r7, TI_PREEMPT(r8)
 
@@ -211,24 +206,24 @@
 .macro kvm_handler intno srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	GET_VCPU(r11, r10)
-	PPC_STL	r3, VCPU_GPR(r3)(r11)
+	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r13, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	mfspr	r6, \srr1
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL	r8, VCPU_GPR(r8)(r11)
-	PPC_STL	r9, VCPU_GPR(r9)(r11)
-	PPC_STL	r3, VCPU_GPR(r13)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL	r8, VCPU_GPR(R8)(r11)
+	PPC_STL	r9, VCPU_GPR(R9)(r11)
+	PPC_STL	r3, VCPU_GPR(R13)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +233,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	mfspr	r10, SPRN_SPRG_THREAD
 	GET_VCPU(r11, r10)
-	PPC_STL	r3, VCPU_GPR(r3)(r11)
+	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, \scratch
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, GPR9(r8)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r9, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r8)(r11)
+	PPC_STL	r3, VCPU_GPR(R8)(r11)
 	PPC_LL	r3, GPR10(r8)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r9)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R9)(r11)
 	mfspr	r6, \srr1
 	PPC_LL	r4, GPR11(r8)
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL	r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
-	PPC_STL	r13, VCPU_GPR(r13)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
+	PPC_STL	r13, VCPU_GPR(R13)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -310,7 +305,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
 _GLOBAL(kvmppc_resume_host)
 	/* Save remaining volatile guest register state to vcpu. */
 	mfspr	r3, SPRN_VRSAVE
-	PPC_STL	r0, VCPU_GPR(r0)(r4)
+	PPC_STL	r0, VCPU_GPR(R0)(r4)
 	mflr	r5
 	mfspr	r6, SPRN_SPRG4
 	PPC_STL	r5, VCPU_LR(r4)
@@ -358,27 +353,27 @@ _GLOBAL(kvmppc_resume_host)
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr	r4, r14
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
 
 	andi.	r5, r3, RESUME_FLAG_NV
 	beq	skip_nv_load
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
 skip_nv_load:
 	/* Should we return to the guest? */
 	andi.	r5, r3, RESUME_FLAG_HOST
@@ -396,23 +391,23 @@ heavyweight_exit:
 	 * non-volatiles.
 	 */
 
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 
 	/* Load host non-volatile register state from host stack. */
 	PPC_LL	r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +473,24 @@ _GLOBAL(__kvmppc_vcpu_run)
 	PPC_STL	r31, HOST_NV_GPR(r31)(r1)
 
 	/* Load guest non-volatiles. */
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
 
 
 lightweight_exit:
@@ -554,13 +549,13 @@ lightweight_exit:
 	lwz	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
 	PPC_LD(r9, VCPU_SHARED_MSR, r11)
-	PPC_LL	r0, VCPU_GPR(r0)(r4)
-	PPC_LL	r1, VCPU_GPR(r1)(r4)
-	PPC_LL	r2, VCPU_GPR(r2)(r4)
-	PPC_LL	r10, VCPU_GPR(r10)(r4)
-	PPC_LL	r11, VCPU_GPR(r11)(r4)
-	PPC_LL	r12, VCPU_GPR(r12)(r4)
-	PPC_LL	r13, VCPU_GPR(r13)(r4)
+	PPC_LL	r0, VCPU_GPR(R0)(r4)
+	PPC_LL	r1, VCPU_GPR(R1)(r4)
+	PPC_LL	r2, VCPU_GPR(R2)(r4)
+	PPC_LL	r10, VCPU_GPR(R10)(r4)
+	PPC_LL	r11, VCPU_GPR(R11)(r4)
+	PPC_LL	r12, VCPU_GPR(R12)(r4)
+	PPC_LL	r13, VCPU_GPR(R13)(r4)
 	mtlr	r3
 	mtxer	r5
 	mtctr	r6
@@ -586,12 +581,12 @@ lightweight_exit:
 	mtcr	r7
 
 	/* Finish loading guest volatiles and jump to guest. */
-	PPC_LL	r5, VCPU_GPR(r5)(r4)
-	PPC_LL	r6, VCPU_GPR(r6)(r4)
-	PPC_LL	r7, VCPU_GPR(r7)(r4)
-	PPC_LL	r8, VCPU_GPR(r8)(r4)
-	PPC_LL	r9, VCPU_GPR(r9)(r4)
+	PPC_LL	r5, VCPU_GPR(R5)(r4)
+	PPC_LL	r6, VCPU_GPR(R6)(r4)
+	PPC_LL	r7, VCPU_GPR(R7)(r4)
+	PPC_LL	r8, VCPU_GPR(R8)(r4)
+	PPC_LL	r9, VCPU_GPR(R9)(r4)
 
-	PPC_LL	r3, VCPU_GPR(r3)(r4)
-	PPC_LL	r4, VCPU_GPR(r4)(r4)
+	PPC_LL	r3, VCPU_GPR(R3)(r4)
+	PPC_LL	r4, VCPU_GPR(R4)(r4)
 	rfi