25 files changed, 659 insertions, 659 deletions
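Note on the change itself: every hunk below makes the same mechanical substitution, replacing bare lowercase register names (r0-r31) with the uppercase symbols R0-R31 wherever a macro does arithmetic on its register argument. Macros such as VCPU_GPR(), HOST_NV_GPR() and STK_REG() turn the register number into a byte offset, so they need a token that expands to a plain integer. A minimal sketch of the assumed mechanism, mirroring the VCPU_GPR() definition visible in book3s_interrupts.S below (the R-symbol defines live in the powerpc asm headers; the base offset used here is illustrative):

/* Sketch only: the uppercase names are assumed to be plain integer
 * defines (one per GPR), so offset macros can compute with them. */
#define R14		14
#define ULONG_SIZE	8	/* illustrative: sizeof(unsigned long) on ppc64 */
#define VCPU_GPRS	0x100	/* illustrative base of the saved-GPR array */
#define VCPU_GPR(n)	(VCPU_GPRS + ((n) * ULONG_SIZE))

/* VCPU_GPR(R14) expands to (0x100 + (14 * 8)): plain offset arithmetic.
 * If the lowercase r14 is later redefined to the real register name
 * %r14, VCPU_GPR(r14) would stop being valid arithmetic; switching the
 * macro arguments to R14 now is what makes that later change safe. */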
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
index ebc62f42a237..95675a7181dc 100644
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -100,19 +100,19 @@ _icswx_skip_guest:
 	lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
 	mtspr SPRN_MMUCR0, r4
 	li r4,A2_IERAT_SIZE-1
-	PPC_ERATWE(r4,r4,3)
+	PPC_ERATWE(R4,R4,3)

 	/* Now set the D-ERAT watermark to 31 */
 	lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
 	mtspr SPRN_MMUCR0, r4
 	li r4,A2_DERAT_SIZE-1
-	PPC_ERATWE(r4,r4,3)
+	PPC_ERATWE(R4,R4,3)

 	/* And invalidate the beast just in case. That won't get rid of
 	 * a bolted entry though it will be in LRU and so will go away eventually
 	 * but let's not bother for now
 	 */
-	PPC_ERATILX(0,0,0)
+	PPC_ERATILX(0,R0,R0)
 1:
 	blr

diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index de369558bf0a..71c1c73bc65f 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -106,7 +106,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
 	lfd fr0,THREAD_FPSCR(r5)
 	MTFSF_L(fr0)
-	REST_32FPVSRS(0, r4, r5)
+	REST_32FPVSRS(0, R4, R5)
 #ifndef CONFIG_SMP
 	subi r4,r5,THREAD
 	fromreal(r4)
@@ -140,7 +140,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	addi r3,r3,THREAD /* want THREAD of task */
 	PPC_LL r5,PT_REGS(r3)
 	PPC_LCMPI 0,r5,0
-	SAVE_32FPVSRS(0, r4 ,r3)
+	SAVE_32FPVSRS(0, R4 ,R3)
 	mffs fr0
 	stfd fr0,THREAD_FPSCR(r3)
 	beq 1f
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 62bdf2389669..02c167db6ba0 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -302,7 +302,7 @@ static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)

 	if (imm_one) {
 		p[kvm_emulate_wrtee_reg_offs] =
-			KVM_INST_LI | __PPC_RT(30) | MSR_EE;
+			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
 	} else {
 		/* Make clobbered registers work too */
 		switch (get_rt(rt)) {
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 616921ef1439..6ba08bc91b21 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -314,7 +314,7 @@ _GLOBAL(real_205_readb)
 	mtmsrd r0
 	sync
 	isync
-	LBZCIX(r3,0,r3)
+	LBZCIX(R3,0,R3)
 	isync
 	mtmsrd r7
 	sync
@@ -329,7 +329,7 @@ _GLOBAL(real_205_writeb)
 	mtmsrd r0
 	sync
 	isync
-	STBCIX(r3,0,r4)
+	STBCIX(R3,0,R4)
 	isync
 	mtmsrd r7
 	sync
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a1044f43becd..bc99015030c3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/* Load up FP, VMX and VSX registers */
 	bl kvmppc_load_fp

-	ld r14, VCPU_GPR(r14)(r4)
-	ld r15, VCPU_GPR(r15)(r4)
-	ld r16, VCPU_GPR(r16)(r4)
-	ld r17, VCPU_GPR(r17)(r4)
-	ld r18, VCPU_GPR(r18)(r4)
-	ld r19, VCPU_GPR(r19)(r4)
-	ld r20, VCPU_GPR(r20)(r4)
-	ld r21, VCPU_GPR(r21)(r4)
-	ld r22, VCPU_GPR(r22)(r4)
-	ld r23, VCPU_GPR(r23)(r4)
-	ld r24, VCPU_GPR(r24)(r4)
-	ld r25, VCPU_GPR(r25)(r4)
-	ld r26, VCPU_GPR(r26)(r4)
-	ld r27, VCPU_GPR(r27)(r4)
-	ld r28, VCPU_GPR(r28)(r4)
-	ld r29, VCPU_GPR(r29)(r4)
-	ld r30, VCPU_GPR(r30)(r4)
-	ld r31, VCPU_GPR(r31)(r4)
+	ld r14, VCPU_GPR(R14)(r4)
+	ld r15, VCPU_GPR(R15)(r4)
+	ld r16, VCPU_GPR(R16)(r4)
+	ld r17, VCPU_GPR(R17)(r4)
+	ld r18, VCPU_GPR(R18)(r4)
+	ld r19, VCPU_GPR(R19)(r4)
+	ld r20, VCPU_GPR(R20)(r4)
+	ld r21, VCPU_GPR(R21)(r4)
+	ld r22, VCPU_GPR(R22)(r4)
+	ld r23, VCPU_GPR(R23)(r4)
+	ld r24, VCPU_GPR(R24)(r4)
+	ld r25, VCPU_GPR(R25)(r4)
+	ld r26, VCPU_GPR(R26)(r4)
+	ld r27, VCPU_GPR(R27)(r4)
+	ld r28, VCPU_GPR(R28)(r4)
+	ld r29, VCPU_GPR(R29)(r4)
+	ld r30, VCPU_GPR(R30)(r4)
+	ld r31, VCPU_GPR(R31)(r4)

 BEGIN_FTR_SECTION
 	/* Switch DSCR to guest value */
@@ -547,21 +547,21 @@ fast_guest_return:
 	mtlr r5
 	mtcr r6

-	ld r0, VCPU_GPR(r0)(r4)
-	ld r1, VCPU_GPR(r1)(r4)
-	ld r2, VCPU_GPR(r2)(r4)
-	ld r3, VCPU_GPR(r3)(r4)
-	ld r5, VCPU_GPR(r5)(r4)
-	ld r6, VCPU_GPR(r6)(r4)
-	ld r7, VCPU_GPR(r7)(r4)
-	ld r8, VCPU_GPR(r8)(r4)
-	ld r9, VCPU_GPR(r9)(r4)
-	ld r10, VCPU_GPR(r10)(r4)
-	ld r11, VCPU_GPR(r11)(r4)
-	ld r12, VCPU_GPR(r12)(r4)
-	ld r13, VCPU_GPR(r13)(r4)
+	ld r0, VCPU_GPR(R0)(r4)
+	ld r1, VCPU_GPR(R1)(r4)
+	ld r2, VCPU_GPR(R2)(r4)
+	ld r3, VCPU_GPR(R3)(r4)
+	ld r5, VCPU_GPR(R5)(r4)
+	ld r6, VCPU_GPR(R6)(r4)
+	ld r7, VCPU_GPR(R7)(r4)
+	ld r8, VCPU_GPR(R8)(r4)
+	ld r9, VCPU_GPR(R9)(r4)
+	ld r10, VCPU_GPR(R10)(r4)
+	ld r11, VCPU_GPR(R11)(r4)
+	ld r12, VCPU_GPR(R12)(r4)
+	ld r13, VCPU_GPR(R13)(r4)

-	ld r4, VCPU_GPR(r4)(r4)
+	ld r4, VCPU_GPR(R4)(r4)

 	hrfid
 	b .
@@ -590,22 +590,22 @@ kvmppc_interrupt:

 	/* Save registers */

-	std r0, VCPU_GPR(r0)(r9)
-	std r1, VCPU_GPR(r1)(r9)
-	std r2, VCPU_GPR(r2)(r9)
-	std r3, VCPU_GPR(r3)(r9)
-	std r4, VCPU_GPR(r4)(r9)
-	std r5, VCPU_GPR(r5)(r9)
-	std r6, VCPU_GPR(r6)(r9)
-	std r7, VCPU_GPR(r7)(r9)
-	std r8, VCPU_GPR(r8)(r9)
+	std r0, VCPU_GPR(R0)(r9)
+	std r1, VCPU_GPR(R1)(r9)
+	std r2, VCPU_GPR(R2)(r9)
+	std r3, VCPU_GPR(R3)(r9)
+	std r4, VCPU_GPR(R4)(r9)
+	std r5, VCPU_GPR(R5)(r9)
+	std r6, VCPU_GPR(R6)(r9)
+	std r7, VCPU_GPR(R7)(r9)
+	std r8, VCPU_GPR(R8)(r9)
 	ld r0, HSTATE_HOST_R2(r13)
-	std r0, VCPU_GPR(r9)(r9)
-	std r10, VCPU_GPR(r10)(r9)
-	std r11, VCPU_GPR(r11)(r9)
+	std r0, VCPU_GPR(R9)(r9)
+	std r10, VCPU_GPR(R10)(r9)
+	std r11, VCPU_GPR(R11)(r9)
 	ld r3, HSTATE_SCRATCH0(r13)
 	lwz r4, HSTATE_SCRATCH1(r13)
-	std r3, VCPU_GPR(r12)(r9)
+	std r3, VCPU_GPR(R12)(r9)
 	stw r4, VCPU_CR(r9)

 	/* Restore R1/R2 so we can handle faults */
@@ -626,7 +626,7 @@ kvmppc_interrupt:

 	GET_SCRATCH0(r3)
 	mflr r4
-	std r3, VCPU_GPR(r13)(r9)
+	std r3, VCPU_GPR(R13)(r9)
 	std r4, VCPU_LR(r9)

 	/* Unset guest mode */
@@ -968,24 +968,24 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

 	/* Save non-volatile GPRs */
-	std r14, VCPU_GPR(r14)(r9)
-	std r15, VCPU_GPR(r15)(r9)
-	std r16, VCPU_GPR(r16)(r9)
-	std r17, VCPU_GPR(r17)(r9)
-	std r18, VCPU_GPR(r18)(r9)
-	std r19, VCPU_GPR(r19)(r9)
-	std r20, VCPU_GPR(r20)(r9)
-	std r21, VCPU_GPR(r21)(r9)
-	std r22, VCPU_GPR(r22)(r9)
-	std r23, VCPU_GPR(r23)(r9)
-	std r24, VCPU_GPR(r24)(r9)
-	std r25, VCPU_GPR(r25)(r9)
-	std r26, VCPU_GPR(r26)(r9)
-	std r27, VCPU_GPR(r27)(r9)
-	std r28, VCPU_GPR(r28)(r9)
-	std r29, VCPU_GPR(r29)(r9)
-	std r30, VCPU_GPR(r30)(r9)
-	std r31, VCPU_GPR(r31)(r9)
+	std r14, VCPU_GPR(R14)(r9)
+	std r15, VCPU_GPR(R15)(r9)
+	std r16, VCPU_GPR(R16)(r9)
+	std r17, VCPU_GPR(R17)(r9)
+	std r18, VCPU_GPR(R18)(r9)
+	std r19, VCPU_GPR(R19)(r9)
+	std r20, VCPU_GPR(R20)(r9)
+	std r21, VCPU_GPR(R21)(r9)
+	std r22, VCPU_GPR(R22)(r9)
+	std r23, VCPU_GPR(R23)(r9)
+	std r24, VCPU_GPR(R24)(r9)
+	std r25, VCPU_GPR(R25)(r9)
+	std r26, VCPU_GPR(R26)(r9)
+	std r27, VCPU_GPR(R27)(r9)
+	std r28, VCPU_GPR(R28)(r9)
+	std r29, VCPU_GPR(R29)(r9)
+	std r30, VCPU_GPR(R30)(r9)
+	std r31, VCPU_GPR(R31)(r9)

 	/* Save SPRGs */
 	mfspr r3, SPRN_SPRG0
@@ -1160,7 +1160,7 @@ kvmppc_hdsi:
 	andi. r0, r11, MSR_DR /* data relocation enabled? */
 	beq 3f
 	clrrdi r0, r4, 28
-	PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
 	bne 1f /* if no SLB entry found */
 4:	std r4, VCPU_FAULT_DAR(r9)
 	stw r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1234,7 @@ kvmppc_hisi:
 	andi. r0, r11, MSR_IR /* instruction relocation enabled? */
 	beq 3f
 	clrrdi r0, r10, 28
-	PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
 	bne 1f /* if no SLB entry found */
 4:
 	/* Search the hash table. */
@@ -1278,7 +1278,7 @@ kvmppc_hisi:
 	 */
 	.globl hcall_try_real_mode
 hcall_try_real_mode:
-	ld r3,VCPU_GPR(r3)(r9)
+	ld r3,VCPU_GPR(R3)(r9)
 	andi. r0,r11,MSR_PR
 	bne hcall_real_cont
 	clrrdi r3,r3,2
@@ -1291,12 +1291,12 @@ hcall_try_real_mode:
 	add r3,r3,r4
 	mtctr r3
 	mr r3,r9 /* get vcpu pointer */
-	ld r4,VCPU_GPR(r4)(r9)
+	ld r4,VCPU_GPR(R4)(r9)
 	bctrl
 	cmpdi r3,H_TOO_HARD
 	beq hcall_real_fallback
 	ld r4,HSTATE_KVM_VCPU(r13)
-	std r3,VCPU_GPR(r3)(r4)
+	std r3,VCPU_GPR(R3)(r4)
 	ld r10,VCPU_PC(r4)
 	ld r11,VCPU_MSR(r4)
 	b fast_guest_return
@@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede)
 	li r0,0 /* set trap to 0 to say hcall is handled */
 	stw r0,VCPU_TRAP(r3)
 	li r0,H_SUCCESS
-	std r0,VCPU_GPR(r3)(r3)
+	std r0,VCPU_GPR(R3)(r3)
 BEGIN_FTR_SECTION
 	b 2f /* just send it up to host on 970 */
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	addi r6,r5,VCORE_NAPPING_THREADS
 31:	lwarx r4,0,r6
 	or r4,r4,r0
-	PPC_POPCNTW(r7,r4)
+	PPC_POPCNTW(R7,R4)
 	cmpw r7,r8
 	bge 2f
 	stwcx. r4,0,r6
@@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 	 */
 	/* Save non-volatile GPRs */
-	std r14, VCPU_GPR(r14)(r3)
-	std r15, VCPU_GPR(r15)(r3)
-	std r16, VCPU_GPR(r16)(r3)
-	std r17, VCPU_GPR(r17)(r3)
-	std r18, VCPU_GPR(r18)(r3)
-	std r19, VCPU_GPR(r19)(r3)
-	std r20, VCPU_GPR(r20)(r3)
-	std r21, VCPU_GPR(r21)(r3)
-	std r22, VCPU_GPR(r22)(r3)
-	std r23, VCPU_GPR(r23)(r3)
-	std r24, VCPU_GPR(r24)(r3)
-	std r25, VCPU_GPR(r25)(r3)
-	std r26, VCPU_GPR(r26)(r3)
-	std r27, VCPU_GPR(r27)(r3)
-	std r28, VCPU_GPR(r28)(r3)
-	std r29, VCPU_GPR(r29)(r3)
-	std r30, VCPU_GPR(r30)(r3)
-	std r31, VCPU_GPR(r31)(r3)
+	std r14, VCPU_GPR(R14)(r3)
+	std r15, VCPU_GPR(R15)(r3)
+	std r16, VCPU_GPR(R16)(r3)
+	std r17, VCPU_GPR(R17)(r3)
+	std r18, VCPU_GPR(R18)(r3)
+	std r19, VCPU_GPR(R19)(r3)
+	std r20, VCPU_GPR(R20)(r3)
+	std r21, VCPU_GPR(R21)(r3)
+	std r22, VCPU_GPR(R22)(r3)
+	std r23, VCPU_GPR(R23)(r3)
+	std r24, VCPU_GPR(R24)(r3)
+	std r25, VCPU_GPR(R25)(r3)
+	std r26, VCPU_GPR(R26)(r3)
+	std r27, VCPU_GPR(R27)(r3)
+	std r28, VCPU_GPR(R28)(r3)
+	std r29, VCPU_GPR(R29)(r3)
+	std r30, VCPU_GPR(R30)(r3)
+	std r31, VCPU_GPR(R31)(r3)

 	/* save FP state */
 	bl .kvmppc_save_fp
@@ -1513,24 +1513,24 @@ kvm_end_cede:
 	bl kvmppc_load_fp

 	/* Load NV GPRS */
-	ld r14, VCPU_GPR(r14)(r4)
-	ld r15, VCPU_GPR(r15)(r4)
-	ld r16, VCPU_GPR(r16)(r4)
-	ld r17, VCPU_GPR(r17)(r4)
-	ld r18, VCPU_GPR(r18)(r4)
-	ld r19, VCPU_GPR(r19)(r4)
-	ld r20, VCPU_GPR(r20)(r4)
-	ld r21, VCPU_GPR(r21)(r4)
-	ld r22, VCPU_GPR(r22)(r4)
-	ld r23, VCPU_GPR(r23)(r4)
-	ld r24, VCPU_GPR(r24)(r4)
-	ld r25, VCPU_GPR(r25)(r4)
-	ld r26, VCPU_GPR(r26)(r4)
-	ld r27, VCPU_GPR(r27)(r4)
-	ld r28, VCPU_GPR(r28)(r4)
-	ld r29, VCPU_GPR(r29)(r4)
-	ld r30, VCPU_GPR(r30)(r4)
-	ld r31, VCPU_GPR(r31)(r4)
+	ld r14, VCPU_GPR(R14)(r4)
+	ld r15, VCPU_GPR(R15)(r4)
+	ld r16, VCPU_GPR(R16)(r4)
+	ld r17, VCPU_GPR(R17)(r4)
+	ld r18, VCPU_GPR(R18)(r4)
+	ld r19, VCPU_GPR(R19)(r4)
+	ld r20, VCPU_GPR(R20)(r4)
+	ld r21, VCPU_GPR(R21)(r4)
+	ld r22, VCPU_GPR(R22)(r4)
+	ld r23, VCPU_GPR(R23)(r4)
+	ld r24, VCPU_GPR(R24)(r4)
+	ld r25, VCPU_GPR(R25)(r4)
+	ld r26, VCPU_GPR(R26)(r4)
+	ld r27, VCPU_GPR(R27)(r4)
+	ld r28, VCPU_GPR(R28)(r4)
+	ld r29, VCPU_GPR(R29)(r4)
+	ld r30, VCPU_GPR(R30)(r4)
+	ld r31, VCPU_GPR(R31)(r4)

 	/* clear our bit in vcore->napping_threads */
 33:	ld r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept 32
 	li r6,reg*16+VCPU_VSRS
-	STXVD2X(reg,r6,r3)
+	STXVD2X(reg,R6,R3)
 	reg = reg + 1
 	.endr
 FTR_SECTION_ELSE
@@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept 32
 	li r7,reg*16+VCPU_VSRS
-	LXVD2X(reg,r7,r4)
+	LXVD2X(reg,R7,R4)
 	reg = reg + 1
 	.endr
 FTR_SECTION_ELSE
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3e35383bdb21..2ddab0f90a81 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -39,24 +39,24 @@

 #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
 #define VCPU_LOAD_NVGPRS(vcpu) \
-	PPC_LL r14, VCPU_GPR(r14)(vcpu); \
-	PPC_LL r15, VCPU_GPR(r15)(vcpu); \
-	PPC_LL r16, VCPU_GPR(r16)(vcpu); \
-	PPC_LL r17, VCPU_GPR(r17)(vcpu); \
-	PPC_LL r18, VCPU_GPR(r18)(vcpu); \
-	PPC_LL r19, VCPU_GPR(r19)(vcpu); \
-	PPC_LL r20, VCPU_GPR(r20)(vcpu); \
-	PPC_LL r21, VCPU_GPR(r21)(vcpu); \
-	PPC_LL r22, VCPU_GPR(r22)(vcpu); \
-	PPC_LL r23, VCPU_GPR(r23)(vcpu); \
-	PPC_LL r24, VCPU_GPR(r24)(vcpu); \
-	PPC_LL r25, VCPU_GPR(r25)(vcpu); \
-	PPC_LL r26, VCPU_GPR(r26)(vcpu); \
-	PPC_LL r27, VCPU_GPR(r27)(vcpu); \
-	PPC_LL r28, VCPU_GPR(r28)(vcpu); \
-	PPC_LL r29, VCPU_GPR(r29)(vcpu); \
-	PPC_LL r30, VCPU_GPR(r30)(vcpu); \
-	PPC_LL r31, VCPU_GPR(r31)(vcpu); \
+	PPC_LL r14, VCPU_GPR(R14)(vcpu); \
+	PPC_LL r15, VCPU_GPR(R15)(vcpu); \
+	PPC_LL r16, VCPU_GPR(R16)(vcpu); \
+	PPC_LL r17, VCPU_GPR(R17)(vcpu); \
+	PPC_LL r18, VCPU_GPR(R18)(vcpu); \
+	PPC_LL r19, VCPU_GPR(R19)(vcpu); \
+	PPC_LL r20, VCPU_GPR(R20)(vcpu); \
+	PPC_LL r21, VCPU_GPR(R21)(vcpu); \
+	PPC_LL r22, VCPU_GPR(R22)(vcpu); \
+	PPC_LL r23, VCPU_GPR(R23)(vcpu); \
+	PPC_LL r24, VCPU_GPR(R24)(vcpu); \
+	PPC_LL r25, VCPU_GPR(R25)(vcpu); \
+	PPC_LL r26, VCPU_GPR(R26)(vcpu); \
+	PPC_LL r27, VCPU_GPR(R27)(vcpu); \
+	PPC_LL r28, VCPU_GPR(R28)(vcpu); \
+	PPC_LL r29, VCPU_GPR(R29)(vcpu); \
+	PPC_LL r30, VCPU_GPR(R30)(vcpu); \
+	PPC_LL r31, VCPU_GPR(R31)(vcpu); \

 /*****************************************************************************
  *                                                                           *
@@ -131,24 +131,24 @@ kvmppc_handler_highmem:
 	/* R7 = vcpu */
 	PPC_LL r7, GPR4(r1)

-	PPC_STL r14, VCPU_GPR(r14)(r7)
-	PPC_STL r15, VCPU_GPR(r15)(r7)
-	PPC_STL r16, VCPU_GPR(r16)(r7)
-	PPC_STL r17, VCPU_GPR(r17)(r7)
-	PPC_STL r18, VCPU_GPR(r18)(r7)
-	PPC_STL r19, VCPU_GPR(r19)(r7)
-	PPC_STL r20, VCPU_GPR(r20)(r7)
-	PPC_STL r21, VCPU_GPR(r21)(r7)
-	PPC_STL r22, VCPU_GPR(r22)(r7)
-	PPC_STL r23, VCPU_GPR(r23)(r7)
-	PPC_STL r24, VCPU_GPR(r24)(r7)
-	PPC_STL r25, VCPU_GPR(r25)(r7)
-	PPC_STL r26, VCPU_GPR(r26)(r7)
-	PPC_STL r27, VCPU_GPR(r27)(r7)
-	PPC_STL r28, VCPU_GPR(r28)(r7)
-	PPC_STL r29, VCPU_GPR(r29)(r7)
-	PPC_STL r30, VCPU_GPR(r30)(r7)
-	PPC_STL r31, VCPU_GPR(r31)(r7)
+	PPC_STL r14, VCPU_GPR(R14)(r7)
+	PPC_STL r15, VCPU_GPR(R15)(r7)
+	PPC_STL r16, VCPU_GPR(R16)(r7)
+	PPC_STL r17, VCPU_GPR(R17)(r7)
+	PPC_STL r18, VCPU_GPR(R18)(r7)
+	PPC_STL r19, VCPU_GPR(R19)(r7)
+	PPC_STL r20, VCPU_GPR(R20)(r7)
+	PPC_STL r21, VCPU_GPR(R21)(r7)
+	PPC_STL r22, VCPU_GPR(R22)(r7)
+	PPC_STL r23, VCPU_GPR(R23)(r7)
+	PPC_STL r24, VCPU_GPR(R24)(r7)
+	PPC_STL r25, VCPU_GPR(R25)(r7)
+	PPC_STL r26, VCPU_GPR(R26)(r7)
+	PPC_STL r27, VCPU_GPR(R27)(r7)
+	PPC_STL r28, VCPU_GPR(R28)(r7)
+	PPC_STL r29, VCPU_GPR(R29)(r7)
+	PPC_STL r30, VCPU_GPR(R30)(r7)
+	PPC_STL r31, VCPU_GPR(R31)(r7)

 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	mr r5, r12
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8feec2ff3928..e598a5a0d5e4 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -37,7 +37,7 @@
 #define HOST_CR 16
 #define HOST_NV_GPRS 20
 #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
-#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
 #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
@@ -58,8 +58,8 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
 	/* Get pointer to vcpu and record exit number. */
 	mtspr SPRN_SPRG_WSCRATCH0, r4
 	mfspr r4, SPRN_SPRG_RVCPU
-	stw r5, VCPU_GPR(r5)(r4)
-	stw r6, VCPU_GPR(r6)(r4)
+	stw r5, VCPU_GPR(R5)(r4)
+	stw r6, VCPU_GPR(R6)(r4)
 	mfctr r5
 	lis r6, kvmppc_resume_host@h
 	stw r5, VCPU_CTR(r4)
@@ -100,12 +100,12 @@ _GLOBAL(kvmppc_handler_len)
  * r5: KVM exit number
  */
 _GLOBAL(kvmppc_resume_host)
-	stw r3, VCPU_GPR(r3)(r4)
+	stw r3, VCPU_GPR(R3)(r4)
 	mfcr r3
 	stw r3, VCPU_CR(r4)
-	stw r7, VCPU_GPR(r7)(r4)
-	stw r8, VCPU_GPR(r8)(r4)
-	stw r9, VCPU_GPR(r9)(r4)
+	stw r7, VCPU_GPR(R7)(r4)
+	stw r8, VCPU_GPR(R8)(r4)
+	stw r9, VCPU_GPR(R9)(r4)

 	li r6, 1
 	slw r6, r6, r5
@@ -135,23 +135,23 @@ _GLOBAL(kvmppc_resume_host)
 	isync
 	stw r9, VCPU_LAST_INST(r4)

-	stw r15, VCPU_GPR(r15)(r4)
-	stw r16, VCPU_GPR(r16)(r4)
-	stw r17, VCPU_GPR(r17)(r4)
-	stw r18, VCPU_GPR(r18)(r4)
-	stw r19, VCPU_GPR(r19)(r4)
-	stw r20, VCPU_GPR(r20)(r4)
-	stw r21, VCPU_GPR(r21)(r4)
-	stw r22, VCPU_GPR(r22)(r4)
-	stw r23, VCPU_GPR(r23)(r4)
-	stw r24, VCPU_GPR(r24)(r4)
-	stw r25, VCPU_GPR(r25)(r4)
-	stw r26, VCPU_GPR(r26)(r4)
-	stw r27, VCPU_GPR(r27)(r4)
-	stw r28, VCPU_GPR(r28)(r4)
-	stw r29, VCPU_GPR(r29)(r4)
-	stw r30, VCPU_GPR(r30)(r4)
-	stw r31, VCPU_GPR(r31)(r4)
+	stw r15, VCPU_GPR(R15)(r4)
+	stw r16, VCPU_GPR(R16)(r4)
+	stw r17, VCPU_GPR(R17)(r4)
+	stw r18, VCPU_GPR(R18)(r4)
+	stw r19, VCPU_GPR(R19)(r4)
+	stw r20, VCPU_GPR(R20)(r4)
+	stw r21, VCPU_GPR(R21)(r4)
+	stw r22, VCPU_GPR(R22)(r4)
+	stw r23, VCPU_GPR(R23)(r4)
+	stw r24, VCPU_GPR(R24)(r4)
+	stw r25, VCPU_GPR(R25)(r4)
+	stw r26, VCPU_GPR(R26)(r4)
+	stw r27, VCPU_GPR(R27)(r4)
+	stw r28, VCPU_GPR(R28)(r4)
+	stw r29, VCPU_GPR(R29)(r4)
+	stw r30, VCPU_GPR(R30)(r4)
+	stw r31, VCPU_GPR(R31)(r4)
 ..skip_inst_copy:

 	/* Also grab DEAR and ESR before the host can clobber them. */
@@ -169,20 +169,20 @@ _GLOBAL(kvmppc_resume_host)
 ..skip_esr:

 	/* Save remaining volatile guest register state to vcpu. */
-	stw r0, VCPU_GPR(r0)(r4)
-	stw r1, VCPU_GPR(r1)(r4)
-	stw r2, VCPU_GPR(r2)(r4)
-	stw r10, VCPU_GPR(r10)(r4)
-	stw r11, VCPU_GPR(r11)(r4)
-	stw r12, VCPU_GPR(r12)(r4)
-	stw r13, VCPU_GPR(r13)(r4)
-	stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+	stw r0, VCPU_GPR(R0)(r4)
+	stw r1, VCPU_GPR(R1)(r4)
+	stw r2, VCPU_GPR(R2)(r4)
+	stw r10, VCPU_GPR(R10)(r4)
+	stw r11, VCPU_GPR(R11)(r4)
+	stw r12, VCPU_GPR(R12)(r4)
+	stw r13, VCPU_GPR(R13)(r4)
+	stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
 	mflr r3
 	stw r3, VCPU_LR(r4)
 	mfxer r3
 	stw r3, VCPU_XER(r4)
 	mfspr r3, SPRN_SPRG_RSCRATCH0
-	stw r3, VCPU_GPR(r4)(r4)
+	stw r3, VCPU_GPR(R4)(r4)
 	mfspr r3, SPRN_SRR0
 	stw r3, VCPU_PC(r4)

@@ -214,28 +214,28 @@ _GLOBAL(kvmppc_resume_host)

 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr r4, r14
-	lwz r14, VCPU_GPR(r14)(r4)
+	lwz r14, VCPU_GPR(R14)(r4)

 	/* Sometimes instruction emulation must restore complete GPR state. */
 	andi. r5, r3, RESUME_FLAG_NV
 	beq ..skip_nv_load
-	lwz r15, VCPU_GPR(r15)(r4)
-	lwz r16, VCPU_GPR(r16)(r4)
-	lwz r17, VCPU_GPR(r17)(r4)
-	lwz r18, VCPU_GPR(r18)(r4)
-	lwz r19, VCPU_GPR(r19)(r4)
-	lwz r20, VCPU_GPR(r20)(r4)
-	lwz r21, VCPU_GPR(r21)(r4)
-	lwz r22, VCPU_GPR(r22)(r4)
-	lwz r23, VCPU_GPR(r23)(r4)
-	lwz r24, VCPU_GPR(r24)(r4)
-	lwz r25, VCPU_GPR(r25)(r4)
-	lwz r26, VCPU_GPR(r26)(r4)
-	lwz r27, VCPU_GPR(r27)(r4)
-	lwz r28, VCPU_GPR(r28)(r4)
-	lwz r29, VCPU_GPR(r29)(r4)
-	lwz r30, VCPU_GPR(r30)(r4)
-	lwz r31, VCPU_GPR(r31)(r4)
+	lwz r15, VCPU_GPR(R15)(r4)
+	lwz r16, VCPU_GPR(R16)(r4)
+	lwz r17, VCPU_GPR(R17)(r4)
+	lwz r18, VCPU_GPR(R18)(r4)
+	lwz r19, VCPU_GPR(R19)(r4)
+	lwz r20, VCPU_GPR(R20)(r4)
+	lwz r21, VCPU_GPR(R21)(r4)
+	lwz r22, VCPU_GPR(R22)(r4)
+	lwz r23, VCPU_GPR(R23)(r4)
+	lwz r24, VCPU_GPR(R24)(r4)
+	lwz r25, VCPU_GPR(R25)(r4)
+	lwz r26, VCPU_GPR(R26)(r4)
+	lwz r27, VCPU_GPR(R27)(r4)
+	lwz r28, VCPU_GPR(R28)(r4)
+	lwz r29, VCPU_GPR(R29)(r4)
+	lwz r30, VCPU_GPR(R30)(r4)
+	lwz r31, VCPU_GPR(R31)(r4)
 ..skip_nv_load:

 	/* Should we return to the guest? */
@@ -257,43 +257,43 @@ heavyweight_exit:

 	/* We already saved guest volatile register state; now save the
 	 * non-volatiles. */
-	stw r15, VCPU_GPR(r15)(r4)
-	stw r16, VCPU_GPR(r16)(r4)
-	stw r17, VCPU_GPR(r17)(r4)
-	stw r18, VCPU_GPR(r18)(r4)
-	stw r19, VCPU_GPR(r19)(r4)
-	stw r20, VCPU_GPR(r20)(r4)
-	stw r21, VCPU_GPR(r21)(r4)
-	stw r22, VCPU_GPR(r22)(r4)
-	stw r23, VCPU_GPR(r23)(r4)
-	stw r24, VCPU_GPR(r24)(r4)
-	stw r25, VCPU_GPR(r25)(r4)
-	stw r26, VCPU_GPR(r26)(r4)
-	stw r27, VCPU_GPR(r27)(r4)
-	stw r28, VCPU_GPR(r28)(r4)
-	stw r29, VCPU_GPR(r29)(r4)
-	stw r30, VCPU_GPR(r30)(r4)
-	stw r31, VCPU_GPR(r31)(r4)
+	stw r15, VCPU_GPR(R15)(r4)
+	stw r16, VCPU_GPR(R16)(r4)
+	stw r17, VCPU_GPR(R17)(r4)
+	stw r18, VCPU_GPR(R18)(r4)
+	stw r19, VCPU_GPR(R19)(r4)
+	stw r20, VCPU_GPR(R20)(r4)
+	stw r21, VCPU_GPR(R21)(r4)
+	stw r22, VCPU_GPR(R22)(r4)
+	stw r23, VCPU_GPR(R23)(r4)
+	stw r24, VCPU_GPR(R24)(r4)
+	stw r25, VCPU_GPR(R25)(r4)
+	stw r26, VCPU_GPR(R26)(r4)
+	stw r27, VCPU_GPR(R27)(r4)
+	stw r28, VCPU_GPR(R28)(r4)
+	stw r29, VCPU_GPR(R29)(r4)
+	stw r30, VCPU_GPR(R30)(r4)
+	stw r31, VCPU_GPR(R31)(r4)

 	/* Load host non-volatile register state from host stack. */
-	lwz r14, HOST_NV_GPR(r14)(r1)
-	lwz r15, HOST_NV_GPR(r15)(r1)
-	lwz r16, HOST_NV_GPR(r16)(r1)
-	lwz r17, HOST_NV_GPR(r17)(r1)
-	lwz r18, HOST_NV_GPR(r18)(r1)
-	lwz r19, HOST_NV_GPR(r19)(r1)
-	lwz r20, HOST_NV_GPR(r20)(r1)
-	lwz r21, HOST_NV_GPR(r21)(r1)
-	lwz r22, HOST_NV_GPR(r22)(r1)
-	lwz r23, HOST_NV_GPR(r23)(r1)
-	lwz r24, HOST_NV_GPR(r24)(r1)
-	lwz r25, HOST_NV_GPR(r25)(r1)
-	lwz r26, HOST_NV_GPR(r26)(r1)
-	lwz r27, HOST_NV_GPR(r27)(r1)
-	lwz r28, HOST_NV_GPR(r28)(r1)
-	lwz r29, HOST_NV_GPR(r29)(r1)
-	lwz r30, HOST_NV_GPR(r30)(r1)
-	lwz r31, HOST_NV_GPR(r31)(r1)
+	lwz r14, HOST_NV_GPR(R14)(r1)
+	lwz r15, HOST_NV_GPR(R15)(r1)
+	lwz r16, HOST_NV_GPR(R16)(r1)
+	lwz r17, HOST_NV_GPR(R17)(r1)
+	lwz r18, HOST_NV_GPR(R18)(r1)
+	lwz r19, HOST_NV_GPR(R19)(r1)
+	lwz r20, HOST_NV_GPR(R20)(r1)
+	lwz r21, HOST_NV_GPR(R21)(r1)
+	lwz r22, HOST_NV_GPR(R22)(r1)
+	lwz r23, HOST_NV_GPR(R23)(r1)
+	lwz r24, HOST_NV_GPR(R24)(r1)
+	lwz r25, HOST_NV_GPR(R25)(r1)
+	lwz r26, HOST_NV_GPR(R26)(r1)
+	lwz r27, HOST_NV_GPR(R27)(r1)
+	lwz r28, HOST_NV_GPR(R28)(r1)
+	lwz r29, HOST_NV_GPR(R29)(r1)
+	lwz r30, HOST_NV_GPR(R30)(r1)
+	lwz r31, HOST_NV_GPR(R31)(r1)

 	/* Return to kvm_vcpu_run(). */
 	lwz r4, HOST_STACK_LR(r1)
@@ -321,44 +321,44 @@ _GLOBAL(__kvmppc_vcpu_run)
 	stw r5, HOST_CR(r1)

 	/* Save host non-volatile register state to stack. */
-	stw r14, HOST_NV_GPR(r14)(r1)
-	stw r15, HOST_NV_GPR(r15)(r1)
-	stw r16, HOST_NV_GPR(r16)(r1)
-	stw r17, HOST_NV_GPR(r17)(r1)
-	stw r18, HOST_NV_GPR(r18)(r1)
-	stw r19, HOST_NV_GPR(r19)(r1)
-	stw r20, HOST_NV_GPR(r20)(r1)
-	stw r21, HOST_NV_GPR(r21)(r1)
-	stw r22, HOST_NV_GPR(r22)(r1)
-	stw r23, HOST_NV_GPR(r23)(r1)
-	stw r24, HOST_NV_GPR(r24)(r1)
-	stw r25, HOST_NV_GPR(r25)(r1)
-	stw r26, HOST_NV_GPR(r26)(r1)
-	stw r27, HOST_NV_GPR(r27)(r1)
-	stw r28, HOST_NV_GPR(r28)(r1)
-	stw r29, HOST_NV_GPR(r29)(r1)
-	stw r30, HOST_NV_GPR(r30)(r1)
-	stw r31, HOST_NV_GPR(r31)(r1)
+	stw r14, HOST_NV_GPR(R14)(r1)
+	stw r15, HOST_NV_GPR(R15)(r1)
+	stw r16, HOST_NV_GPR(R16)(r1)
+	stw r17, HOST_NV_GPR(R17)(r1)
+	stw r18, HOST_NV_GPR(R18)(r1)
+	stw r19, HOST_NV_GPR(R19)(r1)
+	stw r20, HOST_NV_GPR(R20)(r1)
+	stw r21, HOST_NV_GPR(R21)(r1)
+	stw r22, HOST_NV_GPR(R22)(r1)
+	stw r23, HOST_NV_GPR(R23)(r1)
+	stw r24, HOST_NV_GPR(R24)(r1)
+	stw r25, HOST_NV_GPR(R25)(r1)
+	stw r26, HOST_NV_GPR(R26)(r1)
+	stw r27, HOST_NV_GPR(R27)(r1)
+	stw r28, HOST_NV_GPR(R28)(r1)
+	stw r29, HOST_NV_GPR(R29)(r1)
+	stw r30, HOST_NV_GPR(R30)(r1)
+	stw r31, HOST_NV_GPR(R31)(r1)

 	/* Load guest non-volatiles. */
-	lwz r14, VCPU_GPR(r14)(r4)
-	lwz r15, VCPU_GPR(r15)(r4)
-	lwz r16, VCPU_GPR(r16)(r4)
-	lwz r17, VCPU_GPR(r17)(r4)
-	lwz r18, VCPU_GPR(r18)(r4)
-	lwz r19, VCPU_GPR(r19)(r4)
-	lwz r20, VCPU_GPR(r20)(r4)
-	lwz r21, VCPU_GPR(r21)(r4)
-	lwz r22, VCPU_GPR(r22)(r4)
-	lwz r23, VCPU_GPR(r23)(r4)
-	lwz r24, VCPU_GPR(r24)(r4)
-	lwz r25, VCPU_GPR(r25)(r4)
-	lwz r26, VCPU_GPR(r26)(r4)
-	lwz r27, VCPU_GPR(r27)(r4)
-	lwz r28, VCPU_GPR(r28)(r4)
-	lwz r29, VCPU_GPR(r29)(r4)
-	lwz r30, VCPU_GPR(r30)(r4)
-	lwz r31, VCPU_GPR(r31)(r4)
+	lwz r14, VCPU_GPR(R14)(r4)
+	lwz r15, VCPU_GPR(R15)(r4)
+	lwz r16, VCPU_GPR(R16)(r4)
+	lwz r17, VCPU_GPR(R17)(r4)
+	lwz r18, VCPU_GPR(R18)(r4)
+	lwz r19, VCPU_GPR(R19)(r4)
+	lwz r20, VCPU_GPR(R20)(r4)
+	lwz r21, VCPU_GPR(R21)(r4)
+	lwz r22, VCPU_GPR(R22)(r4)
+	lwz r23, VCPU_GPR(R23)(r4)
+	lwz r24, VCPU_GPR(R24)(r4)
+	lwz r25, VCPU_GPR(R25)(r4)
+	lwz r26, VCPU_GPR(R26)(r4)
+	lwz r27, VCPU_GPR(R27)(r4)
+	lwz r28, VCPU_GPR(R28)(r4)
+	lwz r29, VCPU_GPR(R29)(r4)
+	lwz r30, VCPU_GPR(R30)(r4)
+	lwz r31, VCPU_GPR(R31)(r4)

 #ifdef CONFIG_SPE
 	/* save host SPEFSCR and load guest SPEFSCR */
@@ -386,13 +386,13 @@ lightweight_exit:
 #endif

 	/* Load some guest volatiles. */
-	lwz r0, VCPU_GPR(r0)(r4)
-	lwz r2, VCPU_GPR(r2)(r4)
-	lwz r9, VCPU_GPR(r9)(r4)
-	lwz r10, VCPU_GPR(r10)(r4)
-	lwz r11, VCPU_GPR(r11)(r4)
-	lwz r12, VCPU_GPR(r12)(r4)
-	lwz r13, VCPU_GPR(r13)(r4)
+	lwz r0, VCPU_GPR(R0)(r4)
+	lwz r2, VCPU_GPR(R2)(r4)
+	lwz r9, VCPU_GPR(R9)(r4)
+	lwz r10, VCPU_GPR(R10)(r4)
+	lwz r11, VCPU_GPR(R11)(r4)
+	lwz r12, VCPU_GPR(R12)(r4)
+	lwz r13, VCPU_GPR(R13)(r4)
 	lwz r3, VCPU_LR(r4)
 	mtlr r3
 	lwz r3, VCPU_XER(r4)
@@ -411,7 +411,7 @@ lightweight_exit:

 	/* Can't switch the stack pointer until after IVPR is switched,
 	 * because host interrupt handlers would get confused. */
-	lwz r1, VCPU_GPR(r1)(r4)
+	lwz r1, VCPU_GPR(R1)(r4)

 	/*
 	 * Host interrupt handlers may have clobbered these
@@ -449,10 +449,10 @@ lightweight_exit:
 	mtcr r5
 	mtsrr0 r6
 	mtsrr1 r7
-	lwz r5, VCPU_GPR(r5)(r4)
-	lwz r6, VCPU_GPR(r6)(r4)
-	lwz r7, VCPU_GPR(r7)(r4)
-	lwz r8, VCPU_GPR(r8)(r4)
+	lwz r5, VCPU_GPR(R5)(r4)
+	lwz r6, VCPU_GPR(R6)(r4)
+	lwz r7, VCPU_GPR(R7)(r4)
+	lwz r8, VCPU_GPR(R8)(r4)

 	/* Clear any debug events which occurred since we disabled MSR[DE].
 	 * XXX This gives us a 3-instruction window in which a breakpoint
@@ -461,8 +461,8 @@ lightweight_exit:
 	ori r3, r3, 0xffff
 	mtspr SPRN_DBSR, r3

-	lwz r3, VCPU_GPR(r3)(r4)
-	lwz r4, VCPU_GPR(r4)(r4)
+	lwz r3, VCPU_GPR(R3)(r4)
+	lwz r4, VCPU_GPR(R4)(r4)
 	rfi

 #ifdef CONFIG_SPE
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..a623b1d32d3e 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -67,15 +67,15 @@
  */
 .macro kvm_handler_common intno, srr0, flags
 	/* Restore host stack pointer */
-	PPC_STL r1, VCPU_GPR(r1)(r4)
-	PPC_STL r2, VCPU_GPR(r2)(r4)
+	PPC_STL r1, VCPU_GPR(R1)(r4)
+	PPC_STL r2, VCPU_GPR(R2)(r4)
 	PPC_LL r1, VCPU_HOST_STACK(r4)
 	PPC_LL r2, HOST_R2(r1)

 	mfspr r10, SPRN_PID
 	lwz r8, VCPU_HOST_PID(r4)
 	PPC_LL r11, VCPU_SHARED(r4)
-	PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+	PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
 	li r14, \intno

 	stw r10, VCPU_GUEST_PID(r4)
@@ -137,27 +137,27 @@
 	 */

 	mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
-	PPC_STL r15, VCPU_GPR(r15)(r4)
-	PPC_STL r16, VCPU_GPR(r16)(r4)
-	PPC_STL r17, VCPU_GPR(r17)(r4)
-	PPC_STL r18, VCPU_GPR(r18)(r4)
-	PPC_STL r19, VCPU_GPR(r19)(r4)
+	PPC_STL r15, VCPU_GPR(R15)(r4)
+	PPC_STL r16, VCPU_GPR(R16)(r4)
+	PPC_STL r17, VCPU_GPR(R17)(r4)
+	PPC_STL r18, VCPU_GPR(R18)(r4)
+	PPC_STL r19, VCPU_GPR(R19)(r4)
 	mr r8, r3
-	PPC_STL r20, VCPU_GPR(r20)(r4)
+	PPC_STL r20, VCPU_GPR(R20)(r4)
 	rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
-	PPC_STL r21, VCPU_GPR(r21)(r4)
+	PPC_STL r21, VCPU_GPR(R21)(r4)
 	rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
-	PPC_STL r22, VCPU_GPR(r22)(r4)
+	PPC_STL r22, VCPU_GPR(R22)(r4)
 	rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
-	PPC_STL r23, VCPU_GPR(r23)(r4)
-	PPC_STL r24, VCPU_GPR(r24)(r4)
-	PPC_STL r25, VCPU_GPR(r25)(r4)
-	PPC_STL r26, VCPU_GPR(r26)(r4)
-	PPC_STL r27, VCPU_GPR(r27)(r4)
-	PPC_STL r28, VCPU_GPR(r28)(r4)
-	PPC_STL r29, VCPU_GPR(r29)(r4)
-	PPC_STL r30, VCPU_GPR(r30)(r4)
-	PPC_STL r31, VCPU_GPR(r31)(r4)
+	PPC_STL r23, VCPU_GPR(R23)(r4)
+	PPC_STL r24, VCPU_GPR(R24)(r4)
+	PPC_STL r25, VCPU_GPR(R25)(r4)
+	PPC_STL r26, VCPU_GPR(R26)(r4)
+	PPC_STL r27, VCPU_GPR(R27)(r4)
+	PPC_STL r28, VCPU_GPR(R28)(r4)
+	PPC_STL r29, VCPU_GPR(R29)(r4)
+	PPC_STL r30, VCPU_GPR(R30)(r4)
+	PPC_STL r31, VCPU_GPR(R31)(r4)
 	mtspr SPRN_EPLC, r8

 	/* disable preemption, so we are sure we hit the fixup handler */
@@ -211,24 +211,24 @@
 .macro kvm_handler intno srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	GET_VCPU(r11, r10)
-	PPC_STL r3, VCPU_GPR(r3)(r11)
+	PPC_STL r3, VCPU_GPR(R3)(r11)
 	mfspr r3, SPRN_SPRG_RSCRATCH0
-	PPC_STL r4, VCPU_GPR(r4)(r11)
+	PPC_STL r4, VCPU_GPR(R4)(r11)
 	PPC_LL r4, THREAD_NORMSAVE(0)(r10)
-	PPC_STL r5, VCPU_GPR(r5)(r11)
+	PPC_STL r5, VCPU_GPR(R5)(r11)
 	stw r13, VCPU_CR(r11)
 	mfspr r5, \srr0
-	PPC_STL r3, VCPU_GPR(r10)(r11)
+	PPC_STL r3, VCPU_GPR(R10)(r11)
 	PPC_LL r3, THREAD_NORMSAVE(2)(r10)
-	PPC_STL r6, VCPU_GPR(r6)(r11)
-	PPC_STL r4, VCPU_GPR(r11)(r11)
+	PPC_STL r6, VCPU_GPR(R6)(r11)
+	PPC_STL r4, VCPU_GPR(R11)(r11)
 	mfspr r6, \srr1
-	PPC_STL r7, VCPU_GPR(r7)(r11)
-	PPC_STL r8, VCPU_GPR(r8)(r11)
-	PPC_STL r9, VCPU_GPR(r9)(r11)
-	PPC_STL r3, VCPU_GPR(r13)(r11)
+	PPC_STL r7, VCPU_GPR(R7)(r11)
+	PPC_STL r8, VCPU_GPR(R8)(r11)
+	PPC_STL r9, VCPU_GPR(R9)(r11)
+	PPC_STL r3, VCPU_GPR(R13)(r11)
 	mfctr r7
-	PPC_STL r12, VCPU_GPR(r12)(r11)
+	PPC_STL r12, VCPU_GPR(R12)(r11)
 	PPC_STL r7, VCPU_CTR(r11)
 	mr r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +238,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	mfspr r10, SPRN_SPRG_THREAD
 	GET_VCPU(r11, r10)
-	PPC_STL r3, VCPU_GPR(r3)(r11)
+	PPC_STL r3, VCPU_GPR(R3)(r11)
 	mfspr r3, \scratch
-	PPC_STL r4, VCPU_GPR(r4)(r11)
+	PPC_STL r4, VCPU_GPR(R4)(r11)
 	PPC_LL r4, GPR9(r8)
-	PPC_STL r5, VCPU_GPR(r5)(r11)
+	PPC_STL r5, VCPU_GPR(R5)(r11)
 	stw r9, VCPU_CR(r11)
 	mfspr r5, \srr0
-	PPC_STL r3, VCPU_GPR(r8)(r11)
+	PPC_STL r3, VCPU_GPR(R8)(r11)
 	PPC_LL r3, GPR10(r8)
-	PPC_STL r6, VCPU_GPR(r6)(r11)
-	PPC_STL r4, VCPU_GPR(r9)(r11)
+	PPC_STL r6, VCPU_GPR(R6)(r11)
+	PPC_STL r4, VCPU_GPR(R9)(r11)
 	mfspr r6, \srr1
 	PPC_LL r4, GPR11(r8)
-	PPC_STL r7, VCPU_GPR(r7)(r11)
-	PPC_STL r3, VCPU_GPR(r10)(r11)
+	PPC_STL r7, VCPU_GPR(R7)(r11)
+	PPC_STL r3, VCPU_GPR(R10)(r11)
 	mfctr r7
-	PPC_STL r12, VCPU_GPR(r12)(r11)
-	PPC_STL r13, VCPU_GPR(r13)(r11)
-	PPC_STL r4, VCPU_GPR(r11)(r11)
+	PPC_STL r12, VCPU_GPR(R12)(r11)
+	PPC_STL r13, VCPU_GPR(R13)(r11)
+	PPC_STL r4, VCPU_GPR(R11)(r11)
 	PPC_STL r7, VCPU_CTR(r11)
 	mr r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -310,7 +310,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
 _GLOBAL(kvmppc_resume_host)
 	/* Save remaining volatile guest register state to vcpu. */
 	mfspr r3, SPRN_VRSAVE
-	PPC_STL r0, VCPU_GPR(r0)(r4)
+	PPC_STL r0, VCPU_GPR(R0)(r4)
 	mflr r5
 	mfspr r6, SPRN_SPRG4
 	PPC_STL r5, VCPU_LR(r4)
@@ -358,27 +358,27 @@ _GLOBAL(kvmppc_resume_host)

 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr r4, r14
-	PPC_LL r14, VCPU_GPR(r14)(r4)
+	PPC_LL r14, VCPU_GPR(R14)(r4)

 	andi. r5, r3, RESUME_FLAG_NV
 	beq skip_nv_load
-	PPC_LL r15, VCPU_GPR(r15)(r4)
-	PPC_LL r16, VCPU_GPR(r16)(r4)
-	PPC_LL r17, VCPU_GPR(r17)(r4)
-	PPC_LL r18, VCPU_GPR(r18)(r4)
-	PPC_LL r19, VCPU_GPR(r19)(r4)
-	PPC_LL r20, VCPU_GPR(r20)(r4)
-	PPC_LL r21, VCPU_GPR(r21)(r4)
-	PPC_LL r22, VCPU_GPR(r22)(r4)
-	PPC_LL r23, VCPU_GPR(r23)(r4)
-	PPC_LL r24, VCPU_GPR(r24)(r4)
-	PPC_LL r25, VCPU_GPR(r25)(r4)
-	PPC_LL r26, VCPU_GPR(r26)(r4)
-	PPC_LL r27, VCPU_GPR(r27)(r4)
-	PPC_LL r28, VCPU_GPR(r28)(r4)
-	PPC_LL r29, VCPU_GPR(r29)(r4)
-	PPC_LL r30, VCPU_GPR(r30)(r4)
-	PPC_LL r31, VCPU_GPR(r31)(r4)
+	PPC_LL r15, VCPU_GPR(R15)(r4)
+	PPC_LL r16, VCPU_GPR(R16)(r4)
+	PPC_LL r17, VCPU_GPR(R17)(r4)
+	PPC_LL r18, VCPU_GPR(R18)(r4)
+	PPC_LL r19, VCPU_GPR(R19)(r4)
+	PPC_LL r20, VCPU_GPR(R20)(r4)
+	PPC_LL r21, VCPU_GPR(R21)(r4)
+	PPC_LL r22, VCPU_GPR(R22)(r4)
+	PPC_LL r23, VCPU_GPR(R23)(r4)
+	PPC_LL r24, VCPU_GPR(R24)(r4)
+	PPC_LL r25, VCPU_GPR(R25)(r4)
+	PPC_LL r26, VCPU_GPR(R26)(r4)
+	PPC_LL r27, VCPU_GPR(R27)(r4)
+	PPC_LL r28, VCPU_GPR(R28)(r4)
+	PPC_LL r29, VCPU_GPR(R29)(r4)
+	PPC_LL r30, VCPU_GPR(R30)(r4)
+	PPC_LL r31, VCPU_GPR(R31)(r4)
 skip_nv_load:
 	/* Should we return to the guest? */
 	andi. r5, r3, RESUME_FLAG_HOST
@@ -396,23 +396,23 @@ heavyweight_exit:
 	 * non-volatiles.
 	 */

-	PPC_STL r15, VCPU_GPR(r15)(r4)
-	PPC_STL r16, VCPU_GPR(r16)(r4)
-	PPC_STL r17, VCPU_GPR(r17)(r4)
-	PPC_STL r18, VCPU_GPR(r18)(r4)
-	PPC_STL r19, VCPU_GPR(r19)(r4)
-	PPC_STL r20, VCPU_GPR(r20)(r4)
-	PPC_STL r21, VCPU_GPR(r21)(r4)
-	PPC_STL r22, VCPU_GPR(r22)(r4)
-	PPC_STL r23, VCPU_GPR(r23)(r4)
-	PPC_STL r24, VCPU_GPR(r24)(r4)
-	PPC_STL r25, VCPU_GPR(r25)(r4)
-	PPC_STL r26, VCPU_GPR(r26)(r4)
-	PPC_STL r27, VCPU_GPR(r27)(r4)
-	PPC_STL r28, VCPU_GPR(r28)(r4)
-	PPC_STL r29, VCPU_GPR(r29)(r4)
-	PPC_STL r30, VCPU_GPR(r30)(r4)
-	PPC_STL r31, VCPU_GPR(r31)(r4)
+	PPC_STL r15, VCPU_GPR(R15)(r4)
+	PPC_STL r16, VCPU_GPR(R16)(r4)
+	PPC_STL r17, VCPU_GPR(R17)(r4)
+	PPC_STL r18, VCPU_GPR(R18)(r4)
+	PPC_STL r19, VCPU_GPR(R19)(r4)
+	PPC_STL r20, VCPU_GPR(R20)(r4)
+	PPC_STL r21, VCPU_GPR(R21)(r4)
+	PPC_STL r22, VCPU_GPR(R22)(r4)
+	PPC_STL r23, VCPU_GPR(R23)(r4)
+	PPC_STL r24, VCPU_GPR(R24)(r4)
+	PPC_STL r25, VCPU_GPR(R25)(r4)
+	PPC_STL r26, VCPU_GPR(R26)(r4)
+	PPC_STL r27, VCPU_GPR(R27)(r4)
+	PPC_STL r28, VCPU_GPR(R28)(r4)
+	PPC_STL r29, VCPU_GPR(R29)(r4)
+	PPC_STL r30, VCPU_GPR(R30)(r4)
+	PPC_STL r31, VCPU_GPR(R31)(r4)

 	/* Load host non-volatile register state from host stack. */
 	PPC_LL r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +478,24 @@ _GLOBAL(__kvmppc_vcpu_run)
 	PPC_STL r31, HOST_NV_GPR(r31)(r1)

 	/* Load guest non-volatiles. */
-	PPC_LL r14, VCPU_GPR(r14)(r4)
-	PPC_LL r15, VCPU_GPR(r15)(r4)
-	PPC_LL r16, VCPU_GPR(r16)(r4)
-	PPC_LL r17, VCPU_GPR(r17)(r4)
-	PPC_LL r18, VCPU_GPR(r18)(r4)
-	PPC_LL r19, VCPU_GPR(r19)(r4)
-	PPC_LL r20, VCPU_GPR(r20)(r4)
-	PPC_LL r21, VCPU_GPR(r21)(r4)
-	PPC_LL r22, VCPU_GPR(r22)(r4)
-	PPC_LL r23, VCPU_GPR(r23)(r4)
-	PPC_LL r24, VCPU_GPR(r24)(r4)
-	PPC_LL r25, VCPU_GPR(r25)(r4)
-	PPC_LL r26, VCPU_GPR(r26)(r4)
-	PPC_LL r27, VCPU_GPR(r27)(r4)
-	PPC_LL r28, VCPU_GPR(r28)(r4)
-	PPC_LL r29, VCPU_GPR(r29)(r4)
-	PPC_LL r30, VCPU_GPR(r30)(r4)
-	PPC_LL r31, VCPU_GPR(r31)(r4)
+	PPC_LL r14, VCPU_GPR(R14)(r4)
+	PPC_LL r15, VCPU_GPR(R15)(r4)
+	PPC_LL r16, VCPU_GPR(R16)(r4)
+	PPC_LL r17, VCPU_GPR(R17)(r4)
+	PPC_LL r18, VCPU_GPR(R18)(r4)
+	PPC_LL r19, VCPU_GPR(R19)(r4)
+	PPC_LL r20, VCPU_GPR(R20)(r4)
+	PPC_LL r21, VCPU_GPR(R21)(r4)
+	PPC_LL r22, VCPU_GPR(R22)(r4)
+	PPC_LL r23, VCPU_GPR(R23)(r4)
+	PPC_LL r24, VCPU_GPR(R24)(r4)
+	PPC_LL r25, VCPU_GPR(R25)(r4)
+	PPC_LL r26, VCPU_GPR(R26)(r4)
+	PPC_LL r27, VCPU_GPR(R27)(r4)
+	PPC_LL r28, VCPU_GPR(R28)(r4)
+	PPC_LL r29, VCPU_GPR(R29)(r4)
+	PPC_LL r30, VCPU_GPR(R30)(r4)
+	PPC_LL r31, VCPU_GPR(R31)(r4)


 lightweight_exit:
@@ -554,13 +554,13 @@ lightweight_exit:
 	lwz r7, VCPU_CR(r4)
 	PPC_LL r8, VCPU_PC(r4)
 	PPC_LD(r9, VCPU_SHARED_MSR, r11)
-	PPC_LL r0, VCPU_GPR(r0)(r4)
-	PPC_LL r1, VCPU_GPR(r1)(r4)
-	PPC_LL r2, VCPU_GPR(r2)(r4)
-	PPC_LL r10, VCPU_GPR(r10)(r4)
-	PPC_LL r11, VCPU_GPR(r11)(r4)
-	PPC_LL r12, VCPU_GPR(r12)(r4)
-	PPC_LL r13, VCPU_GPR(r13)(r4)
+	PPC_LL r0, VCPU_GPR(R0)(r4)
+	PPC_LL r1, VCPU_GPR(R1)(r4)
+	PPC_LL r2, VCPU_GPR(R2)(r4)
+	PPC_LL r10, VCPU_GPR(R10)(r4)
+	PPC_LL r11, VCPU_GPR(R11)(r4)
+	PPC_LL r12, VCPU_GPR(R12)(r4)
+	PPC_LL r13, VCPU_GPR(R13)(r4)
 	mtlr r3
 	mtxer r5
 	mtctr r6
@@ -586,12 +586,12 @@ lightweight_exit:
 	mtcr r7

 	/* Finish loading guest volatiles and jump to guest. */
-	PPC_LL r5, VCPU_GPR(r5)(r4)
-	PPC_LL r6, VCPU_GPR(r6)(r4)
-	PPC_LL r7, VCPU_GPR(r7)(r4)
-	PPC_LL r8, VCPU_GPR(r8)(r4)
-	PPC_LL r9, VCPU_GPR(r9)(r4)
+	PPC_LL r5, VCPU_GPR(R5)(r4)
+	PPC_LL r6, VCPU_GPR(R6)(r4)
+	PPC_LL r7, VCPU_GPR(R7)(r4)
+	PPC_LL r8, VCPU_GPR(R8)(r4)
+	PPC_LL r9, VCPU_GPR(R9)(r4)

-	PPC_LL r3, VCPU_GPR(r3)(r4)
-	PPC_LL r4, VCPU_GPR(r4)(r4)
+	PPC_LL r3, VCPU_GPR(R3)(r4)
+	PPC_LL r4, VCPU_GPR(R4)(r4)
 	rfi
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 18245af38aea..7ca6871c11ee 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -114,9 +114,9 @@ _GLOBAL(csum_partial)
 	mtctr r6

 	stdu r1,-STACKFRAMESIZE(r1)
-	std r14,STK_REG(r14)(r1)
-	std r15,STK_REG(r15)(r1)
-	std r16,STK_REG(r16)(r1)
+	std r14,STK_REG(R14)(r1)
+	std r15,STK_REG(R15)(r1)
+	std r16,STK_REG(R16)(r1)

 	ld r6,0(r3)
 	ld r9,8(r3)
@@ -175,9 +175,9 @@ _GLOBAL(csum_partial)
 	adde r0,r0,r15
 	adde r0,r0,r16

-	ld r14,STK_REG(r14)(r1)
-	ld r15,STK_REG(r15)(r1)
-	ld r16,STK_REG(r16)(r1)
+	ld r14,STK_REG(R14)(r1)
+	ld r15,STK_REG(R15)(r1)
+	ld r16,STK_REG(R16)(r1)
 	addi r1,r1,STACKFRAMESIZE

 	andi. r4,r4,63
@@ -299,9 +299,9 @@ dest; sth r6,0(r4)
 	mtctr r6

 	stdu r1,-STACKFRAMESIZE(r1)
-	std r14,STK_REG(r14)(r1)
-	std r15,STK_REG(r15)(r1)
-	std r16,STK_REG(r16)(r1)
+	std r14,STK_REG(R14)(r1)
+	std r15,STK_REG(R15)(r1)
+	std r16,STK_REG(R16)(r1)

 source;	ld r6,0(r3)
 source;	ld r9,8(r3)
@@ -382,9 +382,9 @@ dest; std r16,56(r4)
 	adde r0,r0,r15
 	adde r0,r0,r16

-	ld r14,STK_REG(r14)(r1)
-	ld r15,STK_REG(r15)(r1)
-	ld r16,STK_REG(r16)(r1)
+	ld r14,STK_REG(R14)(r1)
+	ld r15,STK_REG(R15)(r1)
+	ld r16,STK_REG(R16)(r1)
 	addi r1,r1,STACKFRAMESIZE

 	andi. r5,r5,63
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 01e2b5db325f..a2126cebb957 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -113,13 +113,13 @@ _GLOBAL(copypage_power7)
 #endif

 .Lnonvmx_copy:
-	std r14,STK_REG(r14)(r1)
-	std r15,STK_REG(r15)(r1)
-	std r16,STK_REG(r16)(r1)
-	std r17,STK_REG(r17)(r1)
-	std r18,STK_REG(r18)(r1)
-	std r19,STK_REG(r19)(r1)
-	std r20,STK_REG(r20)(r1)
+	std r14,STK_REG(R14)(r1)
+	std r15,STK_REG(R15)(r1)
+	std r16,STK_REG(R16)(r1)
+	std r17,STK_REG(R17)(r1)
+	std r18,STK_REG(R18)(r1)
+	std r19,STK_REG(R19)(r1)
+	std r20,STK_REG(R20)(r1)

 1:	ld r0,0(r4)
 	ld r5,8(r4)
@@ -157,12 +157,12 @@ _GLOBAL(copypage_power7)
 	addi r3,r3,128
 	bdnz 1b

-	ld r14,STK_REG(r14)(r1)
-	ld r15,STK_REG(r15)(r1)
-	ld r16,STK_REG(r16)(r1)
-	ld r17,STK_REG(r17)(r1)
-	ld r18,STK_REG(r18)(r1)
-	ld r19,STK_REG(r19)(r1)
-	ld r20,STK_REG(r20)(r1)
+	ld r14,STK_REG(R14)(r1)
+	ld r15,STK_REG(R15)(r1)
+	ld r16,STK_REG(R16)(r1)
+	ld r17,STK_REG(R17)(r1)
+	ld r18,STK_REG(R18)(r1)
+	ld r19,STK_REG(R19)(r1)
+	ld r20,STK_REG(R20)(r1)
 	addi r1,r1,STACKFRAMESIZE
 	blr
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index d73a59014900..f47d05a51eb8 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -30,7 +30,7 @@ _GLOBAL(__copy_tofrom_user_base)
 	dcbt 0,r4
 	beq .Lcopy_page_4K
 	andi. r6,r6,7
-	PPC_MTOCRF(0x01,r5)
+	PPC_MTOCRF(0x01,R5)
 	blt cr1,.Lshort_copy
 	/* Below we want to nop out the bne if we're on a CPU that has the
 	 * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
@@ -186,7 +186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	blr

 .Ldst_unaligned:
-	PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
+	PPC_MTOCRF(0x01,R6) /* put #bytes to 8B bdry into cr7 */
 	subf r5,r6,r5
 	li r7,0
 	cmpldi cr1,r5,16
@@ -201,7 +201,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
201 | 2: bf cr7*4+1,3f | 201 | 2: bf cr7*4+1,3f |
202 | 37: lwzx r0,r7,r4 | 202 | 37: lwzx r0,r7,r4 |
203 | 83: stwx r0,r7,r3 | 203 | 83: stwx r0,r7,r3 |
204 | 3: PPC_MTOCRF(0x01,r5) | 204 | 3: PPC_MTOCRF(0x01,R5) |
205 | add r4,r6,r4 | 205 | add r4,r6,r4 |
206 | add r3,r6,r3 | 206 | add r3,r6,r3 |
207 | b .Ldst_aligned | 207 | b .Ldst_aligned |
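
PPC_MTOCRF is typical of the macros that splice a register number straight into a hand-assembled instruction word, which is why it must see a bare number rather than an assembler register name. Roughly (a sketch; the exact field layout lives in ppc-opcode.h, and PPC_INST_MTOCRF is the opcode template):

        #define __PPC_RS(s)     (((s) & 0x1f) << 21)
        /* emit mtocrf FXM,RS as a raw word; both fields are OR-ed in */
        #define PPC_MTOCRF(FXM, RS) stringify_in_c(.long PPC_INST_MTOCRF | \
                                        (((FXM) & 0xff) << 12) | __PPC_RS(RS))

Here it is used with a one-bit FXM mask to load the low bits of a byte count into cr7, steering the bf/bt copy tail that follows.
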
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index 48e3f8c5768c..c8680c0cc3e4 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S | |||
@@ -57,9 +57,9 @@ | |||
57 | 57 | ||
58 | 58 | ||
59 | .Ldo_err4: | 59 | .Ldo_err4: |
60 | ld r16,STK_REG(r16)(r1) | 60 | ld r16,STK_REG(R16)(r1) |
61 | ld r15,STK_REG(r15)(r1) | 61 | ld r15,STK_REG(R15)(r1) |
62 | ld r14,STK_REG(r14)(r1) | 62 | ld r14,STK_REG(R14)(r1) |
63 | .Ldo_err3: | 63 | .Ldo_err3: |
64 | bl .exit_vmx_usercopy | 64 | bl .exit_vmx_usercopy |
65 | ld r0,STACKFRAMESIZE+16(r1) | 65 | ld r0,STACKFRAMESIZE+16(r1) |
@@ -68,15 +68,15 @@ | |||
68 | #endif /* CONFIG_ALTIVEC */ | 68 | #endif /* CONFIG_ALTIVEC */ |
69 | 69 | ||
70 | .Ldo_err2: | 70 | .Ldo_err2: |
71 | ld r22,STK_REG(r22)(r1) | 71 | ld r22,STK_REG(R22)(r1) |
72 | ld r21,STK_REG(r21)(r1) | 72 | ld r21,STK_REG(R21)(r1) |
73 | ld r20,STK_REG(r20)(r1) | 73 | ld r20,STK_REG(R20)(r1) |
74 | ld r19,STK_REG(r19)(r1) | 74 | ld r19,STK_REG(R19)(r1) |
75 | ld r18,STK_REG(r18)(r1) | 75 | ld r18,STK_REG(R18)(r1) |
76 | ld r17,STK_REG(r17)(r1) | 76 | ld r17,STK_REG(R17)(r1) |
77 | ld r16,STK_REG(r16)(r1) | 77 | ld r16,STK_REG(R16)(r1) |
78 | ld r15,STK_REG(r15)(r1) | 78 | ld r15,STK_REG(R15)(r1) |
79 | ld r14,STK_REG(r14)(r1) | 79 | ld r14,STK_REG(R14)(r1) |
80 | .Lexit: | 80 | .Lexit: |
81 | addi r1,r1,STACKFRAMESIZE | 81 | addi r1,r1,STACKFRAMESIZE |
82 | .Ldo_err1: | 82 | .Ldo_err1: |
@@ -137,15 +137,15 @@ err1; stw r0,0(r3) | |||
137 | 137 | ||
138 | mflr r0 | 138 | mflr r0 |
139 | stdu r1,-STACKFRAMESIZE(r1) | 139 | stdu r1,-STACKFRAMESIZE(r1) |
140 | std r14,STK_REG(r14)(r1) | 140 | std r14,STK_REG(R14)(r1) |
141 | std r15,STK_REG(r15)(r1) | 141 | std r15,STK_REG(R15)(r1) |
142 | std r16,STK_REG(r16)(r1) | 142 | std r16,STK_REG(R16)(r1) |
143 | std r17,STK_REG(r17)(r1) | 143 | std r17,STK_REG(R17)(r1) |
144 | std r18,STK_REG(r18)(r1) | 144 | std r18,STK_REG(R18)(r1) |
145 | std r19,STK_REG(r19)(r1) | 145 | std r19,STK_REG(R19)(r1) |
146 | std r20,STK_REG(r20)(r1) | 146 | std r20,STK_REG(R20)(r1) |
147 | std r21,STK_REG(r21)(r1) | 147 | std r21,STK_REG(R21)(r1) |
148 | std r22,STK_REG(r22)(r1) | 148 | std r22,STK_REG(R22)(r1) |
149 | std r0,STACKFRAMESIZE+16(r1) | 149 | std r0,STACKFRAMESIZE+16(r1) |
150 | 150 | ||
151 | srdi r6,r5,7 | 151 | srdi r6,r5,7 |
@@ -192,15 +192,15 @@ err2; std r21,120(r3) | |||
192 | 192 | ||
193 | clrldi r5,r5,(64-7) | 193 | clrldi r5,r5,(64-7) |
194 | 194 | ||
195 | ld r14,STK_REG(r14)(r1) | 195 | ld r14,STK_REG(R14)(r1) |
196 | ld r15,STK_REG(r15)(r1) | 196 | ld r15,STK_REG(R15)(r1) |
197 | ld r16,STK_REG(r16)(r1) | 197 | ld r16,STK_REG(R16)(r1) |
198 | ld r17,STK_REG(r17)(r1) | 198 | ld r17,STK_REG(R17)(r1) |
199 | ld r18,STK_REG(r18)(r1) | 199 | ld r18,STK_REG(R18)(r1) |
200 | ld r19,STK_REG(r19)(r1) | 200 | ld r19,STK_REG(R19)(r1) |
201 | ld r20,STK_REG(r20)(r1) | 201 | ld r20,STK_REG(R20)(r1) |
202 | ld r21,STK_REG(r21)(r1) | 202 | ld r21,STK_REG(R21)(r1) |
203 | ld r22,STK_REG(r22)(r1) | 203 | ld r22,STK_REG(R22)(r1) |
204 | addi r1,r1,STACKFRAMESIZE | 204 | addi r1,r1,STACKFRAMESIZE |
205 | 205 | ||
206 | /* Up to 127B to go */ | 206 | /* Up to 127B to go */ |
@@ -440,9 +440,9 @@ err3; stvx vr0,r3,r11 | |||
440 | 7: sub r5,r5,r6 | 440 | 7: sub r5,r5,r6 |
441 | srdi r6,r5,7 | 441 | srdi r6,r5,7 |
442 | 442 | ||
443 | std r14,STK_REG(r14)(r1) | 443 | std r14,STK_REG(R14)(r1) |
444 | std r15,STK_REG(r15)(r1) | 444 | std r15,STK_REG(R15)(r1) |
445 | std r16,STK_REG(r16)(r1) | 445 | std r16,STK_REG(R16)(r1) |
446 | 446 | ||
447 | li r12,64 | 447 | li r12,64 |
448 | li r14,80 | 448 | li r14,80 |
@@ -477,9 +477,9 @@ err4; stvx vr0,r3,r16 | |||
477 | addi r3,r3,128 | 477 | addi r3,r3,128 |
478 | bdnz 8b | 478 | bdnz 8b |
479 | 479 | ||
480 | ld r14,STK_REG(r14)(r1) | 480 | ld r14,STK_REG(R14)(r1) |
481 | ld r15,STK_REG(r15)(r1) | 481 | ld r15,STK_REG(R15)(r1) |
482 | ld r16,STK_REG(r16)(r1) | 482 | ld r16,STK_REG(R16)(r1) |
483 | 483 | ||
484 | /* Up to 127B to go */ | 484 | /* Up to 127B to go */ |
485 | clrldi r5,r5,(64-7) | 485 | clrldi r5,r5,(64-7) |
@@ -625,9 +625,9 @@ err3; stvx vr11,r3,r11 | |||
625 | 7: sub r5,r5,r6 | 625 | 7: sub r5,r5,r6 |
626 | srdi r6,r5,7 | 626 | srdi r6,r5,7 |
627 | 627 | ||
628 | std r14,STK_REG(r14)(r1) | 628 | std r14,STK_REG(R14)(r1) |
629 | std r15,STK_REG(r15)(r1) | 629 | std r15,STK_REG(R15)(r1) |
630 | std r16,STK_REG(r16)(r1) | 630 | std r16,STK_REG(R16)(r1) |
631 | 631 | ||
632 | li r12,64 | 632 | li r12,64 |
633 | li r14,80 | 633 | li r14,80 |
@@ -670,9 +670,9 @@ err4; stvx vr15,r3,r16 | |||
670 | addi r3,r3,128 | 670 | addi r3,r3,128 |
671 | bdnz 8b | 671 | bdnz 8b |
672 | 672 | ||
673 | ld r14,STK_REG(r14)(r1) | 673 | ld r14,STK_REG(R14)(r1) |
674 | ld r15,STK_REG(r15)(r1) | 674 | ld r15,STK_REG(R15)(r1) |
675 | ld r16,STK_REG(r16)(r1) | 675 | ld r16,STK_REG(R16)(r1) |
676 | 676 | ||
677 | /* Up to 127B to go */ | 677 | /* Up to 127B to go */ |
678 | clrldi r5,r5,(64-7) | 678 | clrldi r5,r5,(64-7) |
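
Once the R-names are numeric, the save/restore ladders above expand by pure arithmetic; for instance, under the STK_REG sketch earlier:

        std     r14,STK_REG(R14)(r1)    /* -> std r14,112(r1) */
        std     r16,STK_REG(R16)(r1)    /* -> std r16,128(r1) */

so each nonvolatile GPR gets its conventional doubleword slot in the local frame, and the .Ldo_err2/.Ldo_err4 paths can unwind exactly the registers the faulting copy had saved.
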
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S index fda27868cf8c..9b96ff2ecd4d 100644 --- a/arch/powerpc/lib/hweight_64.S +++ b/arch/powerpc/lib/hweight_64.S | |||
@@ -28,7 +28,7 @@ BEGIN_FTR_SECTION | |||
28 | nop | 28 | nop |
29 | nop | 29 | nop |
30 | FTR_SECTION_ELSE | 30 | FTR_SECTION_ELSE |
31 | PPC_POPCNTB(r3,r3) | 31 | PPC_POPCNTB(R3,R3) |
32 | clrldi r3,r3,64-8 | 32 | clrldi r3,r3,64-8 |
33 | blr | 33 | blr |
34 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | 34 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) |
@@ -42,14 +42,14 @@ BEGIN_FTR_SECTION | |||
42 | nop | 42 | nop |
43 | FTR_SECTION_ELSE | 43 | FTR_SECTION_ELSE |
44 | BEGIN_FTR_SECTION_NESTED(50) | 44 | BEGIN_FTR_SECTION_NESTED(50) |
45 | PPC_POPCNTB(r3,r3) | 45 | PPC_POPCNTB(R3,R3) |
46 | srdi r4,r3,8 | 46 | srdi r4,r3,8 |
47 | add r3,r4,r3 | 47 | add r3,r4,r3 |
48 | clrldi r3,r3,64-8 | 48 | clrldi r3,r3,64-8 |
49 | blr | 49 | blr |
50 | FTR_SECTION_ELSE_NESTED(50) | 50 | FTR_SECTION_ELSE_NESTED(50) |
51 | clrlwi r3,r3,16 | 51 | clrlwi r3,r3,16 |
52 | PPC_POPCNTW(r3,r3) | 52 | PPC_POPCNTW(R3,R3) |
53 | clrldi r3,r3,64-8 | 53 | clrldi r3,r3,64-8 |
54 | blr | 54 | blr |
55 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) | 55 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) |
@@ -66,7 +66,7 @@ BEGIN_FTR_SECTION | |||
66 | nop | 66 | nop |
67 | FTR_SECTION_ELSE | 67 | FTR_SECTION_ELSE |
68 | BEGIN_FTR_SECTION_NESTED(51) | 68 | BEGIN_FTR_SECTION_NESTED(51) |
69 | PPC_POPCNTB(r3,r3) | 69 | PPC_POPCNTB(R3,R3) |
70 | srdi r4,r3,16 | 70 | srdi r4,r3,16 |
71 | add r3,r4,r3 | 71 | add r3,r4,r3 |
72 | srdi r4,r3,8 | 72 | srdi r4,r3,8 |
@@ -74,7 +74,7 @@ FTR_SECTION_ELSE | |||
74 | clrldi r3,r3,64-8 | 74 | clrldi r3,r3,64-8 |
75 | blr | 75 | blr |
76 | FTR_SECTION_ELSE_NESTED(51) | 76 | FTR_SECTION_ELSE_NESTED(51) |
77 | PPC_POPCNTW(r3,r3) | 77 | PPC_POPCNTW(R3,R3) |
78 | clrldi r3,r3,64-8 | 78 | clrldi r3,r3,64-8 |
79 | blr | 79 | blr |
80 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) | 80 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) |
@@ -93,7 +93,7 @@ BEGIN_FTR_SECTION | |||
93 | nop | 93 | nop |
94 | FTR_SECTION_ELSE | 94 | FTR_SECTION_ELSE |
95 | BEGIN_FTR_SECTION_NESTED(52) | 95 | BEGIN_FTR_SECTION_NESTED(52) |
96 | PPC_POPCNTB(r3,r3) | 96 | PPC_POPCNTB(R3,R3) |
97 | srdi r4,r3,32 | 97 | srdi r4,r3,32 |
98 | add r3,r4,r3 | 98 | add r3,r4,r3 |
99 | srdi r4,r3,16 | 99 | srdi r4,r3,16 |
@@ -103,7 +103,7 @@ FTR_SECTION_ELSE | |||
103 | clrldi r3,r3,64-8 | 103 | clrldi r3,r3,64-8 |
104 | blr | 104 | blr |
105 | FTR_SECTION_ELSE_NESTED(52) | 105 | FTR_SECTION_ELSE_NESTED(52) |
106 | PPC_POPCNTD(r3,r3) | 106 | PPC_POPCNTD(R3,R3) |
107 | clrldi r3,r3,64-8 | 107 | clrldi r3,r3,64-8 |
108 | blr | 108 | blr |
109 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52) | 109 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52) |
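
The hweight changes touch another family of raw encoders: popcntb/popcntw/popcntd were hand-assembled because older binutils lacked the mnemonics. The shape is the same shift-and-mask idea (a sketch; exact PPC_INST_* values omitted):

        #define __PPC_RA(a)     (((a) & 0x1f) << 16)
        #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
                                        __PPC_RA(a) | __PPC_RS(s))

The surrounding BEGIN_FTR_SECTION/FTR_SECTION_ELSE blocks are patched at boot, so CPUs without CPU_FTR_POPCNTB fall back to the generic code, and the nested sections pick the popcntw/popcntd forms only when CPU_FTR_POPCNTD is present.
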
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S index 6a85380520b6..3abae6bc7b4b 100644 --- a/arch/powerpc/lib/ldstfp.S +++ b/arch/powerpc/lib/ldstfp.S | |||
@@ -330,13 +330,13 @@ _GLOBAL(do_lxvd2x) | |||
330 | MTMSRD(r7) | 330 | MTMSRD(r7) |
331 | isync | 331 | isync |
332 | beq cr7,1f | 332 | beq cr7,1f |
333 | STXVD2X(0,r1,r8) | 333 | STXVD2X(0,R1,R8) |
334 | 1: li r9,-EFAULT | 334 | 1: li r9,-EFAULT |
335 | 2: LXVD2X(0,0,r4) | 335 | 2: LXVD2X(0,0,R4) |
336 | li r9,0 | 336 | li r9,0 |
337 | 3: beq cr7,4f | 337 | 3: beq cr7,4f |
338 | bl put_vsr | 338 | bl put_vsr |
339 | LXVD2X(0,r1,r8) | 339 | LXVD2X(0,R1,R8) |
340 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 340 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
341 | mtlr r0 | 341 | mtlr r0 |
342 | MTMSRD(r6) | 342 | MTMSRD(r6) |
@@ -358,13 +358,13 @@ _GLOBAL(do_stxvd2x) | |||
358 | MTMSRD(r7) | 358 | MTMSRD(r7) |
359 | isync | 359 | isync |
360 | beq cr7,1f | 360 | beq cr7,1f |
361 | STXVD2X(0,r1,r8) | 361 | STXVD2X(0,R1,R8) |
362 | bl get_vsr | 362 | bl get_vsr |
363 | 1: li r9,-EFAULT | 363 | 1: li r9,-EFAULT |
364 | 2: STXVD2X(0,0,r4) | 364 | 2: STXVD2X(0,0,R4) |
365 | li r9,0 | 365 | li r9,0 |
366 | 3: beq cr7,4f | 366 | 3: beq cr7,4f |
367 | LXVD2X(0,r1,r8) | 367 | LXVD2X(0,R1,R8) |
368 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 368 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
369 | mtlr r0 | 369 | mtlr r0 |
370 | MTMSRD(r6) | 370 | MTMSRD(r6) |
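
LXVD2X/STXVD2X mix operand kinds: the first argument is a VSR number (the literal 0 here stays a literal), while the base and index are GPRs and so become R1/R8. These macros build the XX1 instruction form, roughly:

        #define __PPC_RB(b)     (((b) & 0x1f) << 11)
        #define __PPC_XS(s)     ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
        #define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
        #define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
                                        VSX_XX1((s), (a), (b)))

where __PPC_XS splits the 6-bit VSR number across the T field and the TX extension bit.
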
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index f4fcb0bc6563..886bfc780681 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S | |||
@@ -19,7 +19,7 @@ _GLOBAL(memset) | |||
19 | rlwimi r4,r4,16,0,15 | 19 | rlwimi r4,r4,16,0,15 |
20 | cmplw cr1,r5,r0 /* do we get that far? */ | 20 | cmplw cr1,r5,r0 /* do we get that far? */ |
21 | rldimi r4,r4,32,0 | 21 | rldimi r4,r4,32,0 |
22 | PPC_MTOCRF(1,r0) | 22 | PPC_MTOCRF(1,R0) |
23 | mr r6,r3 | 23 | mr r6,r3 |
24 | blt cr1,8f | 24 | blt cr1,8f |
25 | beq+ 3f /* if already 8-byte aligned */ | 25 | beq+ 3f /* if already 8-byte aligned */ |
@@ -49,7 +49,7 @@ _GLOBAL(memset) | |||
49 | bdnz 4b | 49 | bdnz 4b |
50 | 5: srwi. r0,r5,3 | 50 | 5: srwi. r0,r5,3 |
51 | clrlwi r5,r5,29 | 51 | clrlwi r5,r5,29 |
52 | PPC_MTOCRF(1,r0) | 52 | PPC_MTOCRF(1,R0) |
53 | beq 8f | 53 | beq 8f |
54 | bf 29,6f | 54 | bf 29,6f |
55 | std r4,0(r6) | 55 | std r4,0(r6) |
@@ -65,7 +65,7 @@ _GLOBAL(memset) | |||
65 | std r4,0(r6) | 65 | std r4,0(r6) |
66 | addi r6,r6,8 | 66 | addi r6,r6,8 |
67 | 8: cmpwi r5,0 | 67 | 8: cmpwi r5,0 |
68 | PPC_MTOCRF(1,r5) | 68 | PPC_MTOCRF(1,R5) |
69 | beqlr+ | 69 | beqlr+ |
70 | bf 29,9f | 70 | bf 29,9f |
71 | stw r4,0(r6) | 71 | stw r4,0(r6) |
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index d2bbbc8d7dc0..0a87b37e16fe 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S | |||
@@ -16,7 +16,7 @@ BEGIN_FTR_SECTION | |||
16 | FTR_SECTION_ELSE | 16 | FTR_SECTION_ELSE |
17 | b memcpy_power7 | 17 | b memcpy_power7 |
18 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) | 18 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) |
19 | PPC_MTOCRF(0x01,r5) | 19 | PPC_MTOCRF(0x01,R5) |
20 | cmpldi cr1,r5,16 | 20 | cmpldi cr1,r5,16 |
21 | neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry | 21 | neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry |
22 | andi. r6,r6,7 | 22 | andi. r6,r6,7 |
@@ -158,7 +158,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
158 | blr | 158 | blr |
159 | 159 | ||
160 | .Ldst_unaligned: | 160 | .Ldst_unaligned: |
161 | PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7 | 161 | PPC_MTOCRF(0x01,R6) # put #bytes to 8B bdry into cr7 |
162 | subf r5,r6,r5 | 162 | subf r5,r6,r5 |
163 | li r7,0 | 163 | li r7,0 |
164 | cmpldi cr1,r5,16 | 164 | cmpldi cr1,r5,16 |
@@ -173,7 +173,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
173 | 2: bf cr7*4+1,3f | 173 | 2: bf cr7*4+1,3f |
174 | lwzx r0,r7,r4 | 174 | lwzx r0,r7,r4 |
175 | stwx r0,r7,r3 | 175 | stwx r0,r7,r3 |
176 | 3: PPC_MTOCRF(0x01,r5) | 176 | 3: PPC_MTOCRF(0x01,R5) |
177 | add r4,r6,r4 | 177 | add r4,r6,r4 |
178 | add r3,r6,r3 | 178 | add r3,r6,r3 |
179 | b .Ldst_aligned | 179 | b .Ldst_aligned |
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 84674d897937..04524a2a0b88 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S | |||
@@ -69,15 +69,15 @@ _GLOBAL(memcpy_power7) | |||
69 | 69 | ||
70 | mflr r0 | 70 | mflr r0 |
71 | stdu r1,-STACKFRAMESIZE(r1) | 71 | stdu r1,-STACKFRAMESIZE(r1) |
72 | std r14,STK_REG(r14)(r1) | 72 | std r14,STK_REG(R14)(r1) |
73 | std r15,STK_REG(r15)(r1) | 73 | std r15,STK_REG(R15)(r1) |
74 | std r16,STK_REG(r16)(r1) | 74 | std r16,STK_REG(R16)(r1) |
75 | std r17,STK_REG(r17)(r1) | 75 | std r17,STK_REG(R17)(r1) |
76 | std r18,STK_REG(r18)(r1) | 76 | std r18,STK_REG(R18)(r1) |
77 | std r19,STK_REG(r19)(r1) | 77 | std r19,STK_REG(R19)(r1) |
78 | std r20,STK_REG(r20)(r1) | 78 | std r20,STK_REG(R20)(r1) |
79 | std r21,STK_REG(r21)(r1) | 79 | std r21,STK_REG(R21)(r1) |
80 | std r22,STK_REG(r22)(r1) | 80 | std r22,STK_REG(R22)(r1) |
81 | std r0,STACKFRAMESIZE+16(r1) | 81 | std r0,STACKFRAMESIZE+16(r1) |
82 | 82 | ||
83 | srdi r6,r5,7 | 83 | srdi r6,r5,7 |
@@ -124,15 +124,15 @@ _GLOBAL(memcpy_power7) | |||
124 | 124 | ||
125 | clrldi r5,r5,(64-7) | 125 | clrldi r5,r5,(64-7) |
126 | 126 | ||
127 | ld r14,STK_REG(r14)(r1) | 127 | ld r14,STK_REG(R14)(r1) |
128 | ld r15,STK_REG(r15)(r1) | 128 | ld r15,STK_REG(R15)(r1) |
129 | ld r16,STK_REG(r16)(r1) | 129 | ld r16,STK_REG(R16)(r1) |
130 | ld r17,STK_REG(r17)(r1) | 130 | ld r17,STK_REG(R17)(r1) |
131 | ld r18,STK_REG(r18)(r1) | 131 | ld r18,STK_REG(R18)(r1) |
132 | ld r19,STK_REG(r19)(r1) | 132 | ld r19,STK_REG(R19)(r1) |
133 | ld r20,STK_REG(r20)(r1) | 133 | ld r20,STK_REG(R20)(r1) |
134 | ld r21,STK_REG(r21)(r1) | 134 | ld r21,STK_REG(R21)(r1) |
135 | ld r22,STK_REG(r22)(r1) | 135 | ld r22,STK_REG(R22)(r1) |
136 | addi r1,r1,STACKFRAMESIZE | 136 | addi r1,r1,STACKFRAMESIZE |
137 | 137 | ||
138 | /* Up to 127B to go */ | 138 | /* Up to 127B to go */ |
@@ -343,9 +343,9 @@ _GLOBAL(memcpy_power7) | |||
343 | 7: sub r5,r5,r6 | 343 | 7: sub r5,r5,r6 |
344 | srdi r6,r5,7 | 344 | srdi r6,r5,7 |
345 | 345 | ||
346 | std r14,STK_REG(r14)(r1) | 346 | std r14,STK_REG(R14)(r1) |
347 | std r15,STK_REG(r15)(r1) | 347 | std r15,STK_REG(R15)(r1) |
348 | std r16,STK_REG(r16)(r1) | 348 | std r16,STK_REG(R16)(r1) |
349 | 349 | ||
350 | li r12,64 | 350 | li r12,64 |
351 | li r14,80 | 351 | li r14,80 |
@@ -380,9 +380,9 @@ _GLOBAL(memcpy_power7) | |||
380 | addi r3,r3,128 | 380 | addi r3,r3,128 |
381 | bdnz 8b | 381 | bdnz 8b |
382 | 382 | ||
383 | ld r14,STK_REG(r14)(r1) | 383 | ld r14,STK_REG(R14)(r1) |
384 | ld r15,STK_REG(r15)(r1) | 384 | ld r15,STK_REG(R15)(r1) |
385 | ld r16,STK_REG(r16)(r1) | 385 | ld r16,STK_REG(R16)(r1) |
386 | 386 | ||
387 | /* Up to 127B to go */ | 387 | /* Up to 127B to go */ |
388 | clrldi r5,r5,(64-7) | 388 | clrldi r5,r5,(64-7) |
@@ -529,9 +529,9 @@ _GLOBAL(memcpy_power7) | |||
529 | 7: sub r5,r5,r6 | 529 | 7: sub r5,r5,r6 |
530 | srdi r6,r5,7 | 530 | srdi r6,r5,7 |
531 | 531 | ||
532 | std r14,STK_REG(r14)(r1) | 532 | std r14,STK_REG(R14)(r1) |
533 | std r15,STK_REG(r15)(r1) | 533 | std r15,STK_REG(R15)(r1) |
534 | std r16,STK_REG(r16)(r1) | 534 | std r16,STK_REG(R16)(r1) |
535 | 535 | ||
536 | li r12,64 | 536 | li r12,64 |
537 | li r14,80 | 537 | li r14,80 |
@@ -574,9 +574,9 @@ _GLOBAL(memcpy_power7) | |||
574 | addi r3,r3,128 | 574 | addi r3,r3,128 |
575 | bdnz 8b | 575 | bdnz 8b |
576 | 576 | ||
577 | ld r14,STK_REG(r14)(r1) | 577 | ld r14,STK_REG(R14)(r1) |
578 | ld r15,STK_REG(r15)(r1) | 578 | ld r15,STK_REG(R15)(r1) |
579 | ld r16,STK_REG(r16)(r1) | 579 | ld r16,STK_REG(R16)(r1) |
580 | 580 | ||
581 | /* Up to 127B to go */ | 581 | /* Up to 127B to go */ |
582 | clrldi r5,r5,(64-7) | 582 | clrldi r5,r5,(64-7) |
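
memcpy_power7 borrows r14-r22 as extra index registers for its 128-byte unrolled loops; the 64-bit ELF ABI makes r14-r31 callee-saved, hence the std/ld ladders bracketing each loop. The slots fit comfortably inside the frame pushed by stdu r1,-STACKFRAMESIZE(r1); assuming STACKFRAMESIZE is 256 here, the worked offsets are:

        STK_REG(R14) = 112 + (14 - 14) * 8 = 112
        STK_REG(R22) = 112 + (22 - 14) * 8 = 176    /* well under 256 */
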
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index a242b5d7cbe4..113dcb0f61df 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S | |||
@@ -64,9 +64,9 @@ _GLOBAL(__hash_page_4K) | |||
64 | std r0,16(r1) | 64 | std r0,16(r1) |
65 | stdu r1,-STACKFRAMESIZE(r1) | 65 | stdu r1,-STACKFRAMESIZE(r1) |
66 | /* Save all params that we need after a function call */ | 66 | /* Save all params that we need after a function call */ |
67 | std r6,STK_PARM(r6)(r1) | 67 | std r6,STK_PARM(R6)(r1) |
68 | std r8,STK_PARM(r8)(r1) | 68 | std r8,STK_PARM(R8)(r1) |
69 | std r9,STK_PARM(r9)(r1) | 69 | std r9,STK_PARM(R9)(r1) |
70 | 70 | ||
71 | /* Save non-volatile registers. | 71 | /* Save non-volatile registers. |
72 | * r31 will hold "old PTE" | 72 | * r31 will hold "old PTE" |
@@ -75,11 +75,11 @@ _GLOBAL(__hash_page_4K) | |||
75 | * r28 is a hash value | 75 | * r28 is a hash value |
76 | * r27 is hashtab mask (maybe dynamic patched instead ?) | 76 | * r27 is hashtab mask (maybe dynamic patched instead ?) |
77 | */ | 77 | */ |
78 | std r27,STK_REG(r27)(r1) | 78 | std r27,STK_REG(R27)(r1) |
79 | std r28,STK_REG(r28)(r1) | 79 | std r28,STK_REG(R28)(r1) |
80 | std r29,STK_REG(r29)(r1) | 80 | std r29,STK_REG(R29)(r1) |
81 | std r30,STK_REG(r30)(r1) | 81 | std r30,STK_REG(R30)(r1) |
82 | std r31,STK_REG(r31)(r1) | 82 | std r31,STK_REG(R31)(r1) |
83 | 83 | ||
84 | /* Step 1: | 84 | /* Step 1: |
85 | * | 85 | * |
@@ -162,7 +162,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | |||
162 | /* At this point, r3 contains new PP bits, save them in | 162 | /* At this point, r3 contains new PP bits, save them in |
163 | * place of "access" in the param area (sic) | 163 | * place of "access" in the param area (sic) |
164 | */ | 164 | */ |
165 | std r3,STK_PARM(r4)(r1) | 165 | std r3,STK_PARM(R4)(r1) |
166 | 166 | ||
167 | /* Get htab_hash_mask */ | 167 | /* Get htab_hash_mask */ |
168 | ld r4,htab_hash_mask@got(2) | 168 | ld r4,htab_hash_mask@got(2) |
@@ -192,11 +192,11 @@ htab_insert_pte: | |||
192 | rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ | 192 | rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ |
193 | 193 | ||
194 | /* Call ppc_md.hpte_insert */ | 194 | /* Call ppc_md.hpte_insert */ |
195 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ | 195 | ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ |
196 | mr r4,r29 /* Retrieve va */ | 196 | mr r4,r29 /* Retrieve va */ |
197 | li r7,0 /* !bolted, !secondary */ | 197 | li r7,0 /* !bolted, !secondary */ |
198 | li r8,MMU_PAGE_4K /* page size */ | 198 | li r8,MMU_PAGE_4K /* page size */ |
199 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 199 | ld r9,STK_PARM(R9)(r1) /* segment size */ |
200 | _GLOBAL(htab_call_hpte_insert1) | 200 | _GLOBAL(htab_call_hpte_insert1) |
201 | bl . /* Patched by htab_finish_init() */ | 201 | bl . /* Patched by htab_finish_init() */ |
202 | cmpdi 0,r3,0 | 202 | cmpdi 0,r3,0 |
@@ -215,11 +215,11 @@ _GLOBAL(htab_call_hpte_insert1) | |||
215 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | 215 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ |
216 | 216 | ||
217 | /* Call ppc_md.hpte_insert */ | 217 | /* Call ppc_md.hpte_insert */ |
218 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ | 218 | ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ |
219 | mr r4,r29 /* Retrieve va */ | 219 | mr r4,r29 /* Retrieve va */ |
220 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | 220 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ |
221 | li r8,MMU_PAGE_4K /* page size */ | 221 | li r8,MMU_PAGE_4K /* page size */ |
222 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 222 | ld r9,STK_PARM(R9)(r1) /* segment size */ |
223 | _GLOBAL(htab_call_hpte_insert2) | 223 | _GLOBAL(htab_call_hpte_insert2) |
224 | bl . /* Patched by htab_finish_init() */ | 224 | bl . /* Patched by htab_finish_init() */ |
225 | cmpdi 0,r3,0 | 225 | cmpdi 0,r3,0 |
@@ -255,15 +255,15 @@ htab_pte_insert_ok: | |||
255 | * (maybe add eieio may be good still ?) | 255 | * (maybe add eieio may be good still ?) |
256 | */ | 256 | */ |
257 | htab_write_out_pte: | 257 | htab_write_out_pte: |
258 | ld r6,STK_PARM(r6)(r1) | 258 | ld r6,STK_PARM(R6)(r1) |
259 | std r30,0(r6) | 259 | std r30,0(r6) |
260 | li r3, 0 | 260 | li r3, 0 |
261 | htab_bail: | 261 | htab_bail: |
262 | ld r27,STK_REG(r27)(r1) | 262 | ld r27,STK_REG(R27)(r1) |
263 | ld r28,STK_REG(r28)(r1) | 263 | ld r28,STK_REG(R28)(r1) |
264 | ld r29,STK_REG(r29)(r1) | 264 | ld r29,STK_REG(R29)(r1) |
265 | ld r30,STK_REG(r30)(r1) | 265 | ld r30,STK_REG(R30)(r1) |
266 | ld r31,STK_REG(r31)(r1) | 266 | ld r31,STK_REG(R31)(r1) |
267 | addi r1,r1,STACKFRAMESIZE | 267 | addi r1,r1,STACKFRAMESIZE |
268 | ld r0,16(r1) | 268 | ld r0,16(r1) |
269 | mtlr r0 | 269 | mtlr r0 |
@@ -288,8 +288,8 @@ htab_modify_pte: | |||
288 | /* Call ppc_md.hpte_updatepp */ | 288 | /* Call ppc_md.hpte_updatepp */ |
289 | mr r5,r29 /* va */ | 289 | mr r5,r29 /* va */ |
290 | li r6,MMU_PAGE_4K /* page size */ | 290 | li r6,MMU_PAGE_4K /* page size */ |
291 | ld r7,STK_PARM(r9)(r1) /* segment size */ | 291 | ld r7,STK_PARM(R9)(r1) /* segment size */ |
292 | ld r8,STK_PARM(r8)(r1) /* get "local" param */ | 292 | ld r8,STK_PARM(R8)(r1) /* get "local" param */ |
293 | _GLOBAL(htab_call_hpte_updatepp) | 293 | _GLOBAL(htab_call_hpte_updatepp) |
294 | bl . /* Patched by htab_finish_init() */ | 294 | bl . /* Patched by htab_finish_init() */ |
295 | 295 | ||
@@ -312,7 +312,7 @@ htab_wrong_access: | |||
312 | 312 | ||
313 | htab_pte_insert_failure: | 313 | htab_pte_insert_failure: |
314 | /* Bail out restoring old PTE */ | 314 | /* Bail out restoring old PTE */ |
315 | ld r6,STK_PARM(r6)(r1) | 315 | ld r6,STK_PARM(R6)(r1) |
316 | std r31,0(r6) | 316 | std r31,0(r6) |
317 | li r3,-1 | 317 | li r3,-1 |
318 | b htab_bail | 318 | b htab_bail |
@@ -340,9 +340,9 @@ _GLOBAL(__hash_page_4K) | |||
340 | std r0,16(r1) | 340 | std r0,16(r1) |
341 | stdu r1,-STACKFRAMESIZE(r1) | 341 | stdu r1,-STACKFRAMESIZE(r1) |
342 | /* Save all params that we need after a function call */ | 342 | /* Save all params that we need after a function call */ |
343 | std r6,STK_PARM(r6)(r1) | 343 | std r6,STK_PARM(R6)(r1) |
344 | std r8,STK_PARM(r8)(r1) | 344 | std r8,STK_PARM(R8)(r1) |
345 | std r9,STK_PARM(r9)(r1) | 345 | std r9,STK_PARM(R9)(r1) |
346 | 346 | ||
347 | /* Save non-volatile registers. | 347 | /* Save non-volatile registers. |
348 | * r31 will hold "old PTE" | 348 | * r31 will hold "old PTE" |
@@ -353,13 +353,13 @@ _GLOBAL(__hash_page_4K) | |||
353 | * r26 is the hidx mask | 353 | * r26 is the hidx mask |
354 | * r25 is the index in combo page | 354 | * r25 is the index in combo page |
355 | */ | 355 | */ |
356 | std r25,STK_REG(r25)(r1) | 356 | std r25,STK_REG(R25)(r1) |
357 | std r26,STK_REG(r26)(r1) | 357 | std r26,STK_REG(R26)(r1) |
358 | std r27,STK_REG(r27)(r1) | 358 | std r27,STK_REG(R27)(r1) |
359 | std r28,STK_REG(r28)(r1) | 359 | std r28,STK_REG(R28)(r1) |
360 | std r29,STK_REG(r29)(r1) | 360 | std r29,STK_REG(R29)(r1) |
361 | std r30,STK_REG(r30)(r1) | 361 | std r30,STK_REG(R30)(r1) |
362 | std r31,STK_REG(r31)(r1) | 362 | std r31,STK_REG(R31)(r1) |
363 | 363 | ||
364 | /* Step 1: | 364 | /* Step 1: |
365 | * | 365 | * |
@@ -452,7 +452,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | |||
452 | /* At this point, r3 contains new PP bits, save them in | 452 | /* At this point, r3 contains new PP bits, save them in |
453 | * place of "access" in the param area (sic) | 453 | * place of "access" in the param area (sic) |
454 | */ | 454 | */ |
455 | std r3,STK_PARM(r4)(r1) | 455 | std r3,STK_PARM(R4)(r1) |
456 | 456 | ||
457 | /* Get htab_hash_mask */ | 457 | /* Get htab_hash_mask */ |
458 | ld r4,htab_hash_mask@got(2) | 458 | ld r4,htab_hash_mask@got(2) |
@@ -473,7 +473,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | |||
473 | andis. r0,r31,_PAGE_COMBO@h | 473 | andis. r0,r31,_PAGE_COMBO@h |
474 | beq htab_inval_old_hpte | 474 | beq htab_inval_old_hpte |
475 | 475 | ||
476 | ld r6,STK_PARM(r6)(r1) | 476 | ld r6,STK_PARM(R6)(r1) |
477 | ori r26,r6,0x8000 /* Load the hidx mask */ | 477 | ori r26,r6,0x8000 /* Load the hidx mask */ |
478 | ld r26,0(r26) | 478 | ld r26,0(r26) |
479 | addi r5,r25,36 /* Check actual HPTE_SUB bit, this */ | 479 | addi r5,r25,36 /* Check actual HPTE_SUB bit, this */ |
@@ -495,11 +495,11 @@ htab_special_pfn: | |||
495 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | 495 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ |
496 | 496 | ||
497 | /* Call ppc_md.hpte_insert */ | 497 | /* Call ppc_md.hpte_insert */ |
498 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ | 498 | ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ |
499 | mr r4,r29 /* Retrieve va */ | 499 | mr r4,r29 /* Retrieve va */ |
500 | li r7,0 /* !bolted, !secondary */ | 500 | li r7,0 /* !bolted, !secondary */ |
501 | li r8,MMU_PAGE_4K /* page size */ | 501 | li r8,MMU_PAGE_4K /* page size */ |
502 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 502 | ld r9,STK_PARM(R9)(r1) /* segment size */ |
503 | _GLOBAL(htab_call_hpte_insert1) | 503 | _GLOBAL(htab_call_hpte_insert1) |
504 | bl . /* patched by htab_finish_init() */ | 504 | bl . /* patched by htab_finish_init() */ |
505 | cmpdi 0,r3,0 | 505 | cmpdi 0,r3,0 |
@@ -522,11 +522,11 @@ _GLOBAL(htab_call_hpte_insert1) | |||
522 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | 522 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ |
523 | 523 | ||
524 | /* Call ppc_md.hpte_insert */ | 524 | /* Call ppc_md.hpte_insert */ |
525 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ | 525 | ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ |
526 | mr r4,r29 /* Retrieve va */ | 526 | mr r4,r29 /* Retrieve va */ |
527 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | 527 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ |
528 | li r8,MMU_PAGE_4K /* page size */ | 528 | li r8,MMU_PAGE_4K /* page size */ |
529 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 529 | ld r9,STK_PARM(R9)(r1) /* segment size */ |
530 | _GLOBAL(htab_call_hpte_insert2) | 530 | _GLOBAL(htab_call_hpte_insert2) |
531 | bl . /* patched by htab_finish_init() */ | 531 | bl . /* patched by htab_finish_init() */ |
532 | cmpdi 0,r3,0 | 532 | cmpdi 0,r3,0 |
@@ -559,8 +559,8 @@ htab_inval_old_hpte: | |||
559 | mr r4,r31 /* PTE.pte */ | 559 | mr r4,r31 /* PTE.pte */ |
560 | li r5,0 /* PTE.hidx */ | 560 | li r5,0 /* PTE.hidx */ |
561 | li r6,MMU_PAGE_64K /* psize */ | 561 | li r6,MMU_PAGE_64K /* psize */ |
562 | ld r7,STK_PARM(r9)(r1) /* ssize */ | 562 | ld r7,STK_PARM(R9)(r1) /* ssize */ |
563 | ld r8,STK_PARM(r8)(r1) /* local */ | 563 | ld r8,STK_PARM(R8)(r1) /* local */ |
564 | bl .flush_hash_page | 564 | bl .flush_hash_page |
565 | /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ | 565 | /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ |
566 | lis r0,_PAGE_HPTE_SUB@h | 566 | lis r0,_PAGE_HPTE_SUB@h |
@@ -576,7 +576,7 @@ htab_pte_insert_ok: | |||
576 | /* Insert slot number & secondary bit in PTE second half, | 576 | /* Insert slot number & secondary bit in PTE second half, |
577 | * clear _PAGE_BUSY and set approriate HPTE slot bit | 577 | * clear _PAGE_BUSY and set approriate HPTE slot bit |
578 | */ | 578 | */ |
579 | ld r6,STK_PARM(r6)(r1) | 579 | ld r6,STK_PARM(R6)(r1) |
580 | li r0,_PAGE_BUSY | 580 | li r0,_PAGE_BUSY |
581 | andc r30,r30,r0 | 581 | andc r30,r30,r0 |
582 | /* HPTE SUB bit */ | 582 | /* HPTE SUB bit */ |
@@ -597,13 +597,13 @@ htab_pte_insert_ok: | |||
597 | std r30,0(r6) | 597 | std r30,0(r6) |
598 | li r3, 0 | 598 | li r3, 0 |
599 | htab_bail: | 599 | htab_bail: |
600 | ld r25,STK_REG(r25)(r1) | 600 | ld r25,STK_REG(R25)(r1) |
601 | ld r26,STK_REG(r26)(r1) | 601 | ld r26,STK_REG(R26)(r1) |
602 | ld r27,STK_REG(r27)(r1) | 602 | ld r27,STK_REG(R27)(r1) |
603 | ld r28,STK_REG(r28)(r1) | 603 | ld r28,STK_REG(R28)(r1) |
604 | ld r29,STK_REG(r29)(r1) | 604 | ld r29,STK_REG(R29)(r1) |
605 | ld r30,STK_REG(r30)(r1) | 605 | ld r30,STK_REG(R30)(r1) |
606 | ld r31,STK_REG(r31)(r1) | 606 | ld r31,STK_REG(R31)(r1) |
607 | addi r1,r1,STACKFRAMESIZE | 607 | addi r1,r1,STACKFRAMESIZE |
608 | ld r0,16(r1) | 608 | ld r0,16(r1) |
609 | mtlr r0 | 609 | mtlr r0 |
@@ -630,8 +630,8 @@ htab_modify_pte: | |||
630 | /* Call ppc_md.hpte_updatepp */ | 630 | /* Call ppc_md.hpte_updatepp */ |
631 | mr r5,r29 /* va */ | 631 | mr r5,r29 /* va */ |
632 | li r6,MMU_PAGE_4K /* page size */ | 632 | li r6,MMU_PAGE_4K /* page size */ |
633 | ld r7,STK_PARM(r9)(r1) /* segment size */ | 633 | ld r7,STK_PARM(R9)(r1) /* segment size */ |
634 | ld r8,STK_PARM(r8)(r1) /* get "local" param */ | 634 | ld r8,STK_PARM(R8)(r1) /* get "local" param */ |
635 | _GLOBAL(htab_call_hpte_updatepp) | 635 | _GLOBAL(htab_call_hpte_updatepp) |
636 | bl . /* patched by htab_finish_init() */ | 636 | bl . /* patched by htab_finish_init() */ |
637 | 637 | ||
@@ -644,7 +644,7 @@ _GLOBAL(htab_call_hpte_updatepp) | |||
644 | /* Clear the BUSY bit and Write out the PTE */ | 644 | /* Clear the BUSY bit and Write out the PTE */ |
645 | li r0,_PAGE_BUSY | 645 | li r0,_PAGE_BUSY |
646 | andc r30,r30,r0 | 646 | andc r30,r30,r0 |
647 | ld r6,STK_PARM(r6)(r1) | 647 | ld r6,STK_PARM(R6)(r1) |
648 | std r30,0(r6) | 648 | std r30,0(r6) |
649 | li r3,0 | 649 | li r3,0 |
650 | b htab_bail | 650 | b htab_bail |
@@ -657,7 +657,7 @@ htab_wrong_access: | |||
657 | 657 | ||
658 | htab_pte_insert_failure: | 658 | htab_pte_insert_failure: |
659 | /* Bail out restoring old PTE */ | 659 | /* Bail out restoring old PTE */ |
660 | ld r6,STK_PARM(r6)(r1) | 660 | ld r6,STK_PARM(R6)(r1) |
661 | std r31,0(r6) | 661 | std r31,0(r6) |
662 | li r3,-1 | 662 | li r3,-1 |
663 | b htab_bail | 663 | b htab_bail |
@@ -677,9 +677,9 @@ _GLOBAL(__hash_page_64K) | |||
677 | std r0,16(r1) | 677 | std r0,16(r1) |
678 | stdu r1,-STACKFRAMESIZE(r1) | 678 | stdu r1,-STACKFRAMESIZE(r1) |
679 | /* Save all params that we need after a function call */ | 679 | /* Save all params that we need after a function call */ |
680 | std r6,STK_PARM(r6)(r1) | 680 | std r6,STK_PARM(R6)(r1) |
681 | std r8,STK_PARM(r8)(r1) | 681 | std r8,STK_PARM(R8)(r1) |
682 | std r9,STK_PARM(r9)(r1) | 682 | std r9,STK_PARM(R9)(r1) |
683 | 683 | ||
684 | /* Save non-volatile registers. | 684 | /* Save non-volatile registers. |
685 | * r31 will hold "old PTE" | 685 | * r31 will hold "old PTE" |
@@ -688,11 +688,11 @@ _GLOBAL(__hash_page_64K) | |||
688 | * r28 is a hash value | 688 | * r28 is a hash value |
689 | * r27 is hashtab mask (maybe dynamic patched instead ?) | 689 | * r27 is hashtab mask (maybe dynamic patched instead ?) |
690 | */ | 690 | */ |
691 | std r27,STK_REG(r27)(r1) | 691 | std r27,STK_REG(R27)(r1) |
692 | std r28,STK_REG(r28)(r1) | 692 | std r28,STK_REG(R28)(r1) |
693 | std r29,STK_REG(r29)(r1) | 693 | std r29,STK_REG(R29)(r1) |
694 | std r30,STK_REG(r30)(r1) | 694 | std r30,STK_REG(R30)(r1) |
695 | std r31,STK_REG(r31)(r1) | 695 | std r31,STK_REG(R31)(r1) |
696 | 696 | ||
697 | /* Step 1: | 697 | /* Step 1: |
698 | * | 698 | * |
@@ -780,7 +780,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | |||
780 | /* At this point, r3 contains new PP bits, save them in | 780 | /* At this point, r3 contains new PP bits, save them in |
781 | * place of "access" in the param area (sic) | 781 | * place of "access" in the param area (sic) |
782 | */ | 782 | */ |
783 | std r3,STK_PARM(r4)(r1) | 783 | std r3,STK_PARM(R4)(r1) |
784 | 784 | ||
785 | /* Get htab_hash_mask */ | 785 | /* Get htab_hash_mask */ |
786 | ld r4,htab_hash_mask@got(2) | 786 | ld r4,htab_hash_mask@got(2) |
@@ -813,11 +813,11 @@ ht64_insert_pte: | |||
813 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | 813 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ |
814 | 814 | ||
815 | /* Call ppc_md.hpte_insert */ | 815 | /* Call ppc_md.hpte_insert */ |
816 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ | 816 | ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ |
817 | mr r4,r29 /* Retrieve va */ | 817 | mr r4,r29 /* Retrieve va */ |
818 | li r7,0 /* !bolted, !secondary */ | 818 | li r7,0 /* !bolted, !secondary */ |
819 | li r8,MMU_PAGE_64K | 819 | li r8,MMU_PAGE_64K |
820 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 820 | ld r9,STK_PARM(R9)(r1) /* segment size */ |
821 | _GLOBAL(ht64_call_hpte_insert1) | 821 | _GLOBAL(ht64_call_hpte_insert1) |
822 | bl . /* patched by htab_finish_init() */ | 822 | bl . /* patched by htab_finish_init() */ |
823 | cmpdi 0,r3,0 | 823 | cmpdi 0,r3,0 |
@@ -836,11 +836,11 @@ _GLOBAL(ht64_call_hpte_insert1) | |||
836 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | 836 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ |
837 | 837 | ||
838 | /* Call ppc_md.hpte_insert */ | 838 | /* Call ppc_md.hpte_insert */ |
839 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ | 839 | ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ |
840 | mr r4,r29 /* Retrieve va */ | 840 | mr r4,r29 /* Retrieve va */ |
841 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | 841 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ |
842 | li r8,MMU_PAGE_64K | 842 | li r8,MMU_PAGE_64K |
843 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 843 | ld r9,STK_PARM(R9)(r1) /* segment size */ |
844 | _GLOBAL(ht64_call_hpte_insert2) | 844 | _GLOBAL(ht64_call_hpte_insert2) |
845 | bl . /* patched by htab_finish_init() */ | 845 | bl . /* patched by htab_finish_init() */ |
846 | cmpdi 0,r3,0 | 846 | cmpdi 0,r3,0 |
@@ -876,15 +876,15 @@ ht64_pte_insert_ok: | |||
876 | * (maybe add eieio may be good still ?) | 876 | * (maybe add eieio may be good still ?) |
877 | */ | 877 | */ |
878 | ht64_write_out_pte: | 878 | ht64_write_out_pte: |
879 | ld r6,STK_PARM(r6)(r1) | 879 | ld r6,STK_PARM(R6)(r1) |
880 | std r30,0(r6) | 880 | std r30,0(r6) |
881 | li r3, 0 | 881 | li r3, 0 |
882 | ht64_bail: | 882 | ht64_bail: |
883 | ld r27,STK_REG(r27)(r1) | 883 | ld r27,STK_REG(R27)(r1) |
884 | ld r28,STK_REG(r28)(r1) | 884 | ld r28,STK_REG(R28)(r1) |
885 | ld r29,STK_REG(r29)(r1) | 885 | ld r29,STK_REG(R29)(r1) |
886 | ld r30,STK_REG(r30)(r1) | 886 | ld r30,STK_REG(R30)(r1) |
887 | ld r31,STK_REG(r31)(r1) | 887 | ld r31,STK_REG(R31)(r1) |
888 | addi r1,r1,STACKFRAMESIZE | 888 | addi r1,r1,STACKFRAMESIZE |
889 | ld r0,16(r1) | 889 | ld r0,16(r1) |
890 | mtlr r0 | 890 | mtlr r0 |
@@ -909,8 +909,8 @@ ht64_modify_pte: | |||
909 | /* Call ppc_md.hpte_updatepp */ | 909 | /* Call ppc_md.hpte_updatepp */ |
910 | mr r5,r29 /* va */ | 910 | mr r5,r29 /* va */ |
911 | li r6,MMU_PAGE_64K | 911 | li r6,MMU_PAGE_64K |
912 | ld r7,STK_PARM(r9)(r1) /* segment size */ | 912 | ld r7,STK_PARM(R9)(r1) /* segment size */ |
913 | ld r8,STK_PARM(r8)(r1) /* get "local" param */ | 913 | ld r8,STK_PARM(R8)(r1) /* get "local" param */ |
914 | _GLOBAL(ht64_call_hpte_updatepp) | 914 | _GLOBAL(ht64_call_hpte_updatepp) |
915 | bl . /* patched by htab_finish_init() */ | 915 | bl . /* patched by htab_finish_init() */ |
916 | 916 | ||
@@ -933,7 +933,7 @@ ht64_wrong_access: | |||
933 | 933 | ||
934 | ht64_pte_insert_failure: | 934 | ht64_pte_insert_failure: |
935 | /* Bail out restoring old PTE */ | 935 | /* Bail out restoring old PTE */ |
936 | ld r6,STK_PARM(r6)(r1) | 936 | ld r6,STK_PARM(R6)(r1) |
937 | std r31,0(r6) | 937 | std r31,0(r6) |
938 | li r3,-1 | 938 | li r3,-1 |
939 | b ht64_bail | 939 | b ht64_bail |
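
hash_low_64.S leans on STK_PARM, the companion macro for argument registers: the ABI reserves a doubleword per argument register (r3-r10) starting at offset 48 of a stack frame, and __hash_page_4K uses those slots in its own freshly pushed frame to keep r6/r8/r9 live across the patched hpte_insert/hpte_updatepp calls. A sketch, assuming the local definition of the era:

        /* slot for argument register i; r3 is the first argument */
        #define STK_PARM(i)     (48 + ((i) - 3) * 8)

So std r9,STK_PARM(R9)(r1) parks the segment-size argument at offset 48 + 6 * 8 = 96, where the later ld r9,STK_PARM(R9)(r1) lines retrieve it.
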
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index ff672bd8fea9..4b9e2643d21b 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S | |||
@@ -126,7 +126,7 @@ BEGIN_MMU_FTR_SECTION | |||
126 | /* Set the TLB reservation and search for existing entry. Then load | 126 | /* Set the TLB reservation and search for existing entry. Then load |
127 | * the entry. | 127 | * the entry. |
128 | */ | 128 | */ |
129 | PPC_TLBSRX_DOT(0,r16) | 129 | PPC_TLBSRX_DOT(R0,R16) |
130 | ldx r14,r14,r15 /* grab pgd entry */ | 130 | ldx r14,r14,r15 /* grab pgd entry */ |
131 | beq normal_tlb_miss_done /* tlb exists already, bail */ | 131 | beq normal_tlb_miss_done /* tlb exists already, bail */ |
132 | MMU_FTR_SECTION_ELSE | 132 | MMU_FTR_SECTION_ELSE |
@@ -395,7 +395,7 @@ BEGIN_MMU_FTR_SECTION | |||
395 | /* Set the TLB reservation and search for existing entry. Then load | 395 | /* Set the TLB reservation and search for existing entry. Then load |
396 | * the entry. | 396 | * the entry. |
397 | */ | 397 | */ |
398 | PPC_TLBSRX_DOT(0,r16) | 398 | PPC_TLBSRX_DOT(R0,R16) |
399 | ld r14,0(r10) | 399 | ld r14,0(r10) |
400 | beq normal_tlb_miss_done | 400 | beq normal_tlb_miss_done |
401 | MMU_FTR_SECTION_ELSE | 401 | MMU_FTR_SECTION_ELSE |
@@ -528,7 +528,7 @@ BEGIN_MMU_FTR_SECTION | |||
528 | /* Search if we already have a TLB entry for that virtual address, and | 528 | /* Search if we already have a TLB entry for that virtual address, and |
529 | * if we do, bail out. | 529 | * if we do, bail out. |
530 | */ | 530 | */ |
531 | PPC_TLBSRX_DOT(0,r16) | 531 | PPC_TLBSRX_DOT(R0,R16) |
532 | beq virt_page_table_tlb_miss_done | 532 | beq virt_page_table_tlb_miss_done |
533 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) | 533 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) |
534 | 534 | ||
@@ -779,7 +779,7 @@ htw_tlb_miss: | |||
779 | * | 779 | * |
780 | * MAS1:IND should be already set based on MAS4 | 780 | * MAS1:IND should be already set based on MAS4 |
781 | */ | 781 | */ |
782 | PPC_TLBSRX_DOT(0,r16) | 782 | PPC_TLBSRX_DOT(R0,R16) |
783 | beq htw_tlb_miss_done | 783 | beq htw_tlb_miss_done |
784 | 784 | ||
785 | /* Now, we need to walk the page tables. First check if we are in | 785 | /* Now, we need to walk the page tables. First check if we are in |
@@ -919,7 +919,7 @@ tlb_load_linear: | |||
919 | mtspr SPRN_MAS1,r15 | 919 | mtspr SPRN_MAS1,r15 |
920 | 920 | ||
921 | /* Already somebody there ? */ | 921 | /* Already somebody there ? */ |
922 | PPC_TLBSRX_DOT(0,r16) | 922 | PPC_TLBSRX_DOT(R0,R16) |
923 | beq tlb_load_linear_done | 923 | beq tlb_load_linear_done |
924 | 924 | ||
925 | /* Now we build the remaining MAS. MAS0 and 2 should be fine | 925 | /* Now we build the remaining MAS. MAS0 and 2 should be fine |
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index 7c63c0ed4f1b..5a1285a9109f 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S | |||
@@ -313,7 +313,7 @@ BEGIN_MMU_FTR_SECTION | |||
313 | mtspr SPRN_MAS1,r4 | 313 | mtspr SPRN_MAS1,r4 |
314 | tlbwe | 314 | tlbwe |
315 | MMU_FTR_SECTION_ELSE | 315 | MMU_FTR_SECTION_ELSE |
316 | PPC_TLBILX_VA(0,r3) | 316 | PPC_TLBILX_VA(R0,R3) |
317 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) | 317 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) |
318 | msync | 318 | msync |
319 | isync | 319 | isync |
@@ -364,7 +364,7 @@ _GLOBAL(_tlbil_va) | |||
364 | beq 1f | 364 | beq 1f |
365 | rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND | 365 | rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND |
366 | 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ | 366 | 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ |
367 | PPC_TLBILX_VA(0,r3) | 367 | PPC_TLBILX_VA(R0,R3) |
368 | msync | 368 | msync |
369 | isync | 369 | isync |
370 | wrtee r10 | 370 | wrtee r10 |
@@ -379,7 +379,7 @@ _GLOBAL(_tlbivax_bcast) | |||
379 | beq 1f | 379 | beq 1f |
380 | rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND | 380 | rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND |
381 | 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ | 381 | 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ |
382 | PPC_TLBIVAX(0,r3) | 382 | PPC_TLBIVAX(R0,R3) |
383 | eieio | 383 | eieio |
384 | tlbsync | 384 | tlbsync |
385 | sync | 385 | sync |
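
The TLB paths use the same raw-word trick for tlbsrx., tlbilx and tlbivax, which contemporary assemblers did not all know. A sketch of the shape:

        #define PPC_TLBIVAX(a, b) stringify_in_c(.long PPC_INST_TLBIVAX | \
                                        __PPC_RA(a) | __PPC_RB(b))

Passing (R0,R3) keeps both fields numeric; RA=0 in these EA-forming instructions means a zero base, so the former literal 0 maps naturally onto R0.
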
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 2dc8b1484845..dd1130642d07 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c | |||
@@ -39,7 +39,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, | |||
39 | /* Make stackframe */ | 39 | /* Make stackframe */ |
40 | if (ctx->seen & SEEN_DATAREF) { | 40 | if (ctx->seen & SEEN_DATAREF) { |
41 | /* If we call any helpers (for loads), save LR */ | 41 | /* If we call any helpers (for loads), save LR */ |
42 | EMIT(PPC_INST_MFLR | __PPC_RT(0)); | 42 | EMIT(PPC_INST_MFLR | __PPC_RT(R0)); |
43 | PPC_STD(0, 1, 16); | 43 | PPC_STD(0, 1, 16); |
44 | 44 | ||
45 | /* Back up non-volatile regs. */ | 45 | /* Back up non-volatile regs. */ |
@@ -56,7 +56,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, | |||
56 | PPC_STD(i, 1, -(8*(32-i))); | 56 | PPC_STD(i, 1, -(8*(32-i))); |
57 | } | 57 | } |
58 | } | 58 | } |
59 | EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) | | 59 | EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) | |
60 | (-BPF_PPC_STACKFRAME & 0xfffc)); | 60 | (-BPF_PPC_STACKFRAME & 0xfffc)); |
61 | } | 61 | } |
62 | 62 | ||
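
bpf_jit_comp.c is the lone C caller in this section: the JIT builds instruction words at runtime by OR-ing fields into PPC_INST_* templates, so it uses the same shift-and-mask helpers and wants the numeric names too (the R0..R31 defines are plain integers, hence usable from C). A sketch of the field helpers assumed here:

        #define __PPC_RS(s)     (((s) & 0x1f) << 21)
        #define __PPC_RT(t)     __PPC_RS(t)    /* RT and RS share the field */
        #define __PPC_RA(a)     (((a) & 0x1f) << 16)

        /* e.g. the prologue's "mflr r0" above is emitted as: */
        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
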
diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S index 74c817448948..079165eff3a2 100644 --- a/arch/powerpc/platforms/cell/beat_hvCall.S +++ b/arch/powerpc/platforms/cell/beat_hvCall.S | |||
@@ -74,7 +74,7 @@ _GLOBAL(beat_hcall_norets8) | |||
74 | mr r6,r7 | 74 | mr r6,r7 |
75 | mr r7,r8 | 75 | mr r7,r8 |
76 | mr r8,r9 | 76 | mr r8,r9 |
77 | ld r10,STK_PARM(r10)(r1) | 77 | ld r10,STK_PARM(R10)(r1) |
78 | 78 | ||
79 | HVSC /* invoke the hypervisor */ | 79 | HVSC /* invoke the hypervisor */ |
80 | 80 | ||
@@ -94,7 +94,7 @@ _GLOBAL(beat_hcall1) | |||
94 | 94 | ||
95 | HCALL_INST_PRECALL | 95 | HCALL_INST_PRECALL |
96 | 96 | ||
97 | std r4,STK_PARM(r4)(r1) /* save ret buffer */ | 97 | std r4,STK_PARM(R4)(r1) /* save ret buffer */ |
98 | 98 | ||
99 | mr r11,r3 | 99 | mr r11,r3 |
100 | mr r3,r5 | 100 | mr r3,r5 |
@@ -108,7 +108,7 @@ _GLOBAL(beat_hcall1) | |||
108 | 108 | ||
109 | HCALL_INST_POSTCALL | 109 | HCALL_INST_POSTCALL |
110 | 110 | ||
111 | ld r12,STK_PARM(r4)(r1) | 111 | ld r12,STK_PARM(R4)(r1) |
112 | std r4, 0(r12) | 112 | std r4, 0(r12) |
113 | 113 | ||
114 | lwz r0,8(r1) | 114 | lwz r0,8(r1) |
@@ -125,7 +125,7 @@ _GLOBAL(beat_hcall2) | |||
125 | 125 | ||
126 | HCALL_INST_PRECALL | 126 | HCALL_INST_PRECALL |
127 | 127 | ||
128 | std r4,STK_PARM(r4)(r1) /* save ret buffer */ | 128 | std r4,STK_PARM(R4)(r1) /* save ret buffer */ |
129 | 129 | ||
130 | mr r11,r3 | 130 | mr r11,r3 |
131 | mr r3,r5 | 131 | mr r3,r5 |
@@ -139,7 +139,7 @@ _GLOBAL(beat_hcall2) | |||
139 | 139 | ||
140 | HCALL_INST_POSTCALL | 140 | HCALL_INST_POSTCALL |
141 | 141 | ||
142 | ld r12,STK_PARM(r4)(r1) | 142 | ld r12,STK_PARM(R4)(r1) |
143 | std r4, 0(r12) | 143 | std r4, 0(r12) |
144 | std r5, 8(r12) | 144 | std r5, 8(r12) |
145 | 145 | ||
@@ -157,7 +157,7 @@ _GLOBAL(beat_hcall3) | |||
157 | 157 | ||
158 | HCALL_INST_PRECALL | 158 | HCALL_INST_PRECALL |
159 | 159 | ||
160 | std r4,STK_PARM(r4)(r1) /* save ret buffer */ | 160 | std r4,STK_PARM(R4)(r1) /* save ret buffer */ |
161 | 161 | ||
162 | mr r11,r3 | 162 | mr r11,r3 |
163 | mr r3,r5 | 163 | mr r3,r5 |
@@ -171,7 +171,7 @@ _GLOBAL(beat_hcall3) | |||
171 | 171 | ||
172 | HCALL_INST_POSTCALL | 172 | HCALL_INST_POSTCALL |
173 | 173 | ||
174 | ld r12,STK_PARM(r4)(r1) | 174 | ld r12,STK_PARM(R4)(r1) |
175 | std r4, 0(r12) | 175 | std r4, 0(r12) |
176 | std r5, 8(r12) | 176 | std r5, 8(r12) |
177 | std r6, 16(r12) | 177 | std r6, 16(r12) |
@@ -190,7 +190,7 @@ _GLOBAL(beat_hcall4) | |||
190 | 190 | ||
191 | HCALL_INST_PRECALL | 191 | HCALL_INST_PRECALL |
192 | 192 | ||
193 | std r4,STK_PARM(r4)(r1) /* save ret buffer */ | 193 | std r4,STK_PARM(R4)(r1) /* save ret buffer */ |
194 | 194 | ||
195 | mr r11,r3 | 195 | mr r11,r3 |
196 | mr r3,r5 | 196 | mr r3,r5 |
@@ -204,7 +204,7 @@ _GLOBAL(beat_hcall4) | |||
204 | 204 | ||
205 | HCALL_INST_POSTCALL | 205 | HCALL_INST_POSTCALL |
206 | 206 | ||
207 | ld r12,STK_PARM(r4)(r1) | 207 | ld r12,STK_PARM(R4)(r1) |
208 | std r4, 0(r12) | 208 | std r4, 0(r12) |
209 | std r5, 8(r12) | 209 | std r5, 8(r12) |
210 | std r6, 16(r12) | 210 | std r6, 16(r12) |
@@ -224,7 +224,7 @@ _GLOBAL(beat_hcall5) | |||
224 | 224 | ||
225 | HCALL_INST_PRECALL | 225 | HCALL_INST_PRECALL |
226 | 226 | ||
227 | std r4,STK_PARM(r4)(r1) /* save ret buffer */ | 227 | std r4,STK_PARM(R4)(r1) /* save ret buffer */ |
228 | 228 | ||
229 | mr r11,r3 | 229 | mr r11,r3 |
230 | mr r3,r5 | 230 | mr r3,r5 |
@@ -238,7 +238,7 @@ _GLOBAL(beat_hcall5) | |||
238 | 238 | ||
239 | HCALL_INST_POSTCALL | 239 | HCALL_INST_POSTCALL |
240 | 240 | ||
241 | ld r12,STK_PARM(r4)(r1) | 241 | ld r12,STK_PARM(R4)(r1) |
242 | std r4, 0(r12) | 242 | std r4, 0(r12) |
243 | std r5, 8(r12) | 243 | std r5, 8(r12) |
244 | std r6, 16(r12) | 244 | std r6, 16(r12) |
@@ -259,7 +259,7 @@ _GLOBAL(beat_hcall6) | |||
259 | 259 | ||
260 | HCALL_INST_PRECALL | 260 | HCALL_INST_PRECALL |
261 | 261 | ||
262 | std r4,STK_PARM(r4)(r1) /* save ret buffer */ | 262 | std r4,STK_PARM(R4)(r1) /* save ret buffer */ |
263 | 263 | ||
264 | mr r11,r3 | 264 | mr r11,r3 |
265 | mr r3,r5 | 265 | mr r3,r5 |
@@ -273,7 +273,7 @@ _GLOBAL(beat_hcall6) | |||
273 | 273 | ||
274 | HCALL_INST_POSTCALL | 274 | HCALL_INST_POSTCALL |
275 | 275 | ||
276 | ld r12,STK_PARM(r4)(r1) | 276 | ld r12,STK_PARM(R4)(r1) |
277 | std r4, 0(r12) | 277 | std r4, 0(r12) |
278 | std r5, 8(r12) | 278 | std r5, 8(r12) |
279 | std r6, 16(r12) | 279 | std r6, 16(r12) |
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S index 77b48b2b9309..1bb7768abe4c 100644 --- a/arch/powerpc/platforms/powernv/opal-takeover.S +++ b/arch/powerpc/platforms/powernv/opal-takeover.S | |||
@@ -23,14 +23,14 @@ | |||
23 | _GLOBAL(opal_query_takeover) | 23 | _GLOBAL(opal_query_takeover) |
24 | mfcr r0 | 24 | mfcr r0 |
25 | stw r0,8(r1) | 25 | stw r0,8(r1) |
26 | std r3,STK_PARAM(r3)(r1) | 26 | std r3,STK_PARAM(R3)(r1) |
27 | std r4,STK_PARAM(r4)(r1) | 27 | std r4,STK_PARAM(R4)(r1) |
28 | li r3,H_HAL_TAKEOVER | 28 | li r3,H_HAL_TAKEOVER |
29 | li r4,H_HAL_TAKEOVER_QUERY_MAGIC | 29 | li r4,H_HAL_TAKEOVER_QUERY_MAGIC |
30 | HVSC | 30 | HVSC |
31 | ld r10,STK_PARAM(r3)(r1) | 31 | ld r10,STK_PARAM(R3)(r1) |
32 | std r4,0(r10) | 32 | std r4,0(r10) |
33 | ld r10,STK_PARAM(r4)(r1) | 33 | ld r10,STK_PARAM(R4)(r1) |
34 | std r5,0(r10) | 34 | std r5,0(r10) |
35 | lwz r0,8(r1) | 35 | lwz r0,8(r1) |
36 | mtcrf 0xff,r0 | 36 | mtcrf 0xff,r0 |
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 3bb07e5e43cd..afcddec5d3a1 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S | |||
@@ -32,7 +32,7 @@ | |||
32 | std r12,PACASAVEDMSR(r13); \ | 32 | std r12,PACASAVEDMSR(r13); \ |
33 | andc r12,r12,r0; \ | 33 | andc r12,r12,r0; \ |
34 | mtmsrd r12,1; \ | 34 | mtmsrd r12,1; \ |
35 | LOAD_REG_ADDR(r0,.opal_return); \ | 35 | LOAD_REG_ADDR(R0,.opal_return); \ |
36 | mtlr r0; \ | 36 | mtlr r0; \ |
37 | li r0,MSR_DR|MSR_IR; \ | 37 | li r0,MSR_DR|MSR_IR; \ |
38 | andc r12,r12,r0; \ | 38 | andc r12,r12,r0; \ |
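
LOAD_REG_ADDR is a borderline conversion: it drops its argument straight into an operand slot rather than into arithmetic, roughly

        #define LOAD_REG_ADDR(reg, name)        ld      reg,name@got(r2)

and a bare number is still a valid register operand to the assembler, so LOAD_REG_ADDR(R0, ...) assembles the same way; converting it anyway presumably keeps macro call sites uniform across the tree.
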
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 3ce73d0052b1..04b8efdee5b7 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S | |||
@@ -40,28 +40,28 @@ END_FTR_SECTION(0, 1); \ | |||
40 | cmpdi r12,0; \ | 40 | cmpdi r12,0; \ |
41 | beq+ 1f; \ | 41 | beq+ 1f; \ |
42 | mflr r0; \ | 42 | mflr r0; \ |
43 | std r3,STK_PARM(r3)(r1); \ | 43 | std r3,STK_PARM(R3)(r1); \ |
44 | std r4,STK_PARM(r4)(r1); \ | 44 | std r4,STK_PARM(R4)(r1); \ |
45 | std r5,STK_PARM(r5)(r1); \ | 45 | std r5,STK_PARM(R5)(r1); \ |
46 | std r6,STK_PARM(r6)(r1); \ | 46 | std r6,STK_PARM(R6)(r1); \ |
47 | std r7,STK_PARM(r7)(r1); \ | 47 | std r7,STK_PARM(R7)(r1); \ |
48 | std r8,STK_PARM(r8)(r1); \ | 48 | std r8,STK_PARM(R8)(r1); \ |
49 | std r9,STK_PARM(r9)(r1); \ | 49 | std r9,STK_PARM(R9)(r1); \ |
50 | std r10,STK_PARM(r10)(r1); \ | 50 | std r10,STK_PARM(R10)(r1); \ |
51 | std r0,16(r1); \ | 51 | std r0,16(r1); \ |
52 | addi r4,r1,STK_PARM(FIRST_REG); \ | 52 | addi r4,r1,STK_PARM(FIRST_REG); \ |
53 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ | 53 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ |
54 | bl .__trace_hcall_entry; \ | 54 | bl .__trace_hcall_entry; \ |
55 | addi r1,r1,STACK_FRAME_OVERHEAD; \ | 55 | addi r1,r1,STACK_FRAME_OVERHEAD; \ |
56 | ld r0,16(r1); \ | 56 | ld r0,16(r1); \ |
57 | ld r3,STK_PARM(r3)(r1); \ | 57 | ld r3,STK_PARM(R3)(r1); \ |
58 | ld r4,STK_PARM(r4)(r1); \ | 58 | ld r4,STK_PARM(R4)(r1); \ |
59 | ld r5,STK_PARM(r5)(r1); \ | 59 | ld r5,STK_PARM(R5)(r1); \ |
60 | ld r6,STK_PARM(r6)(r1); \ | 60 | ld r6,STK_PARM(R6)(r1); \ |
61 | ld r7,STK_PARM(r7)(r1); \ | 61 | ld r7,STK_PARM(R7)(r1); \ |
62 | ld r8,STK_PARM(r8)(r1); \ | 62 | ld r8,STK_PARM(R8)(r1); \ |
63 | ld r9,STK_PARM(r9)(r1); \ | 63 | ld r9,STK_PARM(R9)(r1); \ |
64 | ld r10,STK_PARM(r10)(r1); \ | 64 | ld r10,STK_PARM(R10)(r1); \ |
65 | mtlr r0; \ | 65 | mtlr r0; \ |
66 | 1: | 66 | 1: |
67 | 67 | ||
@@ -79,8 +79,8 @@ END_FTR_SECTION(0, 1); \ | |||
79 | cmpdi r12,0; \ | 79 | cmpdi r12,0; \ |
80 | beq+ 1f; \ | 80 | beq+ 1f; \ |
81 | mflr r0; \ | 81 | mflr r0; \ |
82 | ld r6,STK_PARM(r3)(r1); \ | 82 | ld r6,STK_PARM(R3)(r1); \ |
83 | std r3,STK_PARM(r3)(r1); \ | 83 | std r3,STK_PARM(R3)(r1); \ |
84 | mr r4,r3; \ | 84 | mr r4,r3; \ |
85 | mr r3,r6; \ | 85 | mr r3,r6; \ |
86 | std r0,16(r1); \ | 86 | std r0,16(r1); \ |
@@ -88,7 +88,7 @@ END_FTR_SECTION(0, 1); \ | |||
88 | bl .__trace_hcall_exit; \ | 88 | bl .__trace_hcall_exit; \ |
89 | addi r1,r1,STACK_FRAME_OVERHEAD; \ | 89 | addi r1,r1,STACK_FRAME_OVERHEAD; \ |
90 | ld r0,16(r1); \ | 90 | ld r0,16(r1); \ |
91 | ld r3,STK_PARM(r3)(r1); \ | 91 | ld r3,STK_PARM(R3)(r1); \ |
92 | mtlr r0; \ | 92 | mtlr r0; \ |
93 | 1: | 93 | 1: |
94 | 94 | ||
@@ -114,7 +114,7 @@ _GLOBAL(plpar_hcall_norets) | |||
114 | mfcr r0 | 114 | mfcr r0 |
115 | stw r0,8(r1) | 115 | stw r0,8(r1) |
116 | 116 | ||
117 | HCALL_INST_PRECALL(r4) | 117 | HCALL_INST_PRECALL(R4) |
118 | 118 | ||
119 | HVSC /* invoke the hypervisor */ | 119 | HVSC /* invoke the hypervisor */ |
120 | 120 | ||
@@ -130,9 +130,9 @@ _GLOBAL(plpar_hcall) | |||
130 | mfcr r0 | 130 | mfcr r0 |
131 | stw r0,8(r1) | 131 | stw r0,8(r1) |
132 | 132 | ||
133 | HCALL_INST_PRECALL(r5) | 133 | HCALL_INST_PRECALL(R5) |
134 | 134 | ||
135 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 135 | std r4,STK_PARM(R4)(r1) /* Save ret buffer */ |
136 | 136 | ||
137 | mr r4,r5 | 137 | mr r4,r5 |
138 | mr r5,r6 | 138 | mr r5,r6 |
@@ -143,7 +143,7 @@ _GLOBAL(plpar_hcall) | |||
143 | 143 | ||
144 | HVSC /* invoke the hypervisor */ | 144 | HVSC /* invoke the hypervisor */ |
145 | 145 | ||
146 | ld r12,STK_PARM(r4)(r1) | 146 | ld r12,STK_PARM(R4)(r1) |
147 | std r4, 0(r12) | 147 | std r4, 0(r12) |
148 | std r5, 8(r12) | 148 | std r5, 8(r12) |
149 | std r6, 16(r12) | 149 | std r6, 16(r12) |
@@ -168,7 +168,7 @@ _GLOBAL(plpar_hcall_raw) | |||
168 | mfcr r0 | 168 | mfcr r0 |
169 | stw r0,8(r1) | 169 | stw r0,8(r1) |
170 | 170 | ||
171 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 171 | std r4,STK_PARM(R4)(r1) /* Save ret buffer */ |
172 | 172 | ||
173 | mr r4,r5 | 173 | mr r4,r5 |
174 | mr r5,r6 | 174 | mr r5,r6 |
@@ -179,7 +179,7 @@ _GLOBAL(plpar_hcall_raw) | |||
179 | 179 | ||
180 | HVSC /* invoke the hypervisor */ | 180 | HVSC /* invoke the hypervisor */ |
181 | 181 | ||
182 | ld r12,STK_PARM(r4)(r1) | 182 | ld r12,STK_PARM(R4)(r1) |
183 | std r4, 0(r12) | 183 | std r4, 0(r12) |
184 | std r5, 8(r12) | 184 | std r5, 8(r12) |
185 | std r6, 16(r12) | 185 | std r6, 16(r12) |
@@ -196,9 +196,9 @@ _GLOBAL(plpar_hcall9) | |||
196 | mfcr r0 | 196 | mfcr r0 |
197 | stw r0,8(r1) | 197 | stw r0,8(r1) |
198 | 198 | ||
199 | HCALL_INST_PRECALL(r5) | 199 | HCALL_INST_PRECALL(R5) |
200 | 200 | ||
201 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 201 | std r4,STK_PARM(R4)(r1) /* Save ret buffer */ |
202 | 202 | ||
203 | mr r4,r5 | 203 | mr r4,r5 |
204 | mr r5,r6 | 204 | mr r5,r6 |
@@ -206,14 +206,14 @@ _GLOBAL(plpar_hcall9) | |||
206 | mr r7,r8 | 206 | mr r7,r8 |
207 | mr r8,r9 | 207 | mr r8,r9 |
208 | mr r9,r10 | 208 | mr r9,r10 |
209 | ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ | 209 | ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */ |
210 | ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ | 210 | ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */ |
211 | ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ | 211 | ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */ |
212 | 212 | ||
213 | HVSC /* invoke the hypervisor */ | 213 | HVSC /* invoke the hypervisor */ |
214 | 214 | ||
215 | mr r0,r12 | 215 | mr r0,r12 |
216 | ld r12,STK_PARM(r4)(r1) | 216 | ld r12,STK_PARM(R4)(r1) |
217 | std r4, 0(r12) | 217 | std r4, 0(r12) |
218 | std r5, 8(r12) | 218 | std r5, 8(r12) |
219 | std r6, 16(r12) | 219 | std r6, 16(r12) |
@@ -238,7 +238,7 @@ _GLOBAL(plpar_hcall9_raw) | |||
238 | mfcr r0 | 238 | mfcr r0 |
239 | stw r0,8(r1) | 239 | stw r0,8(r1) |
240 | 240 | ||
241 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 241 | std r4,STK_PARM(R4)(r1) /* Save ret buffer */ |
242 | 242 | ||
243 | mr r4,r5 | 243 | mr r4,r5 |
244 | mr r5,r6 | 244 | mr r5,r6 |
@@ -246,14 +246,14 @@ _GLOBAL(plpar_hcall9_raw) | |||
246 | mr r7,r8 | 246 | mr r7,r8 |
247 | mr r8,r9 | 247 | mr r8,r9 |
248 | mr r9,r10 | 248 | mr r9,r10 |
249 | ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ | 249 | ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */ |
250 | ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ | 250 | ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */ |
251 | ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ | 251 | ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */ |
252 | 252 | ||
253 | HVSC /* invoke the hypervisor */ | 253 | HVSC /* invoke the hypervisor */ |
254 | 254 | ||
255 | mr r0,r12 | 255 | mr r0,r12 |
256 | ld r12,STK_PARM(r4)(r1) | 256 | ld r12,STK_PARM(R4)(r1) |
257 | std r4, 0(r12) | 257 | std r4, 0(r12) |
258 | std r5, 8(r12) | 258 | std r5, 8(r12) |
259 | std r6, 16(r12) | 259 | std r6, 16(r12) |
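
plpar_hcall9 exercises both macro families at once: the return-buffer pointer is parked with STK_PARM(R4), and because only r3-r10 carry arguments, the seventh through ninth hcall arguments arrive on the caller's stack, in the slots notionally belonging to "r11"-"r13". Under the STK_PARM sketch above that works out to:

        ld      r10,STK_PARM(R11)(r1)   /* arg7 from offset 48 + 8*8 = 112 */
        ld      r11,STK_PARM(R12)(r1)   /* arg8 from offset 120 */
        ld      r12,STK_PARM(R13)(r1)   /* arg9 from offset 128 */
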