author | Michael Neuling <mikey@neuling.org> | 2012-06-25 09:33:10 -0400
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2012-07-10 05:17:55 -0400
commit | c75df6f96c59beed8632e3aced5fb4faabaa6c5b (patch)
tree | b21ce9394028ec4520a71d87391dad8ab29edd67 /arch/powerpc/kvm/book3s_hv_rmhandlers.S
parent | 564aa5cfd3e33ef69a5ca6c170a0fe79c6805e52 (diff)
powerpc: Fix usage of register macros getting ready for %r0 change
Anything that uses a constructed instruction (i.e. from ppc-opcode.h) needs to use the new R0 macro, as %r0 is not going to work.
Also convert usages of macros where we are just determining an offset
(usually for a load/store), like:
std r14,STK_REG(r14)(r1)
We can't use STK_REG(r14) there because %r14 doesn't work inside the STK_REG macro, which is just calculating an offset.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
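For context, the distinction the commit message relies on can be sketched roughly as follows. This is a simplified, illustrative sketch only: the real definitions live in asm/ppc_asm.h and asm/ppc-opcode.h, and the VCPU_GPR/STK_REG layouts shown here are made up, so the exact forms in the tree may differ.

/* Simplified sketch -- not the exact kernel definitions. Once the
 * lower-case names become real assembler register names, they can no
 * longer be used in macro arithmetic:
 */
#define r14	%r14			/* assembler register name            */
#define R14	14			/* plain number, usable in arithmetic */

/* A constructed instruction from ppc-opcode.h ORs the register number
 * into an opcode field, so it must be handed R14, never %r14:
 */
#define ___PPC_RA(a)	(((a) & 0x1f) << 16)

/* Offset macros such as STK_REG() or VCPU_GPR() likewise just compute a
 * byte offset from the register number (layout below is illustrative):
 */
#define VCPU_GPR(n)	(VCPU_GPRS + (n) * 8)

	/* So in the handlers the operand register stays lower-case while
	 * the macro argument becomes the upper-case number:
	 */
	ld	r14, VCPU_GPR(R14)(r4)
	std	r14, STK_REG(R14)(r1)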
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rmhandlers.S | 220
1 file changed, 110 insertions, 110 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a1044f43becd..bc99015030c3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
206 | /* Load up FP, VMX and VSX registers */ | 206 | /* Load up FP, VMX and VSX registers */ |
207 | bl kvmppc_load_fp | 207 | bl kvmppc_load_fp |
208 | | 208 |
209 | ld r14, VCPU_GPR(r14)(r4) | 209 | ld r14, VCPU_GPR(R14)(r4) |
210 | ld r15, VCPU_GPR(r15)(r4) | 210 | ld r15, VCPU_GPR(R15)(r4) |
211 | ld r16, VCPU_GPR(r16)(r4) | 211 | ld r16, VCPU_GPR(R16)(r4) |
212 | ld r17, VCPU_GPR(r17)(r4) | 212 | ld r17, VCPU_GPR(R17)(r4) |
213 | ld r18, VCPU_GPR(r18)(r4) | 213 | ld r18, VCPU_GPR(R18)(r4) |
214 | ld r19, VCPU_GPR(r19)(r4) | 214 | ld r19, VCPU_GPR(R19)(r4) |
215 | ld r20, VCPU_GPR(r20)(r4) | 215 | ld r20, VCPU_GPR(R20)(r4) |
216 | ld r21, VCPU_GPR(r21)(r4) | 216 | ld r21, VCPU_GPR(R21)(r4) |
217 | ld r22, VCPU_GPR(r22)(r4) | 217 | ld r22, VCPU_GPR(R22)(r4) |
218 | ld r23, VCPU_GPR(r23)(r4) | 218 | ld r23, VCPU_GPR(R23)(r4) |
219 | ld r24, VCPU_GPR(r24)(r4) | 219 | ld r24, VCPU_GPR(R24)(r4) |
220 | ld r25, VCPU_GPR(r25)(r4) | 220 | ld r25, VCPU_GPR(R25)(r4) |
221 | ld r26, VCPU_GPR(r26)(r4) | 221 | ld r26, VCPU_GPR(R26)(r4) |
222 | ld r27, VCPU_GPR(r27)(r4) | 222 | ld r27, VCPU_GPR(R27)(r4) |
223 | ld r28, VCPU_GPR(r28)(r4) | 223 | ld r28, VCPU_GPR(R28)(r4) |
224 | ld r29, VCPU_GPR(r29)(r4) | 224 | ld r29, VCPU_GPR(R29)(r4) |
225 | ld r30, VCPU_GPR(r30)(r4) | 225 | ld r30, VCPU_GPR(R30)(r4) |
226 | ld r31, VCPU_GPR(r31)(r4) | 226 | ld r31, VCPU_GPR(R31)(r4) |
227 | | 227 |
228 | BEGIN_FTR_SECTION | 228 | BEGIN_FTR_SECTION |
229 | /* Switch DSCR to guest value */ | 229 | /* Switch DSCR to guest value */ |
@@ -547,21 +547,21 @@ fast_guest_return:
547 | mtlr r5 | 547 | mtlr r5 |
548 | mtcr r6 | 548 | mtcr r6 |
549 | | 549 |
550 | ld r0, VCPU_GPR(r0)(r4) | 550 | ld r0, VCPU_GPR(R0)(r4) |
551 | ld r1, VCPU_GPR(r1)(r4) | 551 | ld r1, VCPU_GPR(R1)(r4) |
552 | ld r2, VCPU_GPR(r2)(r4) | 552 | ld r2, VCPU_GPR(R2)(r4) |
553 | ld r3, VCPU_GPR(r3)(r4) | 553 | ld r3, VCPU_GPR(R3)(r4) |
554 | ld r5, VCPU_GPR(r5)(r4) | 554 | ld r5, VCPU_GPR(R5)(r4) |
555 | ld r6, VCPU_GPR(r6)(r4) | 555 | ld r6, VCPU_GPR(R6)(r4) |
556 | ld r7, VCPU_GPR(r7)(r4) | 556 | ld r7, VCPU_GPR(R7)(r4) |
557 | ld r8, VCPU_GPR(r8)(r4) | 557 | ld r8, VCPU_GPR(R8)(r4) |
558 | ld r9, VCPU_GPR(r9)(r4) | 558 | ld r9, VCPU_GPR(R9)(r4) |
559 | ld r10, VCPU_GPR(r10)(r4) | 559 | ld r10, VCPU_GPR(R10)(r4) |
560 | ld r11, VCPU_GPR(r11)(r4) | 560 | ld r11, VCPU_GPR(R11)(r4) |
561 | ld r12, VCPU_GPR(r12)(r4) | 561 | ld r12, VCPU_GPR(R12)(r4) |
562 | ld r13, VCPU_GPR(r13)(r4) | 562 | ld r13, VCPU_GPR(R13)(r4) |
563 | | 563 |
564 | ld r4, VCPU_GPR(r4)(r4) | 564 | ld r4, VCPU_GPR(R4)(r4) |
565 | | 565 |
566 | hrfid | 566 | hrfid |
567 | b . | 567 | b . |
@@ -590,22 +590,22 @@ kvmppc_interrupt:
590 | | 590 |
591 | /* Save registers */ | 591 | /* Save registers */ |
592 | | 592 |
593 | std r0, VCPU_GPR(r0)(r9) | 593 | std r0, VCPU_GPR(R0)(r9) |
594 | std r1, VCPU_GPR(r1)(r9) | 594 | std r1, VCPU_GPR(R1)(r9) |
595 | std r2, VCPU_GPR(r2)(r9) | 595 | std r2, VCPU_GPR(R2)(r9) |
596 | std r3, VCPU_GPR(r3)(r9) | 596 | std r3, VCPU_GPR(R3)(r9) |
597 | std r4, VCPU_GPR(r4)(r9) | 597 | std r4, VCPU_GPR(R4)(r9) |
598 | std r5, VCPU_GPR(r5)(r9) | 598 | std r5, VCPU_GPR(R5)(r9) |
599 | std r6, VCPU_GPR(r6)(r9) | 599 | std r6, VCPU_GPR(R6)(r9) |
600 | std r7, VCPU_GPR(r7)(r9) | 600 | std r7, VCPU_GPR(R7)(r9) |
601 | std r8, VCPU_GPR(r8)(r9) | 601 | std r8, VCPU_GPR(R8)(r9) |
602 | ld r0, HSTATE_HOST_R2(r13) | 602 | ld r0, HSTATE_HOST_R2(r13) |
603 | std r0, VCPU_GPR(r9)(r9) | 603 | std r0, VCPU_GPR(R9)(r9) |
604 | std r10, VCPU_GPR(r10)(r9) | 604 | std r10, VCPU_GPR(R10)(r9) |
605 | std r11, VCPU_GPR(r11)(r9) | 605 | std r11, VCPU_GPR(R11)(r9) |
606 | ld r3, HSTATE_SCRATCH0(r13) | 606 | ld r3, HSTATE_SCRATCH0(r13) |
607 | lwz r4, HSTATE_SCRATCH1(r13) | 607 | lwz r4, HSTATE_SCRATCH1(r13) |
608 | std r3, VCPU_GPR(r12)(r9) | 608 | std r3, VCPU_GPR(R12)(r9) |
609 | stw r4, VCPU_CR(r9) | 609 | stw r4, VCPU_CR(r9) |
610 | | 610 |
611 | /* Restore R1/R2 so we can handle faults */ | 611 | /* Restore R1/R2 so we can handle faults */ |
@@ -626,7 +626,7 @@ kvmppc_interrupt:
626 | | 626 |
627 | GET_SCRATCH0(r3) | 627 | GET_SCRATCH0(r3) |
628 | mflr r4 | 628 | mflr r4 |
629 | std r3, VCPU_GPR(r13)(r9) | 629 | std r3, VCPU_GPR(R13)(r9) |
630 | std r4, VCPU_LR(r9) | 630 | std r4, VCPU_LR(r9) |
631 | | 631 |
632 | /* Unset guest mode */ | 632 | /* Unset guest mode */ |
@@ -968,24 +968,24 @@ BEGIN_FTR_SECTION
968 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 968 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
969 | | 969 |
970 | /* Save non-volatile GPRs */ | 970 | /* Save non-volatile GPRs */ |
971 | std r14, VCPU_GPR(r14)(r9) | 971 | std r14, VCPU_GPR(R14)(r9) |
972 | std r15, VCPU_GPR(r15)(r9) | 972 | std r15, VCPU_GPR(R15)(r9) |
973 | std r16, VCPU_GPR(r16)(r9) | 973 | std r16, VCPU_GPR(R16)(r9) |
974 | std r17, VCPU_GPR(r17)(r9) | 974 | std r17, VCPU_GPR(R17)(r9) |
975 | std r18, VCPU_GPR(r18)(r9) | 975 | std r18, VCPU_GPR(R18)(r9) |
976 | std r19, VCPU_GPR(r19)(r9) | 976 | std r19, VCPU_GPR(R19)(r9) |
977 | std r20, VCPU_GPR(r20)(r9) | 977 | std r20, VCPU_GPR(R20)(r9) |
978 | std r21, VCPU_GPR(r21)(r9) | 978 | std r21, VCPU_GPR(R21)(r9) |
979 | std r22, VCPU_GPR(r22)(r9) | 979 | std r22, VCPU_GPR(R22)(r9) |
980 | std r23, VCPU_GPR(r23)(r9) | 980 | std r23, VCPU_GPR(R23)(r9) |
981 | std r24, VCPU_GPR(r24)(r9) | 981 | std r24, VCPU_GPR(R24)(r9) |
982 | std r25, VCPU_GPR(r25)(r9) | 982 | std r25, VCPU_GPR(R25)(r9) |
983 | std r26, VCPU_GPR(r26)(r9) | 983 | std r26, VCPU_GPR(R26)(r9) |
984 | std r27, VCPU_GPR(r27)(r9) | 984 | std r27, VCPU_GPR(R27)(r9) |
985 | std r28, VCPU_GPR(r28)(r9) | 985 | std r28, VCPU_GPR(R28)(r9) |
986 | std r29, VCPU_GPR(r29)(r9) | 986 | std r29, VCPU_GPR(R29)(r9) |
987 | std r30, VCPU_GPR(r30)(r9) | 987 | std r30, VCPU_GPR(R30)(r9) |
988 | std r31, VCPU_GPR(r31)(r9) | 988 | std r31, VCPU_GPR(R31)(r9) |
989 | | 989 |
990 | /* Save SPRGs */ | 990 | /* Save SPRGs */ |
991 | mfspr r3, SPRN_SPRG0 | 991 | mfspr r3, SPRN_SPRG0 |
@@ -1160,7 +1160,7 @@ kvmppc_hdsi:
1160 | andi. r0, r11, MSR_DR /* data relocation enabled? */ | 1160 | andi. r0, r11, MSR_DR /* data relocation enabled? */ |
1161 | beq 3f | 1161 | beq 3f |
1162 | clrrdi r0, r4, 28 | 1162 | clrrdi r0, r4, 28 |
1163 | PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ | 1163 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
1164 | bne 1f /* if no SLB entry found */ | 1164 | bne 1f /* if no SLB entry found */ |
1165 | 4: std r4, VCPU_FAULT_DAR(r9) | 1165 | 4: std r4, VCPU_FAULT_DAR(r9) |
1166 | stw r6, VCPU_FAULT_DSISR(r9) | 1166 | stw r6, VCPU_FAULT_DSISR(r9) |
@@ -1234,7 +1234,7 @@ kvmppc_hisi:
1234 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ | 1234 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ |
1235 | beq 3f | 1235 | beq 3f |
1236 | clrrdi r0, r10, 28 | 1236 | clrrdi r0, r10, 28 |
1237 | PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ | 1237 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
1238 | bne 1f /* if no SLB entry found */ | 1238 | bne 1f /* if no SLB entry found */ |
1239 | 4: | 1239 | 4: |
1240 | /* Search the hash table. */ | 1240 | /* Search the hash table. */ |
@@ -1278,7 +1278,7 @@ kvmppc_hisi:
1278 | */ | 1278 | */ |
1279 | .globl hcall_try_real_mode | 1279 | .globl hcall_try_real_mode |
1280 | hcall_try_real_mode: | 1280 | hcall_try_real_mode: |
1281 | ld r3,VCPU_GPR(r3)(r9) | 1281 | ld r3,VCPU_GPR(R3)(r9) |
1282 | andi. r0,r11,MSR_PR | 1282 | andi. r0,r11,MSR_PR |
1283 | bne hcall_real_cont | 1283 | bne hcall_real_cont |
1284 | clrrdi r3,r3,2 | 1284 | clrrdi r3,r3,2 |
@@ -1291,12 +1291,12 @@ hcall_try_real_mode:
1291 | add r3,r3,r4 | 1291 | add r3,r3,r4 |
1292 | mtctr r3 | 1292 | mtctr r3 |
1293 | mr r3,r9 /* get vcpu pointer */ | 1293 | mr r3,r9 /* get vcpu pointer */ |
1294 | ld r4,VCPU_GPR(r4)(r9) | 1294 | ld r4,VCPU_GPR(R4)(r9) |
1295 | bctrl | 1295 | bctrl |
1296 | cmpdi r3,H_TOO_HARD | 1296 | cmpdi r3,H_TOO_HARD |
1297 | beq hcall_real_fallback | 1297 | beq hcall_real_fallback |
1298 | ld r4,HSTATE_KVM_VCPU(r13) | 1298 | ld r4,HSTATE_KVM_VCPU(r13) |
1299 | std r3,VCPU_GPR(r3)(r4) | 1299 | std r3,VCPU_GPR(R3)(r4) |
1300 | ld r10,VCPU_PC(r4) | 1300 | ld r10,VCPU_PC(r4) |
1301 | ld r11,VCPU_MSR(r4) | 1301 | ld r11,VCPU_MSR(r4) |
1302 | b fast_guest_return | 1302 | b fast_guest_return |
@@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede)
1424 | li r0,0 /* set trap to 0 to say hcall is handled */ | 1424 | li r0,0 /* set trap to 0 to say hcall is handled */ |
1425 | stw r0,VCPU_TRAP(r3) | 1425 | stw r0,VCPU_TRAP(r3) |
1426 | li r0,H_SUCCESS | 1426 | li r0,H_SUCCESS |
1427 | std r0,VCPU_GPR(r3)(r3) | 1427 | std r0,VCPU_GPR(R3)(r3) |
1428 | BEGIN_FTR_SECTION | 1428 | BEGIN_FTR_SECTION |
1429 | b 2f /* just send it up to host on 970 */ | 1429 | b 2f /* just send it up to host on 970 */ |
1430 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | 1430 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) |
@@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1443 | addi r6,r5,VCORE_NAPPING_THREADS | 1443 | addi r6,r5,VCORE_NAPPING_THREADS |
1444 | 31: lwarx r4,0,r6 | 1444 | 31: lwarx r4,0,r6 |
1445 | or r4,r4,r0 | 1445 | or r4,r4,r0 |
1446 | PPC_POPCNTW(r7,r4) | 1446 | PPC_POPCNTW(R7,R4) |
1447 | cmpw r7,r8 | 1447 | cmpw r7,r8 |
1448 | bge 2f | 1448 | bge 2f |
1449 | stwcx. r4,0,r6 | 1449 | stwcx. r4,0,r6 |
@@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1464 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. | 1464 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. |
1465 | */ | 1465 | */ |
1466 | /* Save non-volatile GPRs */ | 1466 | /* Save non-volatile GPRs */ |
1467 | std r14, VCPU_GPR(r14)(r3) | 1467 | std r14, VCPU_GPR(R14)(r3) |
1468 | std r15, VCPU_GPR(r15)(r3) | 1468 | std r15, VCPU_GPR(R15)(r3) |
1469 | std r16, VCPU_GPR(r16)(r3) | 1469 | std r16, VCPU_GPR(R16)(r3) |
1470 | std r17, VCPU_GPR(r17)(r3) | 1470 | std r17, VCPU_GPR(R17)(r3) |
1471 | std r18, VCPU_GPR(r18)(r3) | 1471 | std r18, VCPU_GPR(R18)(r3) |
1472 | std r19, VCPU_GPR(r19)(r3) | 1472 | std r19, VCPU_GPR(R19)(r3) |
1473 | std r20, VCPU_GPR(r20)(r3) | 1473 | std r20, VCPU_GPR(R20)(r3) |
1474 | std r21, VCPU_GPR(r21)(r3) | 1474 | std r21, VCPU_GPR(R21)(r3) |
1475 | std r22, VCPU_GPR(r22)(r3) | 1475 | std r22, VCPU_GPR(R22)(r3) |
1476 | std r23, VCPU_GPR(r23)(r3) | 1476 | std r23, VCPU_GPR(R23)(r3) |
1477 | std r24, VCPU_GPR(r24)(r3) | 1477 | std r24, VCPU_GPR(R24)(r3) |
1478 | std r25, VCPU_GPR(r25)(r3) | 1478 | std r25, VCPU_GPR(R25)(r3) |
1479 | std r26, VCPU_GPR(r26)(r3) | 1479 | std r26, VCPU_GPR(R26)(r3) |
1480 | std r27, VCPU_GPR(r27)(r3) | 1480 | std r27, VCPU_GPR(R27)(r3) |
1481 | std r28, VCPU_GPR(r28)(r3) | 1481 | std r28, VCPU_GPR(R28)(r3) |
1482 | std r29, VCPU_GPR(r29)(r3) | 1482 | std r29, VCPU_GPR(R29)(r3) |
1483 | std r30, VCPU_GPR(r30)(r3) | 1483 | std r30, VCPU_GPR(R30)(r3) |
1484 | std r31, VCPU_GPR(r31)(r3) | 1484 | std r31, VCPU_GPR(R31)(r3) |
1485 | | 1485 |
1486 | /* save FP state */ | 1486 | /* save FP state */ |
1487 | bl .kvmppc_save_fp | 1487 | bl .kvmppc_save_fp |
@@ -1513,24 +1513,24 @@ kvm_end_cede:
1513 | bl kvmppc_load_fp | 1513 | bl kvmppc_load_fp |
1514 | | 1514 |
1515 | /* Load NV GPRS */ | 1515 | /* Load NV GPRS */ |
1516 | ld r14, VCPU_GPR(r14)(r4) | 1516 | ld r14, VCPU_GPR(R14)(r4) |
1517 | ld r15, VCPU_GPR(r15)(r4) | 1517 | ld r15, VCPU_GPR(R15)(r4) |
1518 | ld r16, VCPU_GPR(r16)(r4) | 1518 | ld r16, VCPU_GPR(R16)(r4) |
1519 | ld r17, VCPU_GPR(r17)(r4) | 1519 | ld r17, VCPU_GPR(R17)(r4) |
1520 | ld r18, VCPU_GPR(r18)(r4) | 1520 | ld r18, VCPU_GPR(R18)(r4) |
1521 | ld r19, VCPU_GPR(r19)(r4) | 1521 | ld r19, VCPU_GPR(R19)(r4) |
1522 | ld r20, VCPU_GPR(r20)(r4) | 1522 | ld r20, VCPU_GPR(R20)(r4) |
1523 | ld r21, VCPU_GPR(r21)(r4) | 1523 | ld r21, VCPU_GPR(R21)(r4) |
1524 | ld r22, VCPU_GPR(r22)(r4) | 1524 | ld r22, VCPU_GPR(R22)(r4) |
1525 | ld r23, VCPU_GPR(r23)(r4) | 1525 | ld r23, VCPU_GPR(R23)(r4) |
1526 | ld r24, VCPU_GPR(r24)(r4) | 1526 | ld r24, VCPU_GPR(R24)(r4) |
1527 | ld r25, VCPU_GPR(r25)(r4) | 1527 | ld r25, VCPU_GPR(R25)(r4) |
1528 | ld r26, VCPU_GPR(r26)(r4) | 1528 | ld r26, VCPU_GPR(R26)(r4) |
1529 | ld r27, VCPU_GPR(r27)(r4) | 1529 | ld r27, VCPU_GPR(R27)(r4) |
1530 | ld r28, VCPU_GPR(r28)(r4) | 1530 | ld r28, VCPU_GPR(R28)(r4) |
1531 | ld r29, VCPU_GPR(r29)(r4) | 1531 | ld r29, VCPU_GPR(R29)(r4) |
1532 | ld r30, VCPU_GPR(r30)(r4) | 1532 | ld r30, VCPU_GPR(R30)(r4) |
1533 | ld r31, VCPU_GPR(r31)(r4) | 1533 | ld r31, VCPU_GPR(R31)(r4) |
1534 | | 1534 |
1535 | /* clear our bit in vcore->napping_threads */ | 1535 | /* clear our bit in vcore->napping_threads */ |
1536 | 33: ld r5,HSTATE_KVM_VCORE(r13) | 1536 | 33: ld r5,HSTATE_KVM_VCORE(r13) |
@@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION
1649 | reg = 0 | 1649 | reg = 0 |
1650 | .rept 32 | 1650 | .rept 32 |
1651 | li r6,reg*16+VCPU_VSRS | 1651 | li r6,reg*16+VCPU_VSRS |
1652 | STXVD2X(reg,r6,r3) | 1652 | STXVD2X(reg,R6,R3) |
1653 | reg = reg + 1 | 1653 | reg = reg + 1 |
1654 | .endr | 1654 | .endr |
1655 | FTR_SECTION_ELSE | 1655 | FTR_SECTION_ELSE |
@@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION
1711 | reg = 0 | 1711 | reg = 0 |
1712 | .rept 32 | 1712 | .rept 32 |
1713 | li r7,reg*16+VCPU_VSRS | 1713 | li r7,reg*16+VCPU_VSRS |
1714 | LXVD2X(reg,r7,r4) | 1714 | LXVD2X(reg,R7,R4) |
1715 | reg = reg + 1 | 1715 | reg = reg + 1 |
1716 | .endr | 1716 | .endr |
1717 | FTR_SECTION_ELSE | 1717 | FTR_SECTION_ELSE |