diff options
author | Michael Neuling <mikey@neuling.org> | 2012-06-25 09:33:23 -0400 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2012-07-10 05:18:30 -0400 |
commit | 0b7673c35e9240a364594ac4f2c2dd2c111c0aba (patch) | |
tree | ceb3e3eb6b06a81ace2337fe975ce7fd0b8c4d5e | |
parent | 0972def44fd76899fea8682ec8e3c47d429f33ca (diff) |
powerpc: Enforce usage of R0-R31 where possible
Enforce the use of R0-R31 in macros where possible now we have all the
fixes in.
R0-R31 macros are removed here so that they can't be used anymore. They
should not be defined anywhere.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r-- | arch/powerpc/include/asm/ppc-opcode.h | 41 | ||||
-rw-r--r-- | arch/powerpc/include/asm/ppc_asm.h | 17 | ||||
-rw-r--r-- | arch/powerpc/kernel/fpu.S | 12 | ||||
-rw-r--r-- | arch/powerpc/kvm/booke_interrupts.S | 3 |
4 files changed, 23 insertions, 50 deletions
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index c74e00778f72..d14508f82247 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
@@ -15,39 +15,6 @@ | |||
15 | #include <linux/stringify.h> | 15 | #include <linux/stringify.h> |
16 | #include <asm/asm-compat.h> | 16 | #include <asm/asm-compat.h> |
17 | 17 | ||
18 | #define R0 0 | ||
19 | #define R1 1 | ||
20 | #define R2 2 | ||
21 | #define R3 3 | ||
22 | #define R4 4 | ||
23 | #define R5 5 | ||
24 | #define R6 6 | ||
25 | #define R7 7 | ||
26 | #define R8 8 | ||
27 | #define R9 9 | ||
28 | #define R10 10 | ||
29 | #define R11 11 | ||
30 | #define R12 12 | ||
31 | #define R13 13 | ||
32 | #define R14 14 | ||
33 | #define R15 15 | ||
34 | #define R16 16 | ||
35 | #define R17 17 | ||
36 | #define R18 18 | ||
37 | #define R19 19 | ||
38 | #define R20 20 | ||
39 | #define R21 21 | ||
40 | #define R22 22 | ||
41 | #define R23 23 | ||
42 | #define R24 24 | ||
43 | #define R25 25 | ||
44 | #define R26 26 | ||
45 | #define R27 27 | ||
46 | #define R28 28 | ||
47 | #define R29 29 | ||
48 | #define R30 30 | ||
49 | #define R31 31 | ||
50 | |||
51 | #define __REG_R0 0 | 18 | #define __REG_R0 0 |
52 | #define __REG_R1 1 | 19 | #define __REG_R1 1 |
53 | #define __REG_R2 2 | 20 | #define __REG_R2 2 |
@@ -181,10 +148,10 @@ | |||
181 | #define ___PPC_RB(b) (((b) & 0x1f) << 11) | 148 | #define ___PPC_RB(b) (((b) & 0x1f) << 11) |
182 | #define ___PPC_RS(s) (((s) & 0x1f) << 21) | 149 | #define ___PPC_RS(s) (((s) & 0x1f) << 21) |
183 | #define ___PPC_RT(t) ___PPC_RS(t) | 150 | #define ___PPC_RT(t) ___PPC_RS(t) |
184 | #define __PPC_RA(a) (((a) & 0x1f) << 16) | 151 | #define __PPC_RA(a) ___PPC_RA(__REG_##a) |
185 | #define __PPC_RB(b) (((b) & 0x1f) << 11) | 152 | #define __PPC_RB(b) ___PPC_RB(__REG_##b) |
186 | #define __PPC_RS(s) (((s) & 0x1f) << 21) | 153 | #define __PPC_RS(s) ___PPC_RS(__REG_##s) |
187 | #define __PPC_RT(s) __PPC_RS(s) | 154 | #define __PPC_RT(t) ___PPC_RT(__REG_##t) |
188 | #define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3)) | 155 | #define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3)) |
189 | #define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4)) | 156 | #define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4)) |
190 | #define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) | 157 | #define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) |
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index dbc768358ac1..ea2a86e8ff95 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -126,26 +126,26 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
126 | #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) | 126 | #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) |
127 | 127 | ||
128 | /* Save the lower 32 VSRs in the thread VSR region */ | 128 | /* Save the lower 32 VSRs in the thread VSR region */ |
129 | #define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,base,b) | 129 | #define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b) |
130 | #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) | 130 | #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) |
131 | #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) | 131 | #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) |
132 | #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) | 132 | #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) |
133 | #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) | 133 | #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) |
134 | #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) | 134 | #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) |
135 | #define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,base,b) | 135 | #define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b) |
136 | #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) | 136 | #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) |
137 | #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) | 137 | #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) |
138 | #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) | 138 | #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) |
139 | #define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base) | 139 | #define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base) |
140 | #define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base) | 140 | #define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base) |
141 | /* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */ | 141 | /* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */ |
142 | #define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,base,b) | 142 | #define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,R##base,R##b) |
143 | #define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base) | 143 | #define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base) |
144 | #define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base) | 144 | #define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base) |
145 | #define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base) | 145 | #define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base) |
146 | #define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base) | 146 | #define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base) |
147 | #define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base) | 147 | #define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base) |
148 | #define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,base,b) | 148 | #define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,R##base,R##b) |
149 | #define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base) | 149 | #define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base) |
150 | #define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base) | 150 | #define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base) |
151 | #define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base) | 151 | #define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base) |
@@ -183,15 +183,18 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
183 | #else | 183 | #else |
184 | #define ULONG_SIZE 4 | 184 | #define ULONG_SIZE 4 |
185 | #endif | 185 | #endif |
186 | #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) | 186 | #define __VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) |
187 | #define VCPU_GPR(n) __VCPU_GPR(__REG_##n) | ||
187 | 188 | ||
188 | #ifdef __KERNEL__ | 189 | #ifdef __KERNEL__ |
189 | #ifdef CONFIG_PPC64 | 190 | #ifdef CONFIG_PPC64 |
190 | 191 | ||
191 | #define STACKFRAMESIZE 256 | 192 | #define STACKFRAMESIZE 256 |
192 | #define STK_REG(i) (112 + ((i)-14)*8) | 193 | #define __STK_REG(i) (112 + ((i)-14)*8) |
194 | #define STK_REG(i) __STK_REG(__REG_##i) | ||
193 | 195 | ||
194 | #define STK_PARAM(i) (48 + ((i)-3)*8) | 196 | #define __STK_PARAM(i) (48 + ((i)-3)*8) |
197 | #define STK_PARAM(i) __STK_PARAM(__REG_##i) | ||
195 | 198 | ||
196 | #define XGLUE(a,b) a##b | 199 | #define XGLUE(a,b) a##b |
197 | #define GLUE(a,b) XGLUE(a,b) | 200 | #define GLUE(a,b) XGLUE(a,b) |
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 71c1c73bc65f..e0ada05f2df3 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
27 | 27 | ||
28 | #ifdef CONFIG_VSX | 28 | #ifdef CONFIG_VSX |
29 | #define REST_32FPVSRS(n,c,base) \ | 29 | #define __REST_32FPVSRS(n,c,base) \ |
30 | BEGIN_FTR_SECTION \ | 30 | BEGIN_FTR_SECTION \ |
31 | b 2f; \ | 31 | b 2f; \ |
32 | END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ | 32 | END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ |
@@ -35,7 +35,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ | |||
35 | 2: REST_32VSRS(n,c,base); \ | 35 | 2: REST_32VSRS(n,c,base); \ |
36 | 3: | 36 | 3: |
37 | 37 | ||
38 | #define SAVE_32FPVSRS(n,c,base) \ | 38 | #define __SAVE_32FPVSRS(n,c,base) \ |
39 | BEGIN_FTR_SECTION \ | 39 | BEGIN_FTR_SECTION \ |
40 | b 2f; \ | 40 | b 2f; \ |
41 | END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ | 41 | END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ |
@@ -44,9 +44,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ | |||
44 | 2: SAVE_32VSRS(n,c,base); \ | 44 | 2: SAVE_32VSRS(n,c,base); \ |
45 | 3: | 45 | 3: |
46 | #else | 46 | #else |
47 | #define REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) | 47 | #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) |
48 | #define SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) | 48 | #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) |
49 | #endif | 49 | #endif |
50 | #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base) | ||
51 | #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base) | ||
50 | 52 | ||
51 | /* | 53 | /* |
52 | * This task wants to use the FPU now. | 54 | * This task wants to use the FPU now. |
@@ -79,7 +81,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
79 | beq 1f | 81 | beq 1f |
80 | toreal(r4) | 82 | toreal(r4) |
81 | addi r4,r4,THREAD /* want last_task_used_math->thread */ | 83 | addi r4,r4,THREAD /* want last_task_used_math->thread */ |
82 | SAVE_32FPVSRS(0, r5, r4) | 84 | SAVE_32FPVSRS(0, R5, R4) |
83 | mffs fr0 | 85 | mffs fr0 |
84 | stfd fr0,THREAD_FPSCR(r4) | 86 | stfd fr0,THREAD_FPSCR(r4) |
85 | PPC_LL r5,PT_REGS(r4) | 87 | PPC_LL r5,PT_REGS(r4) |
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 91c971bcddd0..8fd4b2a0911b 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -34,7 +34,8 @@ | |||
34 | #define HOST_R2 12 | 34 | #define HOST_R2 12 |
35 | #define HOST_CR 16 | 35 | #define HOST_CR 16 |
36 | #define HOST_NV_GPRS 20 | 36 | #define HOST_NV_GPRS 20 |
37 | #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) | 37 | #define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) |
38 | #define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n) | ||
38 | #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4) | 39 | #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4) |
39 | #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ | 40 | #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ |
40 | #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ | 41 | #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ |