diff options
author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2012-05-23 10:24:51 -0400 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2012-05-24 04:10:10 -0400 |
commit | f4815ac6c935b8e441fe12504d62e0e8ff7f7ce5 (patch) | |
tree | 32b78252b6b155e7a218c571d9e994c78da1aee0 /arch/s390/include/asm/processor.h | |
parent | da477737c5ec99d37cb78dab909aa0402a0ebf18 (diff) |
s390/headers: replace __s390x__ with CONFIG_64BIT where possible
Replace __s390x__ with CONFIG_64BIT in all places that are not exported
to userspace or guarded with #ifdef __KERNEL__.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/include/asm/processor.h')
-rw-r--r-- | arch/s390/include/asm/processor.h | 30 |
1 file changed, 15 insertions, 15 deletions
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 0aa04f9bc624..62e0d9f9a29a 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -39,27 +39,27 @@ extern int sysctl_ieee_emulation_warnings; | |||
39 | /* | 39 | /* |
40 | * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. | 40 | * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. |
41 | */ | 41 | */ |
42 | #ifndef __s390x__ | 42 | #ifndef CONFIG_64BIT |
43 | 43 | ||
44 | #define TASK_SIZE (1UL << 31) | 44 | #define TASK_SIZE (1UL << 31) |
45 | #define TASK_UNMAPPED_BASE (1UL << 30) | 45 | #define TASK_UNMAPPED_BASE (1UL << 30) |
46 | 46 | ||
47 | #else /* __s390x__ */ | 47 | #else /* CONFIG_64BIT */ |
48 | 48 | ||
49 | #define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) | 49 | #define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) |
50 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ | 50 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ |
51 | (1UL << 30) : (1UL << 41)) | 51 | (1UL << 30) : (1UL << 41)) |
52 | #define TASK_SIZE TASK_SIZE_OF(current) | 52 | #define TASK_SIZE TASK_SIZE_OF(current) |
53 | 53 | ||
54 | #endif /* __s390x__ */ | 54 | #endif /* CONFIG_64BIT */ |
55 | 55 | ||
56 | #ifndef __s390x__ | 56 | #ifndef CONFIG_64BIT |
57 | #define STACK_TOP (1UL << 31) | 57 | #define STACK_TOP (1UL << 31) |
58 | #define STACK_TOP_MAX (1UL << 31) | 58 | #define STACK_TOP_MAX (1UL << 31) |
59 | #else /* __s390x__ */ | 59 | #else /* CONFIG_64BIT */ |
60 | #define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) | 60 | #define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) |
61 | #define STACK_TOP_MAX (1UL << 42) | 61 | #define STACK_TOP_MAX (1UL << 42) |
62 | #endif /* __s390x__ */ | 62 | #endif /* CONFIG_64BIT */ |
63 | 63 | ||
64 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | 64 | #define HAVE_ARCH_PICK_MMAP_LAYOUT |
65 | 65 | ||
@@ -179,7 +179,7 @@ static inline void psw_set_key(unsigned int key) | |||
179 | */ | 179 | */ |
180 | static inline void __load_psw(psw_t psw) | 180 | static inline void __load_psw(psw_t psw) |
181 | { | 181 | { |
182 | #ifndef __s390x__ | 182 | #ifndef CONFIG_64BIT |
183 | asm volatile("lpsw %0" : : "Q" (psw) : "cc"); | 183 | asm volatile("lpsw %0" : : "Q" (psw) : "cc"); |
184 | #else | 184 | #else |
185 | asm volatile("lpswe %0" : : "Q" (psw) : "cc"); | 185 | asm volatile("lpswe %0" : : "Q" (psw) : "cc"); |
@@ -197,7 +197,7 @@ static inline void __load_psw_mask (unsigned long mask) | |||
197 | 197 | ||
198 | psw.mask = mask; | 198 | psw.mask = mask; |
199 | 199 | ||
200 | #ifndef __s390x__ | 200 | #ifndef CONFIG_64BIT |
201 | asm volatile( | 201 | asm volatile( |
202 | " basr %0,0\n" | 202 | " basr %0,0\n" |
203 | "0: ahi %0,1f-0b\n" | 203 | "0: ahi %0,1f-0b\n" |
@@ -205,14 +205,14 @@ static inline void __load_psw_mask (unsigned long mask) | |||
205 | " lpsw %1\n" | 205 | " lpsw %1\n" |
206 | "1:" | 206 | "1:" |
207 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); | 207 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); |
208 | #else /* __s390x__ */ | 208 | #else /* CONFIG_64BIT */ |
209 | asm volatile( | 209 | asm volatile( |
210 | " larl %0,1f\n" | 210 | " larl %0,1f\n" |
211 | " stg %0,%O1+8(%R1)\n" | 211 | " stg %0,%O1+8(%R1)\n" |
212 | " lpswe %1\n" | 212 | " lpswe %1\n" |
213 | "1:" | 213 | "1:" |
214 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); | 214 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); |
215 | #endif /* __s390x__ */ | 215 | #endif /* CONFIG_64BIT */ |
216 | } | 216 | } |
217 | 217 | ||
218 | /* | 218 | /* |
@@ -220,7 +220,7 @@ static inline void __load_psw_mask (unsigned long mask) | |||
220 | */ | 220 | */ |
221 | static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) | 221 | static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) |
222 | { | 222 | { |
223 | #ifndef __s390x__ | 223 | #ifndef CONFIG_64BIT |
224 | if (psw.addr & PSW_ADDR_AMODE) | 224 | if (psw.addr & PSW_ADDR_AMODE) |
225 | /* 31 bit mode */ | 225 | /* 31 bit mode */ |
226 | return (psw.addr - ilc) | PSW_ADDR_AMODE; | 226 | return (psw.addr - ilc) | PSW_ADDR_AMODE; |
@@ -250,7 +250,7 @@ static inline void __noreturn disabled_wait(unsigned long code) | |||
250 | * Store status and then load disabled wait psw, | 250 | * Store status and then load disabled wait psw, |
251 | * the processor is dead afterwards | 251 | * the processor is dead afterwards |
252 | */ | 252 | */ |
253 | #ifndef __s390x__ | 253 | #ifndef CONFIG_64BIT |
254 | asm volatile( | 254 | asm volatile( |
255 | " stctl 0,0,0(%2)\n" | 255 | " stctl 0,0,0(%2)\n" |
256 | " ni 0(%2),0xef\n" /* switch off protection */ | 256 | " ni 0(%2),0xef\n" /* switch off protection */ |
@@ -269,7 +269,7 @@ static inline void __noreturn disabled_wait(unsigned long code) | |||
269 | " lpsw 0(%1)" | 269 | " lpsw 0(%1)" |
270 | : "=m" (ctl_buf) | 270 | : "=m" (ctl_buf) |
271 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc"); | 271 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc"); |
272 | #else /* __s390x__ */ | 272 | #else /* CONFIG_64BIT */ |
273 | asm volatile( | 273 | asm volatile( |
274 | " stctg 0,0,0(%2)\n" | 274 | " stctg 0,0,0(%2)\n" |
275 | " ni 4(%2),0xef\n" /* switch off protection */ | 275 | " ni 4(%2),0xef\n" /* switch off protection */ |
@@ -302,7 +302,7 @@ static inline void __noreturn disabled_wait(unsigned long code) | |||
302 | " lpswe 0(%1)" | 302 | " lpswe 0(%1)" |
303 | : "=m" (ctl_buf) | 303 | : "=m" (ctl_buf) |
304 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); | 304 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); |
305 | #endif /* __s390x__ */ | 305 | #endif /* CONFIG_64BIT */ |
306 | while (1); | 306 | while (1); |
307 | } | 307 | } |
308 | 308 | ||
@@ -338,7 +338,7 @@ extern void (*s390_base_ext_handler_fn)(void); | |||
338 | /* | 338 | /* |
339 | * Helper macro for exception table entries | 339 | * Helper macro for exception table entries |
340 | */ | 340 | */ |
341 | #ifndef __s390x__ | 341 | #ifndef CONFIG_64BIT |
342 | #define EX_TABLE(_fault,_target) \ | 342 | #define EX_TABLE(_fault,_target) \ |
343 | ".section __ex_table,\"a\"\n" \ | 343 | ".section __ex_table,\"a\"\n" \ |
344 | " .align 4\n" \ | 344 | " .align 4\n" \ |