author     Grant Grundler <grundler@parisc-linux.org>    2005-10-21 22:46:48 -0400
committer  Kyle McMartin <kyle@parisc-linux.org>         2005-10-21 22:46:48 -0400
commit     413059f28e9949d9ad2d04d1070c63169798176e (patch)
tree       c4609bc151c3f88f7e8f2128e81c32356f842063
parent     3499495205a676d85fcc2f3c28e35ec9b43c47e3 (diff)
[PARISC] Replace uses of __LP64__ with CONFIG_64BIT
2.6.12-rc4-pa3 s/__LP64__/CONFIG_64BIT/ and fixup config.h usage

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
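A minimal sketch of the pattern this patch applies throughout (illustrative fragment, not taken from the diff): guards keyed to the compiler predefine __LP64__ become guards keyed to the Kconfig symbol CONFIG_64BIT, which is only defined once the configuration header is included -- hence the #include <linux/config.h> additions in files that lacked it. The load instructions below are placeholder examples, not code from this commit.

	/* Before: the guard follows the compiler's ABI predefine. */
	#ifdef __LP64__
		ldd	0(%r26), %r1	/* 64-bit load (placeholder insn) */
	#else
		ldw	0(%r26), %r1	/* 32-bit load (placeholder insn) */
	#endif

	/* After: the guard follows the kernel configuration; the config
	 * header must be included so CONFIG_64BIT is visible to cpp. */
	#include <linux/config.h>

	#ifdef CONFIG_64BIT
		ldd	0(%r26), %r1
	#else
		ldw	0(%r26), %r1
	#endif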
-rw-r--r--  arch/parisc/kernel/entry.S          | 98
-rw-r--r--  arch/parisc/kernel/head.S           | 26
-rw-r--r--  arch/parisc/kernel/pacache.S        | 30
-rw-r--r--  arch/parisc/kernel/real2.S          | 16
-rw-r--r--  arch/parisc/kernel/smp.c            |  2
-rw-r--r--  arch/parisc/kernel/syscall.S        | 37
-rw-r--r--  arch/parisc/kernel/syscall_table.S  |  4
7 files changed, 108 insertions(+), 105 deletions(-)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ec04e0ad77fa..0ca49710d95e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -37,7 +37,7 @@
 #include <asm/unistd.h>
 #include <asm/thread_info.h>

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #define CMPIB cmpib,*
 #define CMPB cmpb,*
 #define COND(x) *x
@@ -217,7 +217,7 @@
 va = r8 /* virtual address for which the trap occured */
 spc = r24 /* space for which the trap occured */

-#ifndef __LP64__
+#ifndef CONFIG_64BIT

 /*
 * itlb miss interruption handler (parisc 1.1 - 32 bit)
@@ -239,7 +239,7 @@

 .macro itlb_20 code
 mfctl %pcsq, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 b itlb_miss_20w
 #else
 b itlb_miss_20
@@ -249,7 +249,7 @@
 .align 32
 .endm

-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 /*
 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
 *
@@ -286,7 +286,7 @@
 .macro naitlb_20 code

 mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 b itlb_miss_20w
 #else
 b itlb_miss_20
@@ -299,7 +299,7 @@
 .align 32
 .endm

-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 /*
 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
 */
@@ -321,7 +321,7 @@
 .macro dtlb_20 code

 mfctl %isr, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 b dtlb_miss_20w
 #else
 b dtlb_miss_20
@@ -331,7 +331,7 @@
 .align 32
 .endm

-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

 .macro nadtlb_11 code
@@ -349,7 +349,7 @@
 .macro nadtlb_20 code

 mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 b nadtlb_miss_20w
 #else
 b nadtlb_miss_20
@@ -359,7 +359,7 @@
 .align 32
 .endm

-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 /*
 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
 */
@@ -381,7 +381,7 @@
 .macro dbit_20 code

 mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 b dbit_trap_20w
 #else
 b dbit_trap_20
@@ -394,7 +394,7 @@
 /* The following are simple 32 vs 64 bit instruction
 * abstractions for the macros */
 .macro EXTR reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 extrd,u \reg1,32+\start,\length,\reg2
 #else
 extrw,u \reg1,\start,\length,\reg2
@@ -402,7 +402,7 @@
 .endm

 .macro DEP reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depd \reg1,32+\start,\length,\reg2
 #else
 depw \reg1,\start,\length,\reg2
@@ -410,7 +410,7 @@
 .endm

 .macro DEPI val,start,length,reg
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi \val,32+\start,\length,\reg
 #else
 depwi \val,\start,\length,\reg
@@ -421,7 +421,7 @@
 * fault. We have to extract this and place it in the va,
 * zeroing the corresponding bits in the space register */
 .macro space_adjust spc,va,tmp
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 extrd,u \spc,63,SPACEID_SHIFT,\tmp
 depd %r0,63,SPACEID_SHIFT,\spc
 depd \tmp,31,SPACEID_SHIFT,\va
@@ -479,7 +479,7 @@
 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
 copy \pmd,%r9
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 shld %r9,PxD_VALUE_SHIFT,\pmd
 #else
 shlw %r9,PxD_VALUE_SHIFT,\pmd
@@ -610,7 +610,7 @@
 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
 cmpib,COND(<>),n 0,\spc,\fault
 ldil L%(TMPALIAS_MAP_START),\tmp
-#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
+#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
 /* on LP64, ldi will sign extend into the upper 32 bits,
 * which is behaviour we don't want */
 depdi 0,31,32,\tmp
@@ -624,7 +624,7 @@
 * OK, it is in the temp alias region, check whether "from" or "to".
 * Check "subtle" note in pacache.S re: r23/r26.
 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 extrd,u,*= \va,41,1,%r0
 #else
 extrw,u,= \va,9,1,%r0
@@ -691,7 +691,7 @@ fault_vector_20:
 def 30
 def 31

-#ifndef __LP64__
+#ifndef CONFIG_64BIT

 .export fault_vector_11

@@ -764,7 +764,7 @@ __kernel_thread:

 copy %r30, %r1
 ldo PT_SZ_ALGN(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* Yo, function pointers in wide mode are little structs... -PB */
 ldd 24(%r26), %r2
 STREG %r2, PT_GR27(%r1) /* Store childs %dp */
@@ -780,7 +780,7 @@ __kernel_thread:
 or %r26, %r24, %r26 /* will have kernel mappings. */
 ldi 1, %r25 /* stack_start, signals kernel thread */
 stw %r0, -52(%r30) /* user_tid */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif
 BL do_fork, %r2
@@ -809,7 +809,7 @@ ret_from_kernel_thread:

 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
 LDREG TASK_PT_GR25(%r1), %r26
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 LDREG TASK_PT_GR27(%r1), %r27
 LDREG TASK_PT_GR22(%r1), %r22
 #endif
@@ -817,7 +817,7 @@ ret_from_kernel_thread:
 ble 0(%sr7, %r1)
 copy %r31, %r2

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 loadgp /* Thread could have been in a module */
 #endif
@@ -838,7 +838,7 @@ __execve:
 STREG %r26, PT_GR26(%r16)
 STREG %r25, PT_GR25(%r16)
 STREG %r24, PT_GR24(%r16)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif
 BL sys_execve, %r2
@@ -916,7 +916,7 @@ syscall_exit_rfi:
 STREG %r19,PT_IAOQ1(%r16)
 LDREG PT_PSW(%r16),%r19
 load32 USER_PSW_MASK,%r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 load32 USER_PSW_HI_MASK,%r20
 depd %r20,31,32,%r1
 #endif
@@ -960,7 +960,7 @@ intr_return:
 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
 ** irq_stat[] is defined using ____cacheline_aligned.
 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 shld %r1, 6, %r20
 #else
 shlw %r1, 5, %r20
@@ -1018,7 +1018,7 @@ intr_restore:
 .import do_softirq,code
 intr_do_softirq:
 BL do_softirq,%r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #else
 nop
@@ -1036,7 +1036,7 @@ intr_do_resched:
 CMPIB= 0,%r20,intr_restore /* backward */
 nop

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif

@@ -1069,7 +1069,7 @@ intr_do_signal:

 copy %r0, %r24 /* unsigned long in_syscall */
 copy %r16, %r25 /* struct pt_regs *regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif

@@ -1093,7 +1093,7 @@ intr_extint:
 mfctl %cr31,%r1
 copy %r30,%r17
 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi 0,63,15,%r17
 #else
 depi 0,31,15,%r17
@@ -1120,7 +1120,7 @@ intr_extint:

 ldil L%intr_return, %r2

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif

@@ -1164,7 +1164,7 @@ intr_save:
 mfctl %cr21, %r17 /* ior */


-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /*
 * If the interrupted code was running with W bit off (32 bit),
 * clear the b bits (bits 0 & 1) in the ior.
@@ -1199,7 +1199,7 @@ skip_save_ior:
 loadgp

 copy %r29, %r25 /* arg1 is pt_regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif

@@ -1237,7 +1237,7 @@ skip_save_ior:
 spc = r24 /* space for which the trap occured */
 ptp = r25 /* page directory/page table pointer */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT

 dtlb_miss_20w:
 space_adjust spc,va,t0
@@ -1528,7 +1528,7 @@ nadtlb_probe_check:
 nop


-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 itlb_miss_20w:

 /*
@@ -1595,7 +1595,7 @@ itlb_miss_20:

 #endif

-#ifdef __LP64__
+#ifdef CONFIG_64BIT

 dbit_trap_20w:
 space_adjust spc,va,t0
@@ -1804,7 +1804,7 @@ sys_fork_wrapper:

 STREG %r2,-RP_OFFSET(%r30)
 ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif

@@ -1854,7 +1854,7 @@ sys_clone_wrapper:

 STREG %r2,-RP_OFFSET(%r30)
 ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif

@@ -1876,7 +1876,7 @@ sys_vfork_wrapper:

 STREG %r2,-RP_OFFSET(%r30)
 ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif

@@ -1904,7 +1904,7 @@ sys_vfork_wrapper:

 STREG %r2,-RP_OFFSET(%r30)
 ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif
 BL \execve,%r2
@@ -1930,7 +1930,7 @@ error_\execve:
 sys_execve_wrapper:
 execve_wrapper sys_execve

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 .export sys32_execve_wrapper
 .import sys32_execve

@@ -1944,7 +1944,7 @@ sys_rt_sigreturn_wrapper:
 ldo TASK_REGS(%r26),%r26 /* get pt regs */
 /* Don't save regs, we are going to restore them from sigcontext. */
 STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo FRAME_SIZE(%r30), %r30
 BL sys_rt_sigreturn,%r2
 ldo -16(%r30),%r29 /* Reference param save area */
@@ -1975,7 +1975,7 @@ sys_sigaltstack_wrapper:
 ldo TASK_REGS(%r1),%r24 /* get pt regs */
 LDREG TASK_PT_GR30(%r24),%r24
 STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo FRAME_SIZE(%r30), %r30
 b,l do_sigaltstack,%r2
 ldo -16(%r30),%r29 /* Reference param save area */
@@ -1989,7 +1989,7 @@ sys_sigaltstack_wrapper:
 bv %r0(%r2)
 nop

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 .export sys32_sigaltstack_wrapper
 sys32_sigaltstack_wrapper:
 /* Get the user stack pointer */
@@ -2013,7 +2013,7 @@ sys_rt_sigsuspend_wrapper:
 reg_save %r24

 STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo FRAME_SIZE(%r30), %r30
 b,l sys_rt_sigsuspend,%r2
 ldo -16(%r30),%r29 /* Reference param save area */
@@ -2086,7 +2086,7 @@ syscall_check_bh:
 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */

 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 shld %r26, 6, %r20
 #else
 shlw %r26, 5, %r20
@@ -2151,7 +2151,7 @@ syscall_restore:

 depi 3,31,2,%r31 /* ensure return to user mode. */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* decide whether to reset the wide mode bit
 *
 * For a syscall, the W bit is stored in the lowest bit
@@ -2247,7 +2247,7 @@ syscall_do_softirq:
 .import schedule,code
 syscall_do_resched:
 BL schedule,%r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #else
 nop
@@ -2267,7 +2267,7 @@ syscall_do_signal:

 ldi 1, %r24 /* unsigned long in_syscall */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif
 BL do_signal,%r2
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 2b8738576ec2..0b47afc20690 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -12,7 +12,7 @@
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

-#include <linux/autoconf.h> /* for CONFIG_SMP */
+#include <linux/config.h> /* for CONFIG_SMP */

 #include <asm/asm-offsets.h>
 #include <asm/psw.h>
@@ -36,10 +36,10 @@ boot_args:
 .align 4
 .import init_thread_union,data
 .import fault_vector_20,code /* IVA parisc 2.0 32 bit */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */
 .import $global$ /* forward declaration */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
 .export stext
 .export _stext,data /* Kernel want it this way! */
 _stext:
@@ -76,7 +76,7 @@ $bss_loop:
 mtctl %r4,%cr24 /* Initialize kernel root pointer */
 mtctl %r4,%cr25 /* Initialize user root pointer */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* Set pmd in pgd */
 load32 PA(pmd0),%r5
 shrd %r5,PxD_VALUE_SHIFT,%r3
@@ -99,7 +99,7 @@ $bss_loop:
 stw %r3,0(%r4)
 ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
 addib,> -1,%r1,1b
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
 #else
 ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -170,7 +170,7 @@ common_stext:
 stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
 #endif /*CONFIG_SMP*/

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 tophys_r1 %sp

 /* Save the rfi target address */
@@ -233,7 +233,7 @@ stext_pdc_ret:
 * following short sequence of instructions can determine this
 * (without being illegal on a PA1.1 machine).
 */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 ldi 32,%r10
 mtctl %r10,%cr11
 .level 2.0
@@ -246,7 +246,7 @@ stext_pdc_ret:

 $is_pa20:
 .level LEVEL /* restore 1.1 || 2.0w */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
 load32 PA(fault_vector_20),%r10

 $install_iva:
@@ -284,7 +284,7 @@ aligned_rfi:
 .import smp_init_current_idle_task,data
 .import smp_callin,code

-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 smp_callin_rtn:
 .proc
 .callinfo
@@ -292,7 +292,7 @@ smp_callin_rtn:
 nop
 nop
 .procend
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/

 /***************************************************************************
 * smp_slave_stext is executed by all non-monarch Processors when the Monarch
@@ -327,7 +327,7 @@ smp_slave_stext:
 mtctl %r4,%cr24 /* Initialize kernel root pointer */
 mtctl %r4,%cr25 /* Initialize user root pointer */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* Setup PDCE_PROC entry */
 copy %arg0,%r3
 #else
@@ -344,7 +344,7 @@ smp_slave_stext:

 .procend
 #endif /* CONFIG_SMP */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 .data

 .align 4
@@ -354,4 +354,4 @@ smp_slave_stext:
 .size $global$,4
 $global$:
 .word 0
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 71ade44c4618..4a7d9c8903f4 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -26,7 +26,7 @@
 * can be used.
 */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #define ADDIB addib,*
 #define CMPB cmpb,*
 #define ANDCM andcm,*
@@ -40,6 +40,8 @@
 .level 2.0
 #endif

+#include <linux/config.h>
+
 #include <asm/psw.h>
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
@@ -294,7 +296,7 @@ copy_user_page_asm:
 .callinfo NO_CALLS
 .entry

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
 * Unroll the loop by hand and arrange insn appropriately.
 * GCC probably can do this just as well.
@@ -454,7 +456,7 @@ copy_user_page_asm:
 sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */

 ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
 extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
@@ -541,7 +543,7 @@ __clear_user_page_asm:
 tophys_r1 %r26

 ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 depdi 0, 31,32, %r28 /* clear any sign extension */
 #endif
@@ -558,7 +560,7 @@ __clear_user_page_asm:

 pdtlb 0(%r28)

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldi 32, %r1 /* PAGE_SIZE/128 == 32 */

 /* PREFETCH (Write) has not (yet) been proven to help here */
@@ -583,7 +585,7 @@ __clear_user_page_asm:
 ADDIB> -1, %r1, 1b
 ldo 128(%r28), %r28

-#else /* ! __LP64 */
+#else /* ! CONFIG_64BIT */

 ldi 64, %r1 /* PAGE_SIZE/64 == 64 */

@@ -606,7 +608,7 @@ __clear_user_page_asm:
 stw %r0, 60(%r28)
 ADDIB> -1, %r1, 1b
 ldo 64(%r28), %r28
-#endif /* __LP64 */
+#endif /* CONFIG_64BIT */

 bv %r0(%r2)
 nop
@@ -624,7 +626,7 @@ flush_kernel_dcache_page:
 ldil L%dcache_stride, %r1
 ldw R%dcache_stride(%r1), %r23

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi,z 1, 63-PAGE_SHIFT,1, %r25
 #else
 depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -668,7 +670,7 @@ flush_user_dcache_page:
 ldil L%dcache_stride, %r1
 ldw R%dcache_stride(%r1), %r23

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi,z 1,63-PAGE_SHIFT,1, %r25
 #else
 depwi,z 1,31-PAGE_SHIFT,1, %r25
@@ -712,7 +714,7 @@ flush_user_icache_page:
 ldil L%dcache_stride, %r1
 ldw R%dcache_stride(%r1), %r23

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi,z 1, 63-PAGE_SHIFT,1, %r25
 #else
 depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -757,7 +759,7 @@ purge_kernel_dcache_page:
 ldil L%dcache_stride, %r1
 ldw R%dcache_stride(%r1), %r23

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi,z 1, 63-PAGE_SHIFT,1, %r25
 #else
 depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -805,7 +807,7 @@ flush_alias_page:
 tophys_r1 %r26

 ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
 depdi 0, 63,12, %r28 /* Clear any offset bits */
@@ -822,7 +824,7 @@ flush_alias_page:
 ldil L%dcache_stride, %r1
 ldw R%dcache_stride(%r1), %r23

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi,z 1, 63-PAGE_SHIFT,1, %r29
 #else
 depwi,z 1, 31-PAGE_SHIFT,1, %r29
@@ -933,7 +935,7 @@ flush_kernel_icache_page:
 ldil L%icache_stride, %r1
 ldw R%icache_stride(%r1), %r23

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 depdi,z 1, 63-PAGE_SHIFT,1, %r25
 #else
 depwi,z 1, 31-PAGE_SHIFT,1, %r25
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 2310fc1b06a9..085bff7c569d 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -7,6 +7,8 @@
 * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
 *
 */
+#include <linux/config.h>
+
 #include <asm/psw.h>
 #include <asm/assembly.h>

@@ -20,7 +22,7 @@ real32_stack:
 real64_stack:
 .block 8192

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 # define REG_SZ 8
 #else
 # define REG_SZ 4
@@ -50,7 +52,7 @@ save_cr_end:

 real32_call_asm:
 STREG %rp, -RP_OFFSET(%sp) /* save RP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 callee_save
 ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
 STREG %r27, -1*REG_SZ(%sp)
@@ -77,7 +79,7 @@ real32_call_asm:
 b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
 nop

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 rsm PSW_SM_W, %r0 /* go narrow */
 #endif

@@ -85,7 +87,7 @@ real32_call_asm:
 bv 0(%r31)
 nop
 ric_ret:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ssm PSW_SM_W, %r0 /* go wide */
 #endif
 /* restore CRs before going virtual in case we page fault */
@@ -97,7 +99,7 @@ ric_ret:

 tovirt_r1 %sp
 LDREG -REG_SZ(%sp), %sp /* restore SP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 LDREG -1*REG_SZ(%sp), %r27
 LDREG -2*REG_SZ(%sp), %r29
 ldo -2*REG_SZ(%sp), %sp
@@ -212,7 +214,7 @@ rfi_r2v_1:
 bv 0(%r2)
 nop

-#ifdef __LP64__
+#ifdef CONFIG_64BIT

 /************************ 64-bit real-mode calls ***********************/
 /* This is only usable in wide kernels right now and will probably stay so */
@@ -290,7 +292,7 @@ pc_in_user_space:
 ** comparing function pointers.
 */
 __canonicalize_funcptr_for_compare:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 bve (%r2)
 #else
 bv %r0(%r2)
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index bcc7e83f5142..5db3be4e2704 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -18,7 +18,7 @@
 */
 #undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */

-#include <linux/autoconf.h>
+#include <linux/config.h>

 #include <linux/types.h>
 #include <linux/spinlock.h>
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 8c7a7185cd3b..b29b76b42bb7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -6,6 +6,7 @@
 * thanks to Philipp Rumpf, Mike Shaver and various others
 * sorry about the wall, puffin..
 */
+#include <linux/config.h> /* for CONFIG_SMP */

 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
@@ -22,15 +23,13 @@
 */
 #define KILL_INSN break 0,0

-#include <linux/config.h> /* for CONFIG_SMP */
-
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 .level 2.0w
 #else
 .level 1.1
 #endif

-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 .macro fixup_branch,lbl
 b \lbl
 .endm
@@ -103,7 +102,7 @@ linux_gateway_entry:
 mfsp %sr7,%r1 /* save user sr7 */
 mtsp %r1,%sr3 /* and store it in sr3 */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* for now we can *always* set the W bit on entry to the syscall
 * since we don't support wide userland processes. We could
 * also save the current SM other than in r0 and restore it on
@@ -155,7 +154,7 @@ linux_gateway_entry:
 STREG %r19, TASK_PT_GR19(%r1)

 LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
 #if 0
 xor %r19,%r2,%r2 /* clear bottom bit */
@@ -186,7 +185,7 @@ linux_gateway_entry:

 loadgp

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 copy %r19,%r2 /* W bit back to r2 */
 #else
@@ -205,7 +204,7 @@ linux_gateway_entry:
 /* Note! We cannot use the syscall table that is mapped
 nearby since the gateway page is mapped execute-only. */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldil L%sys_call_table, %r1
 or,= %r2,%r2,%r2
 addil L%(sys_call_table64-sys_call_table), %r1
@@ -321,7 +320,7 @@ tracesys_next:
 LDREG TASK_PT_GR25(%r1), %r25
 LDREG TASK_PT_GR24(%r1), %r24
 LDREG TASK_PT_GR23(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 LDREG TASK_PT_GR22(%r1), %r22
 LDREG TASK_PT_GR21(%r1), %r21
 ldo -16(%r30),%r29 /* Reference param save area */
@@ -350,7 +349,7 @@ tracesys_next:
 tracesys_exit:
 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
 LDREG TI_TASK(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif
 bl syscall_trace, %r2
@@ -371,7 +370,7 @@ tracesys_exit:
 tracesys_sigexit:
 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
 LDREG 0(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
 #endif
 bl syscall_trace, %r2
@@ -404,7 +403,7 @@ lws_start:
 gate .+8, %r0
 depi 3, 31, 2, %r31 /* Ensure we return to userspace */

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* FIXME: If we are a 64-bit kernel just
 * turn this on unconditionally.
 */
@@ -440,7 +439,7 @@ lws_exit_nosys:
 /* Fall through: Return to userspace */

 lws_exit:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* decide whether to reset the wide mode bit
 *
 * For a syscall, the W bit is stored in the lowest bit
@@ -486,7 +485,7 @@ lws_exit:

 /* ELF64 Process entry path */
 lws_compare_and_swap64:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 b,n lws_compare_and_swap
 #else
 /* If we are not a 64-bit kernel, then we don't
@@ -497,7 +496,7 @@ lws_compare_and_swap64:

 /* ELF32 Process entry path */
 lws_compare_and_swap32:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* Clip all the input registers */
 depdi 0, 31, 32, %r26
 depdi 0, 31, 32, %r25
@@ -608,7 +607,7 @@ cas_action:
 the other for the store. Either return -EFAULT.
 Each of the entries must be relocated. */
 .section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* Pad the address calculation */
 .word 0,(2b - linux_gateway_page)
 .word 0,(3b - linux_gateway_page)
@@ -619,7 +618,7 @@ cas_action:
 .previous

 .section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* Pad the address calculation */
 .word 0,(1b - linux_gateway_page)
 .word 0,(3b - linux_gateway_page)
@@ -638,7 +637,7 @@ end_linux_gateway_page:

 /* Relocate symbols assuming linux_gateway_page is mapped
 to virtual address 0x0 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 /* FIXME: The code will always be on the gateay page
 and thus it will be on the first 4k, the
 assembler seems to think that the final
@@ -666,7 +665,7 @@ lws_table:
 sys_call_table:
 #include "syscall_table.S"

-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 .align 4096
 .export sys_call_table64
 .Lsys_call_table64:
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 065ab809659d..32cbc0489324 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -35,7 +35,7 @@
 #undef ENTRY_UHOH
 #undef ENTRY_COMP
 #undef ENTRY_OURS
-#if defined(__LP64__) && !defined(SYSCALL_TABLE_64BIT)
+#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
 /* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
 * narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
 * implementation is required on wide palinux. Use ENTRY_COMP where
@@ -46,7 +46,7 @@
 #define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
 #define ENTRY_OURS(_name_) .dword parisc_##_name_
 #define ENTRY_COMP(_name_) .dword compat_sys_##_name_
-#elif defined(__LP64__) && defined(SYSCALL_TABLE_64BIT)
+#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
 #define ENTRY_SAME(_name_) .dword sys_##_name_
 #define ENTRY_DIFF(_name_) .dword sys_##_name_
 #define ENTRY_UHOH(_name_) .dword sys_##_name_