author	Paul Mackerras <paulus@samba.org>	2005-10-05 20:59:19 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-10-05 20:59:19 -0400
commit	b85a046af3a260e079505e8023ccd10e01cf4f2b (patch)
tree	2f5f1af4db85fca6fc88902840463b107721cb14
parent	6ce52e6438fd2921891648ceab700d9b867e5ed2 (diff)
powerpc: Define 32/64 bit asm macros and use them in fpu.S

These macros help in writing assembly code that works for both ppc32
and ppc64.  With this we now have a common fpu.S.  This takes out
load_up_fpu from head_64.S.

Signed-off-by: Paul Mackerras <paulus@samba.org>
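
For illustration, a minimal sketch of how the new macros are meant to be
used (it mirrors the patched load_up_fpu below; not part of the commit
message): LDL/STL/CMPI expand to ld/std/cmpdi on ppc64 and to
lwz/stw/cmpwi on ppc32, while LOADBASE/OFF hide the difference between
TOC-based and @ha/@l addressing, so one sequence serves both:

	LOADBASE(r3, last_task_used_math)	/* base address of the variable */
	LDL	r4,OFF(last_task_used_math)(r3)	/* load a long/pointer */
	CMPI	0,r4,0				/* word-size-correct compare */
	beq	1f				/* no previous owner: skip the save */
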
-rw-r--r--	arch/powerpc/kernel/fpu.S	|  72
-rw-r--r--	arch/powerpc/kernel/head_64.S	|  58
-rw-r--r--	include/asm-powerpc/ppc_asm.h	|  28
3 files changed, 53 insertions, 105 deletions
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 665d7d34304c..04e95e810b21 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -27,13 +27,9 @@
  * Load up this task's FP registers from its thread_struct,
  * enable the FPU for the current task and return to the task.
  */
-	.globl	load_up_fpu
-load_up_fpu:
+_GLOBAL(load_up_fpu)
 	mfmsr	r5
 	ori	r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
-	clrldi	r5,r5,1		/* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
 	SYNC
 	MTMSRD(r5)			/* enable use of fpu now */
 	isync
@@ -43,67 +39,57 @@ load_up_fpu:
  * to another.  Instead we call giveup_fpu in switch_to.
  */
 #ifndef CONFIG_SMP
-	tophys(r6,0)			/* get __pa constant */
-	addis	r3,r6,last_task_used_math@ha
-	lwz	r4,last_task_used_math@l(r3)
-	cmpwi	0,r4,0
+	LOADBASE(r3, last_task_used_math)
+	tophys(r3,r3)
+	LDL	r4,OFF(last_task_used_math)(r3)
+	CMPI	0,r4,0
 	beq	1f
-	add	r4,r4,r6
+	tophys(r4,r4)
 	addi	r4,r4,THREAD	/* want last_task_used_math->thread */
 	SAVE_32FPRS(0, r4)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR-4(r4)
-	lwz	r5,PT_REGS(r4)
-	add	r5,r5,r6
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	LDL	r5,PT_REGS(r4)
+	tophys(r5,r5)
+	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r10,MSR_FP|MSR_FE0|MSR_FE1
 	andc	r4,r4,r10	/* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
 #endif /* CONFIG_SMP */
 	/* enable use of FP after return */
+#ifdef CONFIG_PPC32
 	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
 	lwz	r4,THREAD_FPEXC_MODE(r5)
 	ori	r9,r9,MSR_FP		/* enable FP for current */
 	or	r9,r9,r4
+#else
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	ld	r4,THREAD_FPEXC_MODE(r5)
+	ori	r12,r12,MSR_FP
+	or	r12,r12,r4
+	std	r12,_MSR(r1)
+#endif
 	lfd	fr0,THREAD_FPSCR-4(r5)
 	mtfsf	0xff,fr0
 	REST_32FPRS(0, r5)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
-	sub	r4,r4,r6
-	stw	r4,last_task_used_math@l(r3)
+	tovirt(r4,r4)
+	STL	r4,OFF(last_task_used_math)(r3)
 #endif /* CONFIG_SMP */
 	/* restore registers and return */
 	/* we haven't used ctr or xer or lr */
 	b	fast_exception_return
 
 /*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-	.globl	KernelFP
-KernelFP:
-	lwz	r3,_MSR(r1)
-	ori	r3,r3,MSR_FP
-	stw	r3,_MSR(r1)	/* enable use of FP after return */
-	lis	r3,86f@h
-	ori	r3,r3,86f@l
-	mr	r4,r2			/* current */
-	lwz	r5,_NIP(r1)
-	bl	printk
-	b	ret_from_except
-86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
-	.align	4,0
-
-/*
  * giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-	.globl	giveup_fpu
-giveup_fpu:
+_GLOBAL(giveup_fpu)
 	mfmsr	r5
 	ori	r5,r5,MSR_FP
 	SYNC_601
@@ -111,23 +97,23 @@ giveup_fpu:
 	MTMSRD(r5)			/* enable use of fpu now */
 	SYNC_601
 	isync
-	cmpwi	0,r3,0
+	CMPI	0,r3,0
 	beqlr-				/* if no previous owner, done */
 	addi	r3,r3,THREAD		/* want THREAD of task */
-	lwz	r5,PT_REGS(r3)
-	cmpwi	0,r5,0
+	LDL	r5,PT_REGS(r3)
+	CMPI	0,r5,0
 	SAVE_32FPRS(0, r3)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR-4(r3)
 	beq	1f
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
 	andc	r4,r4,r3		/* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
 #ifndef CONFIG_SMP
 	li	r5,0
-	lis	r4,last_task_used_math@ha
-	stw	r5,last_task_used_math@l(r4)
+	LOADBASE(r4,last_task_used_math)
+	STL	r5,OFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
 	blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index db0cd3587627..a36ee6ecbaea 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,7 +80,7 @@ _stext:
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
-	b .__start_initialization_multiplatform
+	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
@@ -857,62 +857,6 @@ fp_unavailable_common:
 	bl	.kernel_fp_unavailable_exception
 	BUG_OPCODE
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD	/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
 	.align	7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 4efa71878fa9..6cd52c130332 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -103,12 +103,13 @@
 	oris	rn,rn,name##@h;	\
 	ori	rn,rn,name##@l
 
-#define LOADBASE(rn,name) \
-	lis	rn,name@highest; \
-	ori	rn,rn,name@higher; \
-	rldicr	rn,rn,32,31; \
-	oris	rn,rn,name@ha
+#define LOADBASE(rn,name)	\
+	.section	.toc,"aw"; \
+1:	.tc	name[TC],name; \
+	.previous; \
+	ld	rn,1b@toc(r2)
 
+#define OFF(name)	0
 
 #define SET_REG_TO_CONST(reg, value)	\
 	lis	reg,(((value)>>48)&0xFFFF); \
@@ -123,6 +124,23 @@
 	rldicr	reg,reg,32,31;	\
 	oris	reg,reg,(label)@h;	\
 	ori	reg,reg,(label)@l;
+
+/* operations for longs and pointers */
+#define LDL	ld
+#define STL	std
+#define CMPI	cmpdi
+
+#else /* 32-bit */
+#define LOADBASE(rn,name)	\
+	lis	rn,name@ha
+
+#define OFF(name)	name@l
+
+/* operations for longs and pointers */
+#define LDL	lwz
+#define STL	stw
+#define CMPI	cmpwi
+
 #endif
 
 /* various errata or part fixups */
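
How the LOADBASE/OFF pair composes, as a minimal sketch (an illustration
derived from the definitions above, not part of the patch): on 64-bit,
LOADBASE materializes the variable's full address through a TOC entry
loaded via r2, so OFF() contributes nothing; on 32-bit, LOADBASE sets
only the high half with @ha and OFF() supplies the low @l displacement.
A store written once therefore expands as:

	LOADBASE(r4, last_task_used_math)
	STL	r5,OFF(last_task_used_math)(r4)
	/* ppc32: lis r4,last_task_used_math@ha; stw r5,last_task_used_math@l(r4) */
	/* ppc64: ld r4,<toc entry>(r2); std r5,0(r4) */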