author     Paul Mackerras <paulus@samba.org>    2005-10-17 06:12:39 -0400
committer  Paul Mackerras <paulus@samba.org>    2005-10-17 06:12:39 -0400
commit     7ac59c624992281ff315911dea2a98ca3f3ff06e (patch)
tree       cef07b70e33f78cd184a05b16876756fb6e8a4ab /arch/ppc/kernel
parent     3e63b9ec51eb1d9f441e5015427b23d70e5991b3 (diff)
ppc: Fix various compile errors resulting from ptrace.c merge
This introduces flush_{fp,altivec,spe}_to_thread and fixes a
branch-too-far error in linking.
Signed-off-by: Paul Mackerras <paulus@samba.org>
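
The flush_{fp,altivec,spe}_to_thread helpers exist so that ptrace-style code can read a task's register state out of the thread_struct without racing against state still live in the CPU. A minimal sketch of the intended call pattern; the caller and buffer below are hypothetical (the actual arch/ppc/kernel/ptrace.c call sites are not part of this diff), and it assumes the era's 32-bit ppc thread_struct field `double fpr[32]` that dump_task_fpu also copies from:

```c
/* Hypothetical ptrace-style reader, not from this commit. */
static int get_child_fpregs(struct task_struct *child, double *buf)
{
	/*
	 * Spill any FP state still live in the CPU back into
	 * child->thread.fpr[] before copying it out; otherwise a
	 * recently-running child could hand us stale values.
	 */
	flush_fp_to_thread(child);
	memcpy(buf, &child->thread.fpr[0], 32 * sizeof(double));
	return 0;
}
```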
Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--  arch/ppc/kernel/entry.S    |   3
-rw-r--r--  arch/ppc/kernel/process.c  | 132
2 files changed, 98 insertions, 37 deletions
```diff
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 03d4886869f3..68fc61221776 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -633,7 +633,8 @@ sigreturn_exit:
 	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
 	lwz	r9,TI_FLAGS(r12)
 	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
-	bnel-	do_syscall_trace_leave
+	beq+	ret_from_except_full
+	bl	do_syscall_trace_leave
 	/* fall through */
 
 	.globl	ret_from_except_full
```
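
The entry.S hunk is the branch-too-far fix named in the commit message. `bnel-` is a B-form conditional branch whose 16-bit displacement reaches only ±32 KB; after the ptrace.c merge, `do_syscall_trace_leave` evidently fell outside that range and the final link failed. The rewrite inverts the test: when no tracing flag is set, `beq+` makes the short (predicted-taken) hop to the nearby `ret_from_except_full`; the traced path instead calls `do_syscall_trace_leave` with an unconditional `bl`, whose 26-bit displacement reaches ±32 MB, and then falls through to `ret_from_except_full` as before.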
```diff
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 0870e5553453..6d60c40598e7 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
 }
 #endif /* defined(CHECK_STACK) */
 
-#ifdef CONFIG_ALTIVEC
-int
-dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the floating-point register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+	if (tsk->thread.regs) {
+		/*
+		 * We need to disable preemption here because if we didn't,
+		 * another process could get scheduled after the regs->msr
+		 * test but before we have finished saving the FP registers
+		 * to the thread_struct.  That process could take over the
+		 * FPU, and then when we get scheduled again we would store
+		 * bogus values for the remaining FP registers.
+		 */
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+			/*
+			 * This should only ever be called for current or
+			 * for a stopped child process.  Since we save away
+			 * the FP register state on context switch on SMP,
+			 * there is something wrong if a stopped child appears
+			 * to still have its FP state in the CPU registers.
+			 */
+			BUG_ON(tsk != current);
+#endif
+			giveup_fpu(current);
+		}
+		preempt_enable();
+	}
+}
+
+void enable_kernel_fp(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		giveup_fpu(current);
+	else
+		giveup_fpu(NULL);	/* just enables FP for kernel */
+#else
+	giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+	preempt_disable();
+	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
+		giveup_fpu(tsk);
+	preempt_enable();
+	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
 	return 1;
 }
 
-void
-enable_kernel_altivec(void)
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());
 
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
-#endif /* CONFIG_ALTIVEC */
 
-#ifdef CONFIG_SPE
-int
-dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+/*
+ * Make sure the VMX/Altivec register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_altivec(current);
+		}
+		preempt_enable();
+	}
+}
+
+int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
 	return 1;
 }
+#endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
 void
 enable_kernel_spe(void)
 {
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_spe);
-#endif /* CONFIG_SPE */
 
-void
-enable_kernel_fp(void)
+void flush_spe_to_thread(struct task_struct *tsk)
 {
-	WARN_ON(preemptible());
-
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_SPE) {
 #ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu(current);
-	else
-		giveup_fpu(NULL);	/* just enables FP for kernel */
-#else
-	giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
+			BUG_ON(tsk != current);
+#endif
+			giveup_spe(current);
+		}
+		preempt_enable();
+	}
 }
-EXPORT_SYMBOL(enable_kernel_fp);
 
-int
-dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 {
-	preempt_disable();
-	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
-		giveup_fpu(tsk);
-	preempt_enable();
-	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+	if (regs->msr & MSR_SPE)
+		giveup_spe(current);
+	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
 	return 1;
 }
+#endif /* CONFIG_SPE */
 
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
```
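
All three flush_*_to_thread helpers added here share one shape; only the MSR bit and the giveup routine differ, and all are no-ops for kernel threads (tsk->thread.regs is NULL). Purely as an illustrative distillation, assuming nothing beyond what the diff shows (the kernel open-codes each variant; this generic helper and its function-pointer parameter are not real kernel API):

```c
/*
 * Illustrative only: a generic rendering of the pattern shared by
 * flush_fp_to_thread(), flush_altivec_to_thread() and
 * flush_spe_to_thread().  The real code open-codes each variant.
 */
static void flush_unit_to_thread(struct task_struct *tsk,
				 unsigned long msr_bit,
				 void (*giveup)(struct task_struct *))
{
	if (!tsk->thread.regs)
		return;		/* kernel thread: no user register state */

	/*
	 * No preemption: losing the CPU between the MSR test and the
	 * giveup would let another task take the unit over, leaving
	 * half-saved state in the thread_struct.
	 */
	preempt_disable();
	if (tsk->thread.regs->msr & msr_bit) {
#ifdef CONFIG_SMP
		/*
		 * On SMP the unit state is saved at context switch, so
		 * only "current" can still own live state here.
		 */
		BUG_ON(tsk != current);
#endif
		giveup(current);	/* spill live regs to thread_struct */
	}
	preempt_enable();
}
```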