| author    | Greg Ungerer <gerg@uclinux.org>                            | 2011-10-19 00:10:03 -0400 |
|-----------|------------------------------------------------------------|---------------------------|
| committer | Greg Ungerer <gerg@uclinux.org>                            | 2011-12-29 19:20:47 -0500 |
| commit    | 0a2796da1182a7dcfba41f796f45986237bc1688                   | (patch)                   |
| tree      | f3a09049fd2f554241ede9e2c2d8a979ffe1e730 /arch/m68k/kernel |                           |
| parent    | e9fcffa49376b37518baf71a47adc15e74b2434c                   | (diff)                    |
m68k: add ColdFire FPU support for the V4e ColdFire CPUs
The V4e ColdFire CPU family also has an integrated FPU (as well as the MMU).
So add code to support this hardware alongside the existing m68k FPU code.
The ColdFire FPU is of course different from all previous 68k FP units. It is
close in operation to the 68060, but not completely compatible. The biggest
issue to deal with is that the ColdFire FPU multi-move instructions are
different: the ColdFire FPU does not support multi-moving the FP control
registers, and the multi-move of the FP data registers uses a different
instruction mnemonic.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: Matt Waddel <mwaddel@yahoo.com>
Acked-by: Kurt Mahan <kmahan@xmission.com>
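
To make the instruction-level difference concrete, here is a minimal sketch (not part of the patch) of the two register-save sequences, assuming an m68k/ColdFire GCC toolchain. The `struct fpctx` container and the `save_fp_context()` helper are hypothetical names introduced only for illustration, and the compile-time `__mcoldfire__` predefine stands in for the runtime `CPU_IS_COLDFIRE` test the patch actually uses. The classic 68881-style code moves all eight data registers with one `fmovemx` and all three control registers with a single `fmoveml`, while the ColdFire FPU needs `fmovemd` for the data registers and one `fmovel` per control register.

```c
/*
 * Illustrative sketch only -- not part of the patch. Assumes an
 * m68k/ColdFire GCC toolchain; "struct fpctx" and "save_fp_context()"
 * are hypothetical names, and the #ifdef stands in for the runtime
 * CPU_IS_COLDFIRE check used in the kernel code below.
 */
struct fpctx {
        unsigned long fpregs[8 * 3];    /* fp0-fp7; sized for the larger
                                           96-bit extended format */
        unsigned long fpcntl[3];        /* fpcr, fpsr, fpiar */
};

static void save_fp_context(struct fpctx *ctx)
{
#ifdef __mcoldfire__
        /*
         * ColdFire FPU: the control registers cannot be multi-moved,
         * so each one is saved with its own fmovel, and the data
         * registers use the fmovemd mnemonic.
         */
        asm volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
                      "fmovel %%fpcr,%1\n\t"
                      "fmovel %%fpsr,%2\n\t"
                      "fmovel %%fpiar,%3"
                      : "=m" (ctx->fpregs[0]),
                        "=m" (ctx->fpcntl[0]),
                        "=m" (ctx->fpcntl[1]),
                        "=m" (ctx->fpcntl[2])
                      : /* no inputs */
                      : "memory");
#else
        /*
         * Classic 68881/68882/68040/68060: one multi-move for the data
         * registers and one for all three control registers.
         */
        asm volatile ("fmovemx %%fp0-%%fp7,%0\n\t"
                      "fmoveml %%fpcr/%%fpsr/%%fpiar,%1"
                      : "=m" (ctx->fpregs[0]),
                        "=m" (ctx->fpcntl[0])
                      : /* no inputs */
                      : "memory");
#endif
}
```

With that difference in mind, the hunks below mostly wrap each existing 68881-style asm block in a `CPU_IS_COLDFIRE` test and add the ColdFire variant next to it.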
Diffstat (limited to 'arch/m68k/kernel')

| mode       | file                          | lines changed |
|------------|-------------------------------|---------------|
| -rw-r--r-- | arch/m68k/kernel/process_mm.c | 59            |
| -rw-r--r-- | arch/m68k/kernel/setup_mm.c   | 2             |
| -rw-r--r-- | arch/m68k/kernel/signal_mm.c  | 187           |

3 files changed, 182 insertions, 66 deletions
```diff
diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c
index 58a3253f3eb9..125f34e00bf0 100644
--- a/arch/m68k/kernel/process_mm.c
+++ b/arch/m68k/kernel/process_mm.c
@@ -172,9 +172,7 @@ void flush_thread(void)
 
         current->thread.fs = __USER_DS;
         if (!FPU_IS_EMU)
-                asm volatile (".chip 68k/68881\n\t"
-                              "frestore %0@\n\t"
-                              ".chip 68k" : : "a" (&zero));
+                asm volatile ("frestore %0@" : : "a" (&zero) : "memory");
 }
 
 /*
@@ -248,11 +246,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                 /* Copy the current fpu state */
                 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
 
-                if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
-                        asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
-                                      "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
-                                      : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
-                                      : "memory");
+                if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
+                        if (CPU_IS_COLDFIRE) {
+                                asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
+                                              "fmovel %/fpiar,%1\n\t"
+                                              "fmovel %/fpcr,%2\n\t"
+                                              "fmovel %/fpsr,%3"
+                                              :
+                                              : "m" (p->thread.fp[0]),
+                                                "m" (p->thread.fpcntl[0]),
+                                                "m" (p->thread.fpcntl[1]),
+                                                "m" (p->thread.fpcntl[2])
+                                              : "memory");
+                        } else {
+                                asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
+                                              "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+                                              :
+                                              : "m" (p->thread.fp[0]),
+                                                "m" (p->thread.fpcntl[0])
+                                              : "memory");
+                        }
+                }
+
                 /* Restore the state in case the fpu was busy */
                 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
         }
@@ -285,12 +300,28 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
         if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
                 return 0;
 
-        asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
-                :: "m" (fpu->fpcntl[0])
-                : "memory");
-        asm volatile ("fmovemx %/fp0-%/fp7,%0"
-                :: "m" (fpu->fpregs[0])
-                : "memory");
+        if (CPU_IS_COLDFIRE) {
+                asm volatile ("fmovel %/fpiar,%0\n\t"
+                              "fmovel %/fpcr,%1\n\t"
+                              "fmovel %/fpsr,%2\n\t"
+                              "fmovemd %/fp0-%/fp7,%3"
+                              :
+                              : "m" (fpu->fpcntl[0]),
+                                "m" (fpu->fpcntl[1]),
+                                "m" (fpu->fpcntl[2]),
+                                "m" (fpu->fpregs[0])
+                              : "memory");
+        } else {
+                asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+                              :
+                              : "m" (fpu->fpcntl[0])
+                              : "memory");
+                asm volatile ("fmovemx %/fp0-%/fp7,%0"
+                              :
+                              : "m" (fpu->fpregs[0])
+                              : "memory");
+        }
+
         return 1;
 }
 EXPORT_SYMBOL(dump_fpu);
```
```diff
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index b3938adeabea..d872ce4807c9 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -236,7 +236,7 @@ void __init setup_arch(char **cmdline_p)
          * with them, we should add a test to check_bugs() below] */
 #ifndef CONFIG_M68KFPU_EMU_ONLY
         /* clear the fpu if we have one */
-        if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
+        if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
                 volatile int zero = 0;
                 asm volatile ("frestore %0" : : "m" (zero));
         }
```
```diff
diff --git a/arch/m68k/kernel/signal_mm.c b/arch/m68k/kernel/signal_mm.c
index 5f6b3d0fcd16..cb856f9da655 100644
--- a/arch/m68k/kernel/signal_mm.c
+++ b/arch/m68k/kernel/signal_mm.c
@@ -203,7 +203,8 @@ static inline int restore_fpu_state(struct sigcontext *sc)
 
         if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                 /* Verify the frame format. */
-                if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+                if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+                    (sc->sc_fpstate[0] != fpu_version))
                         goto out;
                 if (CPU_IS_020_OR_030) {
                         if (m68k_fputype & FPU_68881 &&
@@ -222,19 +223,43 @@ static inline int restore_fpu_state(struct sigcontext *sc)
                               sc->sc_fpstate[3] == 0x60 ||
                               sc->sc_fpstate[3] == 0xe0))
                                 goto out;
+                } else if (CPU_IS_COLDFIRE) {
+                        if (!(sc->sc_fpstate[0] == 0x00 ||
+                              sc->sc_fpstate[0] == 0x05 ||
+                              sc->sc_fpstate[0] == 0xe5))
+                                goto out;
                 } else
                         goto out;
 
-                __asm__ volatile (".chip 68k/68881\n\t"
-                                  "fmovemx %0,%%fp0-%%fp1\n\t"
-                                  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-                                  ".chip 68k"
-                                  : /* no outputs */
-                                  : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
+                if (CPU_IS_COLDFIRE) {
+                        __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
+                                          "fmovel %1,%%fpcr\n\t"
+                                          "fmovel %2,%%fpsr\n\t"
+                                          "fmovel %3,%%fpiar"
+                                          : /* no outputs */
+                                          : "m" (sc->sc_fpregs[0]),
+                                            "m" (sc->sc_fpcntl[0]),
+                                            "m" (sc->sc_fpcntl[1]),
+                                            "m" (sc->sc_fpcntl[2]));
+                } else {
+                        __asm__ volatile (".chip 68k/68881\n\t"
+                                          "fmovemx %0,%%fp0-%%fp1\n\t"
+                                          "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+                                          ".chip 68k"
+                                          : /* no outputs */
+                                          : "m" (*sc->sc_fpregs),
+                                            "m" (*sc->sc_fpcntl));
+                }
+        }
+
+        if (CPU_IS_COLDFIRE) {
+                __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
+        } else {
+                __asm__ volatile (".chip 68k/68881\n\t"
+                                  "frestore %0\n\t"
+                                  ".chip 68k"
+                                  : : "m" (*sc->sc_fpstate));
         }
-        __asm__ volatile (".chip 68k/68881\n\t"
-                          "frestore %0\n\t"
-                          ".chip 68k" : : "m" (*sc->sc_fpstate));
         err = 0;
 
 out:
@@ -249,7 +274,7 @@ out:
 static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 {
         unsigned char fpstate[FPCONTEXT_SIZE];
-        int context_size = CPU_IS_060 ? 8 : 0;
+        int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
         fpregset_t fpregs;
         int err = 1;
 
@@ -268,10 +293,11 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
         if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
                 goto out;
         if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
-                if (!CPU_IS_060)
+                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                         context_size = fpstate[1];
                 /* Verify the frame format. */
-                if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+                if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+                    (fpstate[0] != fpu_version))
                         goto out;
                 if (CPU_IS_020_OR_030) {
                         if (m68k_fputype & FPU_68881 &&
@@ -290,26 +316,50 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
                               fpstate[3] == 0x60 ||
                               fpstate[3] == 0xe0))
                                 goto out;
+                } else if (CPU_IS_COLDFIRE) {
+                        if (!(fpstate[3] == 0x00 ||
+                              fpstate[3] == 0x05 ||
+                              fpstate[3] == 0xe5))
+                                goto out;
                 } else
                         goto out;
                 if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
                                      sizeof(fpregs)))
                         goto out;
-                __asm__ volatile (".chip 68k/68881\n\t"
-                                  "fmovemx %0,%%fp0-%%fp7\n\t"
-                                  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-                                  ".chip 68k"
-                                  : /* no outputs */
-                                  : "m" (*fpregs.f_fpregs),
-                                    "m" (*fpregs.f_fpcntl));
+
+                if (CPU_IS_COLDFIRE) {
+                        __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
+                                          "fmovel %1,%%fpcr\n\t"
+                                          "fmovel %2,%%fpsr\n\t"
+                                          "fmovel %3,%%fpiar"
+                                          : /* no outputs */
+                                          : "m" (fpregs.f_fpregs[0]),
+                                            "m" (fpregs.f_fpcntl[0]),
+                                            "m" (fpregs.f_fpcntl[1]),
+                                            "m" (fpregs.f_fpcntl[2]));
+                } else {
+                        __asm__ volatile (".chip 68k/68881\n\t"
+                                          "fmovemx %0,%%fp0-%%fp7\n\t"
+                                          "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+                                          ".chip 68k"
+                                          : /* no outputs */
+                                          : "m" (*fpregs.f_fpregs),
+                                            "m" (*fpregs.f_fpcntl));
+                }
         }
         if (context_size &&
             __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
                              context_size))
                 goto out;
-        __asm__ volatile (".chip 68k/68881\n\t"
-                          "frestore %0\n\t"
-                          ".chip 68k" : : "m" (*fpstate));
+
+        if (CPU_IS_COLDFIRE) {
+                __asm__ volatile ("frestore %0" : : "m" (*fpstate));
+        } else {
+                __asm__ volatile (".chip 68k/68881\n\t"
+                                  "frestore %0\n\t"
+                                  ".chip 68k"
+                                  : : "m" (*fpstate));
+        }
         err = 0;
 
 out:
@@ -529,10 +579,15 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
                 return;
         }
 
-        __asm__ volatile (".chip 68k/68881\n\t"
-                          "fsave %0\n\t"
-                          ".chip 68k"
-                          : : "m" (*sc->sc_fpstate) : "memory");
+        if (CPU_IS_COLDFIRE) {
+                __asm__ volatile ("fsave %0"
+                                  : : "m" (*sc->sc_fpstate) : "memory");
+        } else {
+                __asm__ volatile (".chip 68k/68881\n\t"
+                                  "fsave %0\n\t"
+                                  ".chip 68k"
+                                  : : "m" (*sc->sc_fpstate) : "memory");
+        }
 
         if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                 fpu_version = sc->sc_fpstate[0];
@@ -543,21 +598,35 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
                         if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
                                 sc->sc_fpstate[0x38] |= 1 << 3;
                 }
-                __asm__ volatile (".chip 68k/68881\n\t"
-                                  "fmovemx %%fp0-%%fp1,%0\n\t"
-                                  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-                                  ".chip 68k"
-                                  : "=m" (*sc->sc_fpregs),
-                                    "=m" (*sc->sc_fpcntl)
-                                  : /* no inputs */
-                                  : "memory");
+
+                if (CPU_IS_COLDFIRE) {
+                        __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
+                                          "fmovel %%fpcr,%1\n\t"
+                                          "fmovel %%fpsr,%2\n\t"
+                                          "fmovel %%fpiar,%3"
+                                          : "=m" (sc->sc_fpregs[0]),
+                                            "=m" (sc->sc_fpcntl[0]),
+                                            "=m" (sc->sc_fpcntl[1]),
+                                            "=m" (sc->sc_fpcntl[2])
+                                          : /* no inputs */
+                                          : "memory");
+                } else {
+                        __asm__ volatile (".chip 68k/68881\n\t"
+                                          "fmovemx %%fp0-%%fp1,%0\n\t"
+                                          "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+                                          ".chip 68k"
+                                          : "=m" (*sc->sc_fpregs),
+                                            "=m" (*sc->sc_fpcntl)
+                                          : /* no inputs */
+                                          : "memory");
+                }
         }
 }
 
 static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 {
         unsigned char fpstate[FPCONTEXT_SIZE];
-        int context_size = CPU_IS_060 ? 8 : 0;
+        int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
         int err = 0;
 
         if (FPU_IS_EMU) {
@@ -570,15 +639,19 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
                 return err;
         }
 
-        __asm__ volatile (".chip 68k/68881\n\t"
-                          "fsave %0\n\t"
-                          ".chip 68k"
-                          : : "m" (*fpstate) : "memory");
+        if (CPU_IS_COLDFIRE) {
+                __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
+        } else {
+                __asm__ volatile (".chip 68k/68881\n\t"
+                                  "fsave %0\n\t"
+                                  ".chip 68k"
+                                  : : "m" (*fpstate) : "memory");
+        }
 
         err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
         if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                 fpregset_t fpregs;
-                if (!CPU_IS_060)
+                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                         context_size = fpstate[1];
                 fpu_version = fpstate[0];
                 if (CPU_IS_020_OR_030 &&
@@ -588,14 +661,27 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
                         if (*(unsigned short *) fpstate == 0x1f38)
                                 fpstate[0x38] |= 1 << 3;
                 }
-                __asm__ volatile (".chip 68k/68881\n\t"
-                                  "fmovemx %%fp0-%%fp7,%0\n\t"
-                                  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-                                  ".chip 68k"
-                                  : "=m" (*fpregs.f_fpregs),
-                                    "=m" (*fpregs.f_fpcntl)
-                                  : /* no inputs */
-                                  : "memory");
+                if (CPU_IS_COLDFIRE) {
+                        __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
+                                          "fmovel %%fpcr,%1\n\t"
+                                          "fmovel %%fpsr,%2\n\t"
+                                          "fmovel %%fpiar,%3"
+                                          : "=m" (fpregs.f_fpregs[0]),
+                                            "=m" (fpregs.f_fpcntl[0]),
+                                            "=m" (fpregs.f_fpcntl[1]),
+                                            "=m" (fpregs.f_fpcntl[2])
+                                          : /* no inputs */
+                                          : "memory");
+                } else {
+                        __asm__ volatile (".chip 68k/68881\n\t"
+                                          "fmovemx %%fp0-%%fp7,%0\n\t"
+                                          "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+                                          ".chip 68k"
+                                          : "=m" (*fpregs.f_fpregs),
+                                            "=m" (*fpregs.f_fpcntl)
+                                          : /* no inputs */
+                                          : "memory");
+                }
                 err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
                                     sizeof(fpregs));
         }
@@ -692,8 +778,7 @@ static inline void push_cache (unsigned long vaddr)
                                       "cpushl %%bc,(%0)\n\t"
                                       ".chip 68k"
                                       : : "a" (temp));
-        }
-        else {
+        } else if (!CPU_IS_COLDFIRE) {
                 /*
                  * 68030/68020 have no writeback cache;
                  * still need to clear icache.
```