Diffstat (limited to 'arch/sh/kernel/cpu')
 arch/sh/kernel/cpu/Makefile   |   2
 arch/sh/kernel/cpu/fpu.c      |  82
 arch/sh/kernel/cpu/init.c     |  80
 arch/sh/kernel/cpu/sh2a/fpu.c | 111
 arch/sh/kernel/cpu/sh4/fpu.c  | 159
 5 files changed, 194 insertions(+), 240 deletions(-)
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d97c803719e..0e48bc61c27 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -17,5 +17,7 @@ obj-$(CONFIG_ARCH_SHMOBILE)	+= shmobile/
 
 obj-$(CONFIG_SH_ADC)		+= adc.o
 obj-$(CONFIG_SH_CLK_CPG)	+= clock-cpg.o
+obj-$(CONFIG_SH_FPU)		+= fpu.o
+obj-$(CONFIG_SH_FPU_EMU)	+= fpu.o
 
 obj-y	+= irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 00000000000..c23e6727002
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,82 @@
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+
+int init_fpu(struct task_struct *tsk)
+{
+	if (tsk_used_math(tsk)) {
+		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+			unlazy_fpu(tsk, task_pt_regs(tsk));
+		return 0;
+	}
+
+	/*
+	 * Memory allocation at the first usage of the FPU and other state.
+	 */
+	if (!tsk->thread.xstate) {
+		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!tsk->thread.xstate)
+			return -ENOMEM;
+	}
+
+	if (boot_cpu_data.flags & CPU_HAS_FPU) {
+		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	} else {
+		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	}
+
+	set_stopped_child_used_math(tsk);
+	return 0;
+}
+
+#ifdef CONFIG_SH_FPU
+void __fpu_state_restore(void)
+{
+	struct task_struct *tsk = current;
+
+	restore_fpu(tsk);
+
+	task_thread_info(tsk)->status |= TS_USEDFPU;
+	tsk->fpu_counter++;
+}
+
+void fpu_state_restore(struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+
+	if (unlikely(!user_mode(regs))) {
+		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
+		return;
+	}
+
+	if (!tsk_used_math(tsk)) {
+		/*
+		 * does a slab alloc which can sleep
+		 */
+		if (init_fpu(tsk)) {
+			/*
+			 * ran out of memory!
+			 */
+			do_group_exit(SIGKILL);
+			return;
+		}
+	}
+
+	grab_fpu(regs);
+
+	__fpu_state_restore();
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
+}
+#endif /* CONFIG_SH_FPU */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 89b4b76c0d7..2e23422280a 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -28,18 +28,30 @@
 #include <asm/ubc.h>
 #endif
 
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu	1
+#else
+#define cpu_has_fpu	0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp	1
+#else
+#define cpu_has_dsp	0
+#endif
+
 /*
  * Generic wrapper for command line arguments to disable on-chip
  * peripherals (nofpu, nodsp, and so forth).
  */
 #define onchip_setup(x)					\
-static int x##_disabled __initdata = 0;			\
+static int x##_disabled __initdata = !cpu_has_##x;	\
 							\
 static int __init x##_setup(char *opts)			\
 {							\
 	x##_disabled = 1;				\
 	return 1;					\
 }							\
 __setup("no" __stringify(x), x##_setup);
 
 onchip_setup(fpu);
@@ -207,6 +219,18 @@ static void detect_cache_shape(void)
 		l2_cache_shape = -1; /* No S-cache */
 }
 
+static void __init fpu_init(void)
+{
+	/* Disable the FPU */
+	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+		printk("FPU Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_FPU;
+	}
+
+	disable_fpu();
+	clear_used_math();
+}
+
 #ifdef CONFIG_SH_DSP
 static void __init release_dsp(void)
 {
@@ -244,9 +268,17 @@ static void __init dsp_init(void)
 	if (sr & SR_DSP)
 		current_cpu_data.flags |= CPU_HAS_DSP;
 
+	/* Disable the DSP */
+	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+		printk("DSP Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_DSP;
+	}
+
 	/* Now that we've determined the DSP status, clear the DSP bit. */
 	release_dsp();
 }
+#else
+static inline void __init dsp_init(void) { }
 #endif /* CONFIG_SH_DSP */
 
 /**
@@ -302,18 +334,8 @@ asmlinkage void __init sh_cpu_init(void)
 		detect_cache_shape();
 	}
 
-	/* Disable the FPU */
-	if (fpu_disabled) {
-		printk("FPU Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_FPU;
-	}
-
-	/* FPU initialization */
-	disable_fpu();
-	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
-		current_thread_info()->status &= ~TS_USEDFPU;
-		clear_used_math();
-	}
+	fpu_init();
+	dsp_init();
 
 	/*
 	 * Initialize the per-CPU ASID cache very early, since the
@@ -321,18 +343,12 @@ asmlinkage void __init sh_cpu_init(void)
 	 */
 	current_cpu_data.asid_cache = NO_CONTEXT;
 
-#ifdef CONFIG_SH_DSP
-	/* Probe for DSP */
-	dsp_init();
-
-	/* Disable the DSP */
-	if (dsp_disabled) {
-		printk("DSP Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_DSP;
-		release_dsp();
-	}
-#endif
-
 	speculative_execution_init();
 	expmask_init();
+
+	/*
+	 * Boot processor to setup the FP and extended state context info.
+	 */
+	if (raw_smp_processor_id() == 0)
+		init_thread_xstate();
 }
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index d395ce5740e..488d24e0cdf 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -26,8 +26,7 @@
 /*
  * Save FPU registers onto task structure.
  */
-void
-save_fpu(struct task_struct *tsk)
+void save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -52,7 +51,7 @@ save_fpu(struct task_struct *tsk)
 		"fmov.s	fr0, @-%0\n\t"
 		"lds	%3, fpscr\n\t"
 		: "=r" (dummy)
-		: "0" ((char *)(&tsk->thread.fpu.hard.status)),
+		: "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
 		  "r" (FPSCR_RCHG),
 		  "r" (FPSCR_INIT)
 		: "memory");
@@ -60,8 +59,7 @@ save_fpu(struct task_struct *tsk)
 	disable_fpu();
 }
 
-static void
-restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -85,45 +83,12 @@ restore_fpu(struct task_struct *tsk)
 		"lds.l	@%0+, fpscr\n\t"
 		"lds.l	@%0+, fpul\n\t"
 		: "=r" (dummy)
-		: "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+		: "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
 		: "memory");
 	disable_fpu();
 }
 
 /*
- * Load the FPU with signalling NANS. This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precission represents signaling NANS.
- */
-
-static void
-fpu_init(void)
-{
-	enable_fpu();
-	asm volatile("lds	%0, fpul\n\t"
-		     "fsts	fpul, fr0\n\t"
-		     "fsts	fpul, fr1\n\t"
-		     "fsts	fpul, fr2\n\t"
-		     "fsts	fpul, fr3\n\t"
-		     "fsts	fpul, fr4\n\t"
-		     "fsts	fpul, fr5\n\t"
-		     "fsts	fpul, fr6\n\t"
-		     "fsts	fpul, fr7\n\t"
-		     "fsts	fpul, fr8\n\t"
-		     "fsts	fpul, fr9\n\t"
-		     "fsts	fpul, fr10\n\t"
-		     "fsts	fpul, fr11\n\t"
-		     "fsts	fpul, fr12\n\t"
-		     "fsts	fpul, fr13\n\t"
-		     "fsts	fpul, fr14\n\t"
-		     "fsts	fpul, fr15\n\t"
-		     "lds	%2, fpscr\n\t"
-		     : /* no output */
-		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
-	disable_fpu();
-}
-
-/*
  * Emulate arithmetic ops on denormalized number for some FPU insns.
  */
 
@@ -490,9 +455,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
 			/* FPU error */
-			denormal_to_double (&tsk->thread.fpu.hard,
+			denormal_to_double (&tsk->thread.xstate->hardfpu,
 					    (finsn >> 8) & 0xf);
 		} else
 			return 0;
@@ -507,9 +472,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -519,15 +484,15 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-				| tsk->thread.fpu.hard.fp_regs[n+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-				| tsk->thread.fpu.hard.fp_regs[m+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((hx & 0x7fffffff) >= 0x00100000)
 				llx = denormal_muld(lly, llx);
 			else
 				llx = denormal_muld(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 			 && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				       || (hy & 0x7fffffff) < 0x00800000))) {
@@ -536,7 +501,7 @@ ieee_fpe_handler (struct pt_regs *regs)
 				hx = denormal_mulf(hy, hx);
 			else
 				hx = denormal_mulf(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -550,9 +515,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -562,15 +527,15 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-				| tsk->thread.fpu.hard.fp_regs[n+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-				| tsk->thread.fpu.hard.fp_regs[m+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((finsn & 0xf00f) == 0xf000)
 				llx = denormal_addd(llx, lly);
 			else
 				llx = denormal_addd(llx, lly ^ (1LL << 63));
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 			 && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				       || (hy & 0x7fffffff) < 0x00800000))) {
@@ -579,7 +544,7 @@ ieee_fpe_handler (struct pt_regs *regs)
 				hx = denormal_addf(hx, hy);
 			else
 				hx = denormal_addf(hx, hy ^ 0x80000000);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -597,7 +562,7 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	__unlazy_fpu(tsk, regs);
 	if (ieee_fpe_handler(regs)) {
-		tsk->thread.fpu.hard.fpscr &=
+		tsk->thread.xstate->hardfpu.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 		grab_fpu(regs);
 		restore_fpu(tsk);
@@ -607,33 +572,3 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	force_sig(SIGFPE, tsk);
 }
-
-void fpu_state_restore(struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-
-	grab_fpu(regs);
-	if (unlikely(!user_mode(regs))) {
-		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-		BUG();
-		return;
-	}
-
-	if (likely(used_math())) {
-		/* Using the FPU again. */
-		restore_fpu(tsk);
-	} else {
-		/* First time FPU user. */
-		fpu_init();
-		set_used_math();
-	}
-	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
-	TRAP_HANDLER_DECL;
-
-	fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e97857aec8a..447482d7f65 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -85,14 +85,14 @@ void save_fpu(struct task_struct *tsk)
 		      "fmov.s	fr1, @-%0\n\t"
 		      "fmov.s	fr0, @-%0\n\t"
 		      "lds	%3, fpscr\n\t":"=r" (dummy)
-		      :"0"((char *)(&tsk->thread.fpu.hard.status)),
+		      :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
 		      "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
 		      :"memory");
 
 	disable_fpu();
 }
 
-static void restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -135,62 +135,11 @@ static void restore_fpu(struct task_struct *tsk)
 		      "lds.l	@%0+, fpscr\n\t"
 		      "lds.l	@%0+, fpul\n\t"
 		      :"=r" (dummy)
-		      :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
+		      :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
 		      :"memory");
 	disable_fpu();
 }
 
-/*
- * Load the FPU with signalling NANS. This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precision represents signaling NANS.
- */
-
-static void fpu_init(void)
-{
-	enable_fpu();
-	asm volatile ( "lds	%0, fpul\n\t"
-			"lds	%1, fpscr\n\t"
-			"fsts	fpul, fr0\n\t"
-			"fsts	fpul, fr1\n\t"
-			"fsts	fpul, fr2\n\t"
-			"fsts	fpul, fr3\n\t"
-			"fsts	fpul, fr4\n\t"
-			"fsts	fpul, fr5\n\t"
-			"fsts	fpul, fr6\n\t"
-			"fsts	fpul, fr7\n\t"
-			"fsts	fpul, fr8\n\t"
-			"fsts	fpul, fr9\n\t"
-			"fsts	fpul, fr10\n\t"
-			"fsts	fpul, fr11\n\t"
-			"fsts	fpul, fr12\n\t"
-			"fsts	fpul, fr13\n\t"
-			"fsts	fpul, fr14\n\t"
-			"fsts	fpul, fr15\n\t"
-			"frchg\n\t"
-			"fsts	fpul, fr0\n\t"
-			"fsts	fpul, fr1\n\t"
-			"fsts	fpul, fr2\n\t"
-			"fsts	fpul, fr3\n\t"
-			"fsts	fpul, fr4\n\t"
-			"fsts	fpul, fr5\n\t"
-			"fsts	fpul, fr6\n\t"
-			"fsts	fpul, fr7\n\t"
-			"fsts	fpul, fr8\n\t"
-			"fsts	fpul, fr9\n\t"
-			"fsts	fpul, fr10\n\t"
-			"fsts	fpul, fr11\n\t"
-			"fsts	fpul, fr12\n\t"
-			"fsts	fpul, fr13\n\t"
-			"fsts	fpul, fr14\n\t"
-			"fsts	fpul, fr15\n\t"
-			"frchg\n\t"
-			"lds	%2, fpscr\n\t"
-			: /* no output */
-			:"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
-	disable_fpu();
-}
-
 /**
  * denormal_to_double - Given denormalized float number,
  *                      store double float
@@ -282,9 +231,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 		/* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
 			/* FPU error */
-			denormal_to_double(&tsk->thread.fpu.hard,
+			denormal_to_double(&tsk->thread.xstate->hardfpu,
 					   (finsn >> 8) & 0xf);
 		else
 			return 0;
@@ -300,9 +249,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -312,18 +261,18 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 			llx = float64_mul(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
 			/* FPU error because of denormal (floats) */
 			hx = float32_mul(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -338,9 +287,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -350,15 +299,15 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 			if ((finsn & 0xf00f) == 0xf000)
 				llx = float64_add(llx, lly);
 			else
 				llx = float64_sub(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
@@ -367,7 +316,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 				hx = float32_add(hx, hy);
 			else
 				hx = float32_sub(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -382,9 +331,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -394,20 +343,20 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 
 			llx = float64_div(llx, lly);
 
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
 			/* FPU error because of denormal (floats) */
 			hx = float32_div(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -420,17 +369,17 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 		unsigned int hx;
 
 		m = (finsn >> 8) & 0x7;
-		hx = tsk->thread.fpu.hard.fp_regs[m];
+		hx = tsk->thread.xstate->hardfpu.fp_regs[m];
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
 		    && ((hx & 0x7fffffff) < 0x00100000)) {
 			/* subnormal double to float conversion */
 			long long llx;
 
-			llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 
-			tsk->thread.fpu.hard.fpul = float64_to_float32(llx);
+			tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
 		} else
 			return 0;
 
@@ -449,7 +398,7 @@ void float_raise(unsigned int flags)
 
 int float_rounding_mode(void)
 {
 	struct task_struct *tsk = current;
-	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
+	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
 	return roundingMode;
 }
 
@@ -461,16 +410,16 @@ BUILD_TRAP_HANDLER(fpu_error)
 	__unlazy_fpu(tsk, regs);
 	fpu_exception_flags = 0;
 	if (ieee_fpe_handler(regs)) {
-		tsk->thread.fpu.hard.fpscr &=
+		tsk->thread.xstate->hardfpu.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
-		tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
+		tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
 		/* Set the FPSCR flag as well as cause bits - simply
 		 * replicate the cause */
-		tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
+		tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
 		grab_fpu(regs);
 		restore_fpu(tsk);
 		task_thread_info(tsk)->status |= TS_USEDFPU;
-		if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+		if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
 		    (fpu_exception_flags >> 2)) == 0) {
 			return;
 		}
@@ -478,33 +427,3 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	force_sig(SIGFPE, tsk);
 }
-
-void fpu_state_restore(struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-
-	grab_fpu(regs);
-	if (unlikely(!user_mode(regs))) {
-		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-		BUG();
-		return;
-	}
-
-	if (likely(used_math())) {
-		/* Using the FPU again. */
-		restore_fpu(tsk);
-	} else {
-		/* First time FPU user. */
-		fpu_init();
-		set_used_math();
-	}
-	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
-	TRAP_HANDLER_DECL;
-
-	fpu_state_restore(regs);
-}
