author		Paul Mundt <lethal@linux-sh.org>	2010-01-12 22:51:40 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-01-12 22:51:40 -0500
commit		0ea820cf9bf58f735ed40ec67947159c4f170012 (patch)
tree		77320006b4dded5804c678c1a869571be5c0b95f /arch
parent		a3705799e2cc5fb69d88ad6a7f317a8f5597f18d (diff)
sh: Move over to dynamically allocated FPU context.
This follows the x86 xstate changes and implements a task_xstate slab cache that is dynamically sized to match one of hard FP/soft FP/FPU-less.

This also tidies up and consolidates some of the SH-2A/SH-4 FPU fragmentation. Now FPU state restorers are commonly defined, with the init_fpu()/fpu_init() mess reworked to follow the x86 convention. The fpu_init() register initialization has been replaced by xstate setup followed by writing out to hardware via the standard restore path.

As init_fpu() now performs a slab allocation, a secondary lighter-weight restorer is also introduced for the context switch.

In the future the DSP state will be rolled in here, too.

More work remains for math emulation and the SH-5 FPU, which presently uses its own special (UP-only) interfaces.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
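[Editor's note] In short, the patch sizes the per-task FPU/xstate area once on the boot CPU, backs it with a slab cache, and only allocates a task's context the first time that task actually touches the FPU. Below is a condensed sketch of that flow, assembled from the diff that follows; it is illustrative only (error handling, config details and the trap-handler plumbing are trimmed), not a substitute for the real code in arch/sh/kernel/process.c and arch/sh/kernel/cpu/fpu.c. All identifiers are taken from the patch itself.

/* Illustrative sketch of the allocation flow introduced by this patch. */

#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

unsigned int xstate_size;		/* chosen once at boot */
struct kmem_cache *task_xstate_cachep;	/* backs every task's xstate */

void init_thread_xstate(void)		/* boot CPU: pick the context size */
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;	/* FPU-less: no per-task xstate */
}

void arch_task_cache_init(void)		/* create the cache, if needed at all */
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC, NULL);
}

int init_fpu(struct task_struct *tsk)	/* first FPU use: allocate lazily */
{
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}
	/*
	 * ...then zero the area, set FPSCR_INIT and mark the task as having
	 * used math; see arch/sh/kernel/cpu/fpu.c in the diff below.
	 */
	return 0;
}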
Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/include/asm/fpu.h		|  35
-rw-r--r--	arch/sh/include/asm/processor_32.h	|  16
-rw-r--r--	arch/sh/include/asm/thread_info.h	|   4
-rw-r--r--	arch/sh/kernel/cpu/Makefile		|   2
-rw-r--r--	arch/sh/kernel/cpu/fpu.c		|  82
-rw-r--r--	arch/sh/kernel/cpu/init.c		|  80
-rw-r--r--	arch/sh/kernel/cpu/sh2a/fpu.c		| 111
-rw-r--r--	arch/sh/kernel/cpu/sh4/fpu.c		| 159
-rw-r--r--	arch/sh/kernel/process.c		|  54
-rw-r--r--	arch/sh/kernel/process_32.c		|   6
-rw-r--r--	arch/sh/kernel/ptrace_32.c		|  12
-rw-r--r--	arch/sh/kernel/signal_32.c		|   4
-rw-r--r--	arch/sh/math-emu/math.c			|  12
13 files changed, 292 insertions(+), 285 deletions(-)
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index fb6bbb9b1cc8..06c4281aab65 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -2,8 +2,8 @@
 #define __ASM_SH_FPU_H
 
 #ifndef __ASSEMBLY__
-#include <linux/preempt.h>
-#include <asm/ptrace.h>
+
+struct task_struct;
 
 #ifdef CONFIG_SH_FPU
 static inline void release_fpu(struct pt_regs *regs)
@@ -16,22 +16,23 @@ static inline void grab_fpu(struct pt_regs *regs)
 	regs->sr &= ~SR_FD;
 }
 
-struct task_struct;
-
 extern void save_fpu(struct task_struct *__tsk);
-void fpu_state_restore(struct pt_regs *regs);
+extern void restore_fpu(struct task_struct *__tsk);
+extern void fpu_state_restore(struct pt_regs *regs);
+extern void __fpu_state_restore(void);
 #else
-
 #define save_fpu(tsk)			do { } while (0)
+#define restore_fpu(tsk)		do { } while (0)
 #define release_fpu(regs)		do { } while (0)
 #define grab_fpu(regs)			do { } while (0)
 #define fpu_state_restore(regs)		do { } while (0)
-
+#define __fpu_state_restore(regs)	do { } while (0)
 #endif
 
 struct user_regset;
 
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
+extern int init_fpu(struct task_struct *);
 
 extern int fpregs_get(struct task_struct *target,
 		      const struct user_regset *regset,
@@ -65,18 +66,6 @@ static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
 	preempt_enable();
 }
 
-static inline int init_fpu(struct task_struct *tsk)
-{
-	if (tsk_used_math(tsk)) {
-		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
-			unlazy_fpu(tsk, task_pt_regs(tsk));
-		return 0;
-	}
-
-	set_stopped_child_used_math(tsk);
-	return 0;
-}
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_SH_FPU_H */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 50b8c9c3fa4c..a359898206e8 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -90,11 +90,15 @@ struct sh_fpu_soft_struct {
 	unsigned long entry_pc;
 };
 
-union sh_fpu_union {
-	struct sh_fpu_hard_struct hard;
-	struct sh_fpu_soft_struct soft;
+union thread_xstate {
+	struct sh_fpu_hard_struct hardfpu;
+	struct sh_fpu_soft_struct softfpu;
 };
 
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
+
 struct thread_struct {
 	/* Saved registers when thread is descheduled */
 	unsigned long sp;
@@ -103,13 +107,13 @@ struct thread_struct {
 	/* Hardware debugging registers */
 	unsigned long ubc_pc;
 
-	/* floating point info */
-	union sh_fpu_union fpu;
-
 #ifdef CONFIG_SH_DSP
 	/* Dsp status information */
 	struct sh_dsp_struct dsp_status;
 #endif
+
+	/* Extended processor state */
+	union thread_xstate *xstate;
 };
 
 /* Count of active tasks with UBC settings */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 2c5b48edeab9..55a36fef6875 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -97,6 +97,10 @@ static inline struct thread_info *current_thread_info(void)
 
 extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
 extern void free_thread_info(struct thread_info *ti);
+extern void arch_task_cache_init(void);
+#define arch_task_cache_init arch_task_cache_init
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+extern void init_thread_xstate(void);
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d97c803719ec..0e48bc61c272 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -17,5 +17,7 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
 
 obj-$(CONFIG_SH_ADC)		+= adc.o
 obj-$(CONFIG_SH_CLK_CPG)	+= clock-cpg.o
+obj-$(CONFIG_SH_FPU)		+= fpu.o
+obj-$(CONFIG_SH_FPU_EMU)	+= fpu.o
 
 obj-y	+= irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 000000000000..c23e6727002a
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,82 @@
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+
+int init_fpu(struct task_struct *tsk)
+{
+	if (tsk_used_math(tsk)) {
+		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+			unlazy_fpu(tsk, task_pt_regs(tsk));
+		return 0;
+	}
+
+	/*
+	 * Memory allocation at the first usage of the FPU and other state.
+	 */
+	if (!tsk->thread.xstate) {
+		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!tsk->thread.xstate)
+			return -ENOMEM;
+	}
+
+	if (boot_cpu_data.flags & CPU_HAS_FPU) {
+		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	} else {
+		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	}
+
+	set_stopped_child_used_math(tsk);
+	return 0;
+}
+
+#ifdef CONFIG_SH_FPU
+void __fpu_state_restore(void)
+{
+	struct task_struct *tsk = current;
+
+	restore_fpu(tsk);
+
+	task_thread_info(tsk)->status |= TS_USEDFPU;
+	tsk->fpu_counter++;
+}
+
+void fpu_state_restore(struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+
+	if (unlikely(!user_mode(regs))) {
+		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
+		return;
+	}
+
+	if (!tsk_used_math(tsk)) {
+		/*
+		 * does a slab alloc which can sleep
+		 */
+		if (init_fpu(tsk)) {
+			/*
+			 * ran out of memory!
+			 */
+			do_group_exit(SIGKILL);
+			return;
+		}
+	}
+
+	grab_fpu(regs);
+
+	__fpu_state_restore();
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
+}
+#endif /* CONFIG_SH_FPU */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 89b4b76c0d76..2e23422280a7 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -28,18 +28,30 @@
 #include <asm/ubc.h>
 #endif
 
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu	1
+#else
+#define cpu_has_fpu	0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp	1
+#else
+#define cpu_has_dsp	0
+#endif
+
 /*
  * Generic wrapper for command line arguments to disable on-chip
  * peripherals (nofpu, nodsp, and so forth).
  */
 #define onchip_setup(x)					\
-static int x##_disabled __initdata = 0;			\
+static int x##_disabled __initdata = !cpu_has_##x;	\
 							\
 static int __init x##_setup(char *opts)			\
 {							\
 	x##_disabled = 1;				\
 	return 1;					\
 }							\
 __setup("no" __stringify(x), x##_setup);
 
 onchip_setup(fpu);
@@ -207,6 +219,18 @@ static void detect_cache_shape(void)
 		l2_cache_shape = -1; /* No S-cache */
 }
 
+static void __init fpu_init(void)
+{
+	/* Disable the FPU */
+	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+		printk("FPU Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_FPU;
+	}
+
+	disable_fpu();
+	clear_used_math();
+}
+
 #ifdef CONFIG_SH_DSP
 static void __init release_dsp(void)
 {
@@ -244,9 +268,17 @@ static void __init dsp_init(void)
 	if (sr & SR_DSP)
 		current_cpu_data.flags |= CPU_HAS_DSP;
 
+	/* Disable the DSP */
+	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+		printk("DSP Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_DSP;
+	}
+
 	/* Now that we've determined the DSP status, clear the DSP bit. */
 	release_dsp();
 }
+#else
+static inline void __init dsp_init(void) { }
 #endif /* CONFIG_SH_DSP */
 
 /**
@@ -302,18 +334,8 @@ asmlinkage void __init sh_cpu_init(void)
 		detect_cache_shape();
 	}
 
-	/* Disable the FPU */
-	if (fpu_disabled) {
-		printk("FPU Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_FPU;
-	}
-
-	/* FPU initialization */
-	disable_fpu();
-	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
-		current_thread_info()->status &= ~TS_USEDFPU;
-		clear_used_math();
-	}
+	fpu_init();
+	dsp_init();
 
 	/*
 	 * Initialize the per-CPU ASID cache very early, since the
@@ -321,18 +343,12 @@ asmlinkage void __init sh_cpu_init(void)
 	 */
 	current_cpu_data.asid_cache = NO_CONTEXT;
 
-#ifdef CONFIG_SH_DSP
-	/* Probe for DSP */
-	dsp_init();
-
-	/* Disable the DSP */
-	if (dsp_disabled) {
-		printk("DSP Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_DSP;
-		release_dsp();
-	}
-#endif
-
 	speculative_execution_init();
 	expmask_init();
+
+	/*
+	 * Boot processor to setup the FP and extended state context info.
+	 */
+	if (raw_smp_processor_id() == 0)
+		init_thread_xstate();
 }
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index d395ce5740e7..488d24e0cdf0 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -26,8 +26,7 @@
 /*
  * Save FPU registers onto task structure.
  */
-void
-save_fpu(struct task_struct *tsk)
+void save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -52,7 +51,7 @@ save_fpu(struct task_struct *tsk)
52 "fmov.s fr0, @-%0\n\t" 51 "fmov.s fr0, @-%0\n\t"
53 "lds %3, fpscr\n\t" 52 "lds %3, fpscr\n\t"
54 : "=r" (dummy) 53 : "=r" (dummy)
55 : "0" ((char *)(&tsk->thread.fpu.hard.status)), 54 : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
56 "r" (FPSCR_RCHG), 55 "r" (FPSCR_RCHG),
57 "r" (FPSCR_INIT) 56 "r" (FPSCR_INIT)
58 : "memory"); 57 : "memory");
@@ -60,8 +59,7 @@ save_fpu(struct task_struct *tsk)
 	disable_fpu();
 }
 
-static void
-restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -85,45 +83,12 @@ restore_fpu(struct task_struct *tsk)
85 "lds.l @%0+, fpscr\n\t" 83 "lds.l @%0+, fpscr\n\t"
86 "lds.l @%0+, fpul\n\t" 84 "lds.l @%0+, fpul\n\t"
87 : "=r" (dummy) 85 : "=r" (dummy)
88 : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG) 86 : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
89 : "memory"); 87 : "memory");
90 disable_fpu(); 88 disable_fpu();
91} 89}
92 90
93/* 91/*
94 * Load the FPU with signalling NANS. This bit pattern we're using
95 * has the property that no matter wether considered as single or as
96 * double precission represents signaling NANS.
97 */
98
99static void
100fpu_init(void)
101{
102 enable_fpu();
103 asm volatile("lds %0, fpul\n\t"
104 "fsts fpul, fr0\n\t"
105 "fsts fpul, fr1\n\t"
106 "fsts fpul, fr2\n\t"
107 "fsts fpul, fr3\n\t"
108 "fsts fpul, fr4\n\t"
109 "fsts fpul, fr5\n\t"
110 "fsts fpul, fr6\n\t"
111 "fsts fpul, fr7\n\t"
112 "fsts fpul, fr8\n\t"
113 "fsts fpul, fr9\n\t"
114 "fsts fpul, fr10\n\t"
115 "fsts fpul, fr11\n\t"
116 "fsts fpul, fr12\n\t"
117 "fsts fpul, fr13\n\t"
118 "fsts fpul, fr14\n\t"
119 "fsts fpul, fr15\n\t"
120 "lds %2, fpscr\n\t"
121 : /* no output */
122 : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
123 disable_fpu();
124}
125
126/*
127 * Emulate arithmetic ops on denormalized number for some FPU insns. 92 * Emulate arithmetic ops on denormalized number for some FPU insns.
128 */ 93 */
129 94
@@ -490,9 +455,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
 			/* FPU error */
-			denormal_to_double (&tsk->thread.fpu.hard,
+			denormal_to_double (&tsk->thread.xstate->hardfpu,
 					    (finsn >> 8) & 0xf);
 		} else
 			return 0;
@@ -507,9 +472,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -519,15 +484,15 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-				| tsk->thread.fpu.hard.fp_regs[n+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-				| tsk->thread.fpu.hard.fp_regs[m+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((hx & 0x7fffffff) >= 0x00100000)
 				llx = denormal_muld(lly, llx);
 			else
 				llx = denormal_muld(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 			 && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				       || (hy & 0x7fffffff) < 0x00800000))) {
@@ -536,7 +501,7 @@ ieee_fpe_handler (struct pt_regs *regs)
 				hx = denormal_mulf(hy, hx);
 			else
 				hx = denormal_mulf(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -550,9 +515,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -562,15 +527,15 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-				| tsk->thread.fpu.hard.fp_regs[n+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-				| tsk->thread.fpu.hard.fp_regs[m+1];
+				| tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((finsn & 0xf00f) == 0xf000)
 				llx = denormal_addd(llx, lly);
 			else
 				llx = denormal_addd(llx, lly ^ (1LL << 63));
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 			 && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				       || (hy & 0x7fffffff) < 0x00800000))) {
@@ -579,7 +544,7 @@ ieee_fpe_handler (struct pt_regs *regs)
 				hx = denormal_addf(hx, hy);
 			else
 				hx = denormal_addf(hx, hy ^ 0x80000000);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -597,7 +562,7 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	__unlazy_fpu(tsk, regs);
 	if (ieee_fpe_handler(regs)) {
-		tsk->thread.fpu.hard.fpscr &=
+		tsk->thread.xstate->hardfpu.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 		grab_fpu(regs);
 		restore_fpu(tsk);
@@ -607,33 +572,3 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	force_sig(SIGFPE, tsk);
 }
-
-void fpu_state_restore(struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-
-	grab_fpu(regs);
-	if (unlikely(!user_mode(regs))) {
-		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-		BUG();
-		return;
-	}
-
-	if (likely(used_math())) {
-		/* Using the FPU again. */
-		restore_fpu(tsk);
-	} else {
-		/* First time FPU user. */
-		fpu_init();
-		set_used_math();
-	}
-	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
-	TRAP_HANDLER_DECL;
-
-	fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e97857aec8a0..447482d7f65e 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -85,14 +85,14 @@ void save_fpu(struct task_struct *tsk)
85 "fmov.s fr1, @-%0\n\t" 85 "fmov.s fr1, @-%0\n\t"
86 "fmov.s fr0, @-%0\n\t" 86 "fmov.s fr0, @-%0\n\t"
87 "lds %3, fpscr\n\t":"=r" (dummy) 87 "lds %3, fpscr\n\t":"=r" (dummy)
88 :"0"((char *)(&tsk->thread.fpu.hard.status)), 88 :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
89 "r"(FPSCR_RCHG), "r"(FPSCR_INIT) 89 "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
90 :"memory"); 90 :"memory");
91 91
92 disable_fpu(); 92 disable_fpu();
93} 93}
94 94
95static void restore_fpu(struct task_struct *tsk) 95void restore_fpu(struct task_struct *tsk)
96{ 96{
97 unsigned long dummy; 97 unsigned long dummy;
98 98
@@ -135,62 +135,11 @@ static void restore_fpu(struct task_struct *tsk)
135 "lds.l @%0+, fpscr\n\t" 135 "lds.l @%0+, fpscr\n\t"
136 "lds.l @%0+, fpul\n\t" 136 "lds.l @%0+, fpul\n\t"
137 :"=r" (dummy) 137 :"=r" (dummy)
138 :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG) 138 :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
139 :"memory"); 139 :"memory");
140 disable_fpu(); 140 disable_fpu();
141} 141}
142 142
143/*
144 * Load the FPU with signalling NANS. This bit pattern we're using
145 * has the property that no matter wether considered as single or as
146 * double precision represents signaling NANS.
147 */
148
149static void fpu_init(void)
150{
151 enable_fpu();
152 asm volatile ( "lds %0, fpul\n\t"
153 "lds %1, fpscr\n\t"
154 "fsts fpul, fr0\n\t"
155 "fsts fpul, fr1\n\t"
156 "fsts fpul, fr2\n\t"
157 "fsts fpul, fr3\n\t"
158 "fsts fpul, fr4\n\t"
159 "fsts fpul, fr5\n\t"
160 "fsts fpul, fr6\n\t"
161 "fsts fpul, fr7\n\t"
162 "fsts fpul, fr8\n\t"
163 "fsts fpul, fr9\n\t"
164 "fsts fpul, fr10\n\t"
165 "fsts fpul, fr11\n\t"
166 "fsts fpul, fr12\n\t"
167 "fsts fpul, fr13\n\t"
168 "fsts fpul, fr14\n\t"
169 "fsts fpul, fr15\n\t"
170 "frchg\n\t"
171 "fsts fpul, fr0\n\t"
172 "fsts fpul, fr1\n\t"
173 "fsts fpul, fr2\n\t"
174 "fsts fpul, fr3\n\t"
175 "fsts fpul, fr4\n\t"
176 "fsts fpul, fr5\n\t"
177 "fsts fpul, fr6\n\t"
178 "fsts fpul, fr7\n\t"
179 "fsts fpul, fr8\n\t"
180 "fsts fpul, fr9\n\t"
181 "fsts fpul, fr10\n\t"
182 "fsts fpul, fr11\n\t"
183 "fsts fpul, fr12\n\t"
184 "fsts fpul, fr13\n\t"
185 "fsts fpul, fr14\n\t"
186 "fsts fpul, fr15\n\t"
187 "frchg\n\t"
188 "lds %2, fpscr\n\t"
189 : /* no output */
190 :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
191 disable_fpu();
192}
193
194/** 143/**
195 * denormal_to_double - Given denormalized float number, 144 * denormal_to_double - Given denormalized float number,
196 * store double float 145 * store double float
@@ -282,9 +231,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 		/* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
 			/* FPU error */
-			denormal_to_double(&tsk->thread.fpu.hard,
+			denormal_to_double(&tsk->thread.xstate->hardfpu,
 					   (finsn >> 8) & 0xf);
 		else
 			return 0;
@@ -300,9 +249,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -312,18 +261,18 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 			llx = float64_mul(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
 			/* FPU error because of denormal (floats) */
 			hx = float32_mul(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -338,9 +287,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -350,15 +299,15 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 			if ((finsn & 0xf00f) == 0xf000)
 				llx = float64_add(llx, lly);
 			else
 				llx = float64_sub(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
@@ -367,7 +316,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 				hx = float32_add(hx, hy);
 			else
 				hx = float32_sub(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -382,9 +331,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -394,20 +343,20 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 
 			llx = float64_div(llx, lly);
 
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
 			/* FPU error because of denormal (floats) */
 			hx = float32_div(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -420,17 +369,17 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 		unsigned int hx;
 
 		m = (finsn >> 8) & 0x7;
-		hx = tsk->thread.fpu.hard.fp_regs[m];
+		hx = tsk->thread.xstate->hardfpu.fp_regs[m];
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
 		    && ((hx & 0x7fffffff) < 0x00100000)) {
 			/* subnormal double to float conversion */
 			long long llx;
 
-			llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 
-			tsk->thread.fpu.hard.fpul = float64_to_float32(llx);
+			tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
 		} else
 			return 0;
 
@@ -449,7 +398,7 @@ void float_raise(unsigned int flags)
 int float_rounding_mode(void)
 {
 	struct task_struct *tsk = current;
-	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
+	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
 	return roundingMode;
 }
 
@@ -461,16 +410,16 @@ BUILD_TRAP_HANDLER(fpu_error)
 	__unlazy_fpu(tsk, regs);
 	fpu_exception_flags = 0;
 	if (ieee_fpe_handler(regs)) {
-		tsk->thread.fpu.hard.fpscr &=
+		tsk->thread.xstate->hardfpu.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
-		tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
+		tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
 		/* Set the FPSCR flag as well as cause bits - simply
 		 * replicate the cause */
-		tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
+		tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
 		grab_fpu(regs);
 		restore_fpu(tsk);
 		task_thread_info(tsk)->status |= TS_USEDFPU;
-		if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+		if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
 		     (fpu_exception_flags >> 2)) == 0) {
 			return;
 		}
@@ -478,33 +427,3 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	force_sig(SIGFPE, tsk);
 }
-
-void fpu_state_restore(struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-
-	grab_fpu(regs);
-	if (unlikely(!user_mode(regs))) {
-		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-		BUG();
-		return;
-	}
-
-	if (likely(used_math())) {
-		/* Using the FPU again. */
-		restore_fpu(tsk);
-	} else {
-		/* First time FPU user. */
-		fpu_init();
-		set_used_math();
-	}
-	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
-	TRAP_HANDLER_DECL;
-
-	fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 077e06e1a889..81add9b9ea6e 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -2,6 +2,32 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
+struct kmem_cache *task_xstate_cachep = NULL;
+unsigned int xstate_size;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	*dst = *src;
+
+	if (src->thread.xstate) {
+		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!dst->thread.xstate)
+			return -ENOMEM;
+		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	}
+
+	return 0;
+}
+
+void free_thread_xstate(struct task_struct *tsk)
+{
+	if (tsk->thread.xstate) {
+		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+		tsk->thread.xstate = NULL;
+	}
+}
+
 #if THREAD_SHIFT < PAGE_SHIFT
 static struct kmem_cache *thread_info_cache;
 
@@ -20,6 +46,7 @@ struct thread_info *alloc_thread_info(struct task_struct *tsk)
 
 void free_thread_info(struct thread_info *ti)
 {
+	free_thread_xstate(ti->task);
 	kmem_cache_free(thread_info_cache, ti);
 }
 
@@ -41,6 +68,33 @@ struct thread_info *alloc_thread_info(struct task_struct *tsk)
 
 void free_thread_info(struct thread_info *ti)
 {
+	free_thread_xstate(ti->task);
 	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
 #endif /* THREAD_SHIFT < PAGE_SHIFT */
+
+void arch_task_cache_init(void)
+{
+	if (!xstate_size)
+		return;
+
+	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
+					       __alignof__(union thread_xstate),
+					       SLAB_PANIC | SLAB_NOTRACK, NULL);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+# define HAVE_SOFTFP	1
+#else
+# define HAVE_SOFTFP	0
+#endif
+
+void init_thread_xstate(void)
+{
+	if (boot_cpu_data.flags & CPU_HAS_FPU)
+		xstate_size = sizeof(struct sh_fpu_hard_struct);
+	else if (HAVE_SOFTFP)
+		xstate_size = sizeof(struct sh_fpu_soft_struct);
+	else
+		xstate_size = 0;
+}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index c4361402ec5e..03de6573aa76 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,6 +156,8 @@ void start_thread(struct pt_regs *regs, unsigned long new_pc,
 	regs->sr = SR_FD;
 	regs->pc = new_pc;
 	regs->regs[15] = new_sp;
+
+	free_thread_xstate(current);
 }
 EXPORT_SYMBOL(start_thread);
 
@@ -316,7 +318,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next->fpu_counter > 5)
-		prefetch(&next_t->fpu.hard);
+		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
 	/*
@@ -353,7 +355,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * chances of needing FPU soon are obviously high now
 	 */
 	if (next->fpu_counter > 5)
-		fpu_state_restore(task_pt_regs(next));
+		__fpu_state_restore();
 
 	return prev;
 }
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 9be35f348093..be9b5dcb4021 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -163,10 +163,10 @@ int fpregs_get(struct task_struct *target,
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu.hard, 0, -1);
+					   &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.soft, 0, -1);
+				   &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_set(struct task_struct *target,
@@ -184,10 +184,10 @@ static int fpregs_set(struct task_struct *target,
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.hard, 0, -1);
+					  &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.soft, 0, -1);
+				  &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_active(struct task_struct *target,
@@ -333,7 +333,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			else
 				tmp = 0;
 		} else
-			tmp = ((long *)&child->thread.fpu)
+			tmp = ((long *)child->thread.xstate)
 				[(addr - (long)&dummy->fpu) >> 2];
 	} else if (addr == (long) &dummy->u_fpvalid)
 		tmp = !!tsk_used_math(child);
@@ -362,7 +362,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		else if (addr >= (long) &dummy->fpu &&
 			 addr < (long) &dummy->u_fpvalid) {
 			set_stopped_child_used_math(child);
-			((long *)&child->thread.fpu)
+			((long *)child->thread.xstate)
 				[(addr - (long)&dummy->fpu) >> 2] = data;
 			ret = 0;
 		} else if (addr == (long) &dummy->u_fpvalid) {
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 12815ce01ecd..6a7cce79eb4e 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -150,7 +150,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
 		return 0;
 
 	set_used_math();
-	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
+	return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
 				sizeof(long)*(16*2+2));
 }
 
@@ -175,7 +175,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
 	clear_used_math();
 
 	unlazy_fpu(tsk, regs);
-	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
+	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
 			      sizeof(long)*(16*2+2));
 }
 #endif /* CONFIG_SH_FPU */
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index d6c15cae0912..1fcdb1220975 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -471,10 +471,10 @@ static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_reg
  * denormal_to_double - Given denormalized float number,
  * store double float
  *
- * @fpu: Pointer to sh_fpu_hard structure
+ * @fpu: Pointer to sh_fpu_soft structure
  * @n: Index to FP register
  */
-static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
+static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n)
 {
 	unsigned long du, dl;
 	unsigned long x = fpu->fpul;
@@ -552,11 +552,11 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
+		if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) {
 			/* FPU error */
-			denormal_to_double (&tsk->thread.fpu.hard,
+			denormal_to_double (&tsk->thread.xstate->softfpu,
 					    (finsn >> 8) & 0xf);
-			tsk->thread.fpu.hard.fpscr &=
+			tsk->thread.xstate->softfpu.fpscr &=
 				~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 			task_thread_info(tsk)->status |= TS_USEDFPU;
 		} else {
@@ -617,7 +617,7 @@ static void fpu_init(struct sh_fpu_soft_struct *fpu)
 int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft);
+	struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
 
 	if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
 		/* initialize once. */