author		Suresh Siddha <suresh.b.siddha@intel.com>	2008-03-10 18:28:04 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-04-19 13:19:55 -0400
commit		61c4628b538608c1a85211ed8438136adfeb9a95 (patch)
tree		290a695299a363153bc692e6d705ac680d64359e /arch/x86/kernel/process.c
parent		fa5c4639419668cbb18ca3d20c1253559a3b43ae (diff)
x86, fpu: split FPU state from task struct - v5
Split the FPU save area from the task struct. This allows easy migration
of FPU context, and it's generally cleaner. It also allows the following
two optimizations:

1) only allocate the save area when the application actually uses the FPU,
   i.e. in the first lazy FPU trap. This could save memory for apps that
   never use the FPU. The next patch does this lazy allocation.

2) allocate the right size for the actual CPU rather than 512 bytes always.
   Patches enabling xsave/xrstor support (coming shortly) will take
   advantage of this.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
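For context, the split means thread_struct no longer embeds the 512-byte i387 save area; it only keeps a pointer to a buffer allocated from a dedicated slab cache. The declarations below are a rough sketch of the <asm/processor.h> side of the change, reconstructed from the identifiers used in the process.c hunk below (union thread_xstate, xstate_size, thread.xstate); they are not part of this hunk, so treat the exact field layout as an assumption.

union thread_xstate {
	struct i387_fsave_struct	fsave;	/* legacy FSAVE/FRSTOR layout */
	struct i387_fxsave_struct	fxsave;	/* FXSAVE/FXRSTOR layout, needs 16-byte alignment */
	struct i387_soft_struct		soft;	/* math-emulation state */
};

struct thread_struct {
	/* ... existing fields ... */
	union thread_xstate	*xstate;	/* FPU/SSE state, allocated from task_xstate_cachep */
};

extern unsigned int xstate_size;		/* bytes actually needed by this CPU's save format */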
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c	35
1 files changed, 35 insertions, 0 deletions
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
new file mode 100644
index 000000000000..ead24efbcba0
--- /dev/null
+++ b/arch/x86/kernel/process.c
@@ -0,0 +1,35 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+static struct kmem_cache *task_xstate_cachep;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	*dst = *src;
+	dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+	if (!dst->thread.xstate)
+		return -ENOMEM;
+	WARN_ON((unsigned long)dst->thread.xstate & 15);
+	memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	return 0;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	kmem_cache_free(task_xstate_cachep, ti->task->thread.xstate);
+	ti->task->thread.xstate = NULL;
+
+	free_pages((unsigned long)(ti), get_order(THREAD_SIZE));
+}
+
+void arch_task_cache_init(void)
+{
+	task_xstate_cachep =
+		kmem_cache_create("task_xstate", xstate_size,
+				  __alignof__(union thread_xstate),
+				  SLAB_PANIC, NULL);
+}
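Optimization (1) from the commit message is left to the next patch, but its shape follows directly from the code above: allocate thread.xstate only when a task first touches the FPU. A minimal sketch under that assumption, reusing the same task_xstate_cachep cache; the helper name init_fpu_state() is hypothetical and not part of this patch.

/* Hypothetical helper: allocate the FPU save area on first use. */
static int init_fpu_state(struct task_struct *tsk)
{
	if (tsk->thread.xstate)
		return 0;				/* already allocated */

	tsk->thread.xstate = kmem_cache_zalloc(task_xstate_cachep, GFP_KERNEL);
	if (!tsk->thread.xstate)
		return -ENOMEM;				/* caller decides how to handle failure */

	return 0;
}

The first lazy FPU trap would call such a helper before restoring state, so tasks that never use the FPU never pay for the buffer.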