aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/fork.c
diff options
context:
space:
mode:
authorSuresh Siddha <suresh.b.siddha@intel.com>2008-03-10 18:28:04 -0400
committerIngo Molnar <mingo@elte.hu>2008-04-19 13:19:55 -0400
commit61c4628b538608c1a85211ed8438136adfeb9a95 (patch)
tree290a695299a363153bc692e6d705ac680d64359e /kernel/fork.c
parentfa5c4639419668cbb18ca3d20c1253559a3b43ae (diff)
x86, fpu: split FPU state from task struct - v5
Split the FPU save area from the task struct. This allows easy migration of FPU context, and it's generally cleaner. It also allows the following two optimizations: 1) only allocate when the application actually uses FPU, so in the first lazy FPU trap. This could save memory for non-fpu using apps. Next patch does this lazy allocation. 2) allocate the right size for the actual cpu rather than 512 bytes always. Patches enabling xsave/xrstor support (coming shortly) will take advantage of this. Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: Arjan van de Ven <arjan@linux.intel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--kernel/fork.c31
1 file changed, 25 insertions, 6 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 9c042f901570..44a18192c420 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -132,6 +132,10 @@ void __put_task_struct(struct task_struct *tsk)
132 free_task(tsk); 132 free_task(tsk);
133} 133}
134 134
135void __attribute__((weak)) arch_task_cache_init(void)
136{
137}
138
135void __init fork_init(unsigned long mempages) 139void __init fork_init(unsigned long mempages)
136{ 140{
137#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 141#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -144,6 +148,9 @@ void __init fork_init(unsigned long mempages)
144 ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL); 148 ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
145#endif 149#endif
146 150
151 /* do the arch specific task caches init */
152 arch_task_cache_init();
153
147 /* 154 /*
148 * The default maximum number of threads is set to a safe 155 * The default maximum number of threads is set to a safe
149 * value: the thread structures can take up at most half 156 * value: the thread structures can take up at most half
@@ -163,6 +170,13 @@ void __init fork_init(unsigned long mempages)
163 init_task.signal->rlim[RLIMIT_NPROC]; 170 init_task.signal->rlim[RLIMIT_NPROC];
164} 171}
165 172
173int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
174 struct task_struct *src)
175{
176 *dst = *src;
177 return 0;
178}
179
166static struct task_struct *dup_task_struct(struct task_struct *orig) 180static struct task_struct *dup_task_struct(struct task_struct *orig)
167{ 181{
168 struct task_struct *tsk; 182 struct task_struct *tsk;
@@ -181,15 +195,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
181 return NULL; 195 return NULL;
182 } 196 }
183 197
184 *tsk = *orig; 198 err = arch_dup_task_struct(tsk, orig);
199 if (err)
200 goto out;
201
185 tsk->stack = ti; 202 tsk->stack = ti;
186 203
187 err = prop_local_init_single(&tsk->dirties); 204 err = prop_local_init_single(&tsk->dirties);
188 if (err) { 205 if (err)
189 free_thread_info(ti); 206 goto out;
190 free_task_struct(tsk);
191 return NULL;
192 }
193 207
194 setup_thread_stack(tsk, orig); 208 setup_thread_stack(tsk, orig);
195 209
@@ -205,6 +219,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
205#endif 219#endif
206 tsk->splice_pipe = NULL; 220 tsk->splice_pipe = NULL;
207 return tsk; 221 return tsk;
222
223out:
224 free_thread_info(ti);
225 free_task_struct(tsk);
226 return NULL;
208} 227}
209 228
210#ifdef CONFIG_MMU 229#ifdef CONFIG_MMU