author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2006-05-09 17:14:28 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-05-09 17:14:28 -0400
commit		1929ab8c6860a4a94109eed038b0fa9d12c81721
tree		42d3319df9d94cda4a514762f393c277cbdea5d3 /arch/arm/kernel
parent		f9d8f063fee645a23776519fb5c910b9d9435270
[ARM] Fix thread struct allocator for SMP case
The ARM thread struct allocator is racy on SMP systems. Fix it by turning it into a per-cpu based allocator. This also keeps the cache warm for thread structs and kernel stacks.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
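For readers less familiar with the per-cpu primitives the fix relies on, here is a minimal sketch of the same free-list pattern using DEFINE_PER_CPU, get_cpu_var() and put_cpu_var() from <linux/percpu.h>; the my_list and my_pop names are illustrative only and do not appear in the patch:

	#include <linux/percpu.h>

	/* Hypothetical per-CPU free list mirroring the patch's thread_info_list. */
	struct my_list {
		unsigned long *head;
		unsigned int nr;
	};

	static DEFINE_PER_CPU(struct my_list, my_list) = { NULL, 0 };

	static unsigned long *my_pop(void)
	{
		/*
		 * get_cpu_var() disables preemption and hands back this CPU's
		 * copy of the variable, so nothing else on this CPU can touch
		 * the list until put_cpu_var() re-enables preemption.
		 */
		struct my_list *l = &get_cpu_var(my_list);
		unsigned long *p = l->head;

		if (p) {
			l->head = (unsigned long *)p[0];	/* first word links to the next entry */
			l->nr -= 1;
		}
		put_cpu_var(my_list);
		return p;
	}

Because each CPU owns its own list and the accesses happen with preemption disabled, no lock is needed; that is the property the patched allocator below relies on.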
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/process.c	37
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1ff75cee4b0d..1a1539e3a946 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -264,8 +264,12 @@ void show_fpregs(struct user_fp *regs)
 /*
  * Task structure and kernel stack allocation.
  */
-static unsigned long *thread_info_head;
-static unsigned int nr_thread_info;
+struct thread_info_list {
+	unsigned long *head;
+	unsigned int nr;
+};
+
+static DEFINE_PER_CPU(struct thread_info_list, thread_info_list) = { NULL, 0 };
 
 #define EXTRA_TASK_STRUCT	4
 
@@ -274,12 +278,15 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
 	struct thread_info *thread = NULL;
 
 	if (EXTRA_TASK_STRUCT) {
-		unsigned long *p = thread_info_head;
+		struct thread_info_list *th = &get_cpu_var(thread_info_list);
+		unsigned long *p = th->head;
 
 		if (p) {
-			thread_info_head = (unsigned long *)p[0];
-			nr_thread_info -= 1;
+			th->head = (unsigned long *)p[0];
+			th->nr -= 1;
 		}
+		put_cpu_var(thread_info_list);
+
 		thread = (struct thread_info *)p;
 	}
 
@@ -300,13 +307,19 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
 
 void free_thread_info(struct thread_info *thread)
 {
-	if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
-		unsigned long *p = (unsigned long *)thread;
-		p[0] = (unsigned long)thread_info_head;
-		thread_info_head = p;
-		nr_thread_info += 1;
-	} else
-		free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
+	if (EXTRA_TASK_STRUCT) {
+		struct thread_info_list *th = &get_cpu_var(thread_info_list);
+		if (th->nr < EXTRA_TASK_STRUCT) {
+			unsigned long *p = (unsigned long *)thread;
+			p[0] = th->head;
+			th->head = p;
+			th->nr += 1;
+			put_cpu_var(thread_info_list);
+			return;
+		}
+		put_cpu_var(thread_info_list);
+	}
+	free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
 }
 
 /*
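To see the race being fixed, consider the pre-patch code above with its single global thread_info_head and no locking; the interleaving below is illustrative, not taken from the patch:

	/*
	 * Old code, two CPUs in alloc_thread_info() concurrently:
	 *
	 *   CPU0: p = thread_info_head;
	 *                                  CPU1: p = thread_info_head;
	 *   CPU0: thread_info_head = (unsigned long *)p[0];
	 *                                  CPU1: thread_info_head = (unsigned long *)p[0];
	 *
	 * Both CPUs hand out the same cached thread_info, and head/nr can be
	 * left inconsistent.
	 */

With one list per CPU and preemption disabled across each access via get_cpu_var()/put_cpu_var(), this interleaving is no longer possible.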