author    akpm@osdl.org <akpm@osdl.org>           2006-01-12 04:05:32 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-12 12:08:50 -0500
commit    d7102e95b7b9c00277562c29aad421d2d521c5f6 (patch)
tree      3ad3d94c329095962c6cd6dcea41e1ccf2db5a7e
parent    198e2f181163233b379dc7ce8a6d7516b84042e7 (diff)
[PATCH] sched: filter affine wakeups
From: Nick Piggin <nickpiggin@yahoo.com.au>

Track the last waker CPU, and only consider wakeup-balancing if there's a
match between current waker CPU and the previous waker CPU.  This ensures
that there is some correlation between two subsequent wakeup events before
we move the task.  Should help random-wakeup workloads on large SMP
systems, by reducing the migration attempts by a factor of nr_cpus.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
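In isolation, the idea is: attempt an affine wakeup (pulling the task to the
waking CPU) only when two consecutive wakeups of the task came from the same
CPU.  Below is a minimal userspace sketch of that filter; the names
(task_sketch, wakeup_correlated) are made up for illustration, and unlike the
patch below, which records the waker later in try_to_wake_up(), this sketch
records it inside the check itself:

	#include <stdio.h>

	/* Illustrative only; this is not the kernel source. */
	struct task_sketch {
		int last_waker_cpu;	/* CPU that performed the previous wakeup */
	};

	/*
	 * Nonzero when this wakeup comes from the same CPU as the previous
	 * one, i.e. when moving the task to the waking CPU is worth
	 * considering.  The waker is recorded either way, so the next
	 * wakeup is judged against this one.
	 */
	static int wakeup_correlated(struct task_sketch *p, int this_cpu)
	{
		int correlated = (p->last_waker_cpu == this_cpu);

		p->last_waker_cpu = this_cpu;
		return correlated;
	}

	int main(void)
	{
		struct task_sketch t = { .last_waker_cpu = 3 };
		int a, b, c;

		/* Wakeups from CPUs 1, 1, 2: only the middle one correlates. */
		a = wakeup_correlated(&t, 1);	/* 0: previous waker was CPU 3 */
		b = wakeup_correlated(&t, 1);	/* 1: same waker twice in a row */
		c = wakeup_correlated(&t, 2);	/* 0: waker changed */
		printf("%d %d %d\n", a, b, c);	/* prints: 0 1 0 */
		return 0;
	}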
-rw-r--r--  include/linux/sched.h |  5 ++++-
-rw-r--r--  kernel/sched.c        | 10 +++++++++-
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d6b9228bba9..b5ef92a043a6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -696,9 +696,12 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+	int last_waker_cpu;	/* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	int oncpu;
 #endif
+#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
diff --git a/kernel/sched.c b/kernel/sched.c
index 98461de1ab65..c9dec2aa1976 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1290,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 		}
 	}
 
+	if (p->last_waker_cpu != this_cpu)
+		goto out_set_cpu;
+
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
 		goto out_set_cpu;
 
@@ -1360,6 +1363,8 @@ out_set_cpu:
 		cpu = task_cpu(p);
 	}
 
+	p->last_waker_cpu = this_cpu;
+
 out_activate:
 #endif /* CONFIG_SMP */
 	if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1441,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+	p->last_waker_cpu = cpu;
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
 #endif
+#endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
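Two details of the patch are worth noting.  First, sched_fork() seeds
p->last_waker_cpu with the CPU the task is forked on, so the very first
wakeup has a well-defined value to compare against.  Second, in
try_to_wake_up() the assignment p->last_waker_cpu = this_cpu sits at the end
of the out_set_cpu block, which the new filter jumps to; a wakeup that fails
the correlation check therefore still records its waker, and the next wakeup
is judged against this one rather than against some older event.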