path: root/kernel/sched_fair.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 22:36:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 22:36:53 -0400
commit	7f9dce38378f0a4a298e885553d6bb7121376376 (patch)
tree	5bfd688c9f356f7216bbc3cef3b4c10153de334b /kernel/sched_fair.c
parent	26dcce0fabbef75ae426461edf21b5030bad60f3 (diff)
parent	ba42059fbd0aa1ac91b582412b5fedb1258f241f (diff)
Merge branch 'sched/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: hrtick_enabled() should use cpu_active()
  sched, x86: clean up hrtick implementation
  sched: fix build error, provide partition_sched_domains() unconditionally
  sched: fix warning in inc_rt_tasks() to not declare variable 'rq' if it's not needed
  cpu hotplug: Make cpu_active_map synchronization dependency clear
  cpu hotplug, sched: Introduce cpu_active_map and redo sched domain managment (take 2)
  sched: rework of "prioritize non-migratable tasks over migratable ones"
  sched: reduce stack size in isolated_cpu_setup()
  Revert parts of "ftrace: do not trace scheduler functions"

Fixed up conflicts in include/asm-x86/thread_info.h (due to the TIF_SINGLESTEP unification vs TIF_HRTICK_RESCHED removal) and kernel/sched_fair.c (due to cpu_active_map vs for_each_cpu_mask_nr() introduction).
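For context on the first two hunks below: with the hrtick cleanup pulled in by this merge, hrtick_start() no longer takes a requeue argument, so hrtick_start_fair() decides the minimum slice directly from whether the task is the one currently running. A minimal userspace model of that clamping rule follows; it is a sketch only, and the names fake_rq and clamp_slice are invented for illustration, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the scheduler state this check needs. */
struct fake_rq {
	int curr_pid;	/* pid of the task currently running on this rq */
};

/*
 * Model of the post-merge rule in hrtick_start_fair(): the remaining
 * slice is clamped to a 10000ns minimum only when the task is not the
 * one currently running, mirroring the "if (rq->curr != p)" check.
 */
static int64_t clamp_slice(const struct fake_rq *rq, int pid, int64_t delta)
{
	if (rq->curr_pid != pid && delta < 10000LL)
		delta = 10000LL;
	return delta;
}

int main(void)
{
	struct fake_rq rq = { .curr_pid = 42 };

	/* The currently running task keeps its short remaining slice. */
	printf("%lld\n", (long long)clamp_slice(&rq, 42, 3000LL));	/* 3000 */
	/* Any other task gets at least 10000ns. */
	printf("%lld\n", (long long)clamp_slice(&rq, 7, 3000LL));	/* 10000 */
	return 0;
}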
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bb61fe26b62c..cf2cd6ce4cb2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
-	int requeue = rq->curr == p;
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		 * Don't schedule slices shorter than 10000ns, that just
 		 * doesn't make sense. Rely on vruntime for fairness.
 		 */
-		if (!requeue)
+		if (rq->curr != p)
 			delta = max(10000LL, delta);
 
-		hrtick_start(rq, delta, requeue);
+		hrtick_start(rq, delta);
 	}
 }
 #else /* !CONFIG_SCHED_HRTICK */
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq)
  * not idle and an idle cpu is available. The span of cpus to
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
+ * Domains may include CPUs that are not usable for migration,
+ * hence we need to mask them out (cpu_active_map)
  *
  * Returns the CPU we should wake onto.
  */
@@ -1031,6 +1032,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 	    || ((sd->flags & SD_WAKE_IDLE_FAR)
 		&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
+			cpus_and(tmp, tmp, cpu_active_map);
 			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
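The last two hunks add cpu_active_map masking so that wake_idle() never picks a CPU that is going offline: the candidate set becomes the intersection of the sched-domain span, the task's cpus_allowed, and the active map. A small userspace sketch of that masking pattern follows, using plain 32-bit masks; wake_candidates() and the example mask values are invented for illustration, not kernel APIs:

#include <stdio.h>
#include <stdint.h>

/*
 * Toy model of the masking added in wake_idle(): candidate CPUs are the
 * intersection of the sched-domain span, the task's allowed CPUs, and
 * the set of CPUs that are active (i.e. usable as a migration target).
 */
static uint32_t wake_candidates(uint32_t domain_span, uint32_t cpus_allowed,
				uint32_t active_map)
{
	uint32_t tmp = domain_span & cpus_allowed; /* cpus_and(tmp, sd->span, p->cpus_allowed) */
	tmp &= active_map;                         /* cpus_and(tmp, tmp, cpu_active_map)       */
	return tmp;
}

int main(void)
{
	uint32_t span = 0x0f;		/* CPUs 0-3 are in this domain   */
	uint32_t allowed = 0x0e;	/* the task may run on CPUs 1-3  */
	uint32_t active = 0x07;		/* CPU 3 is being hot-unplugged  */
	uint32_t tmp = wake_candidates(span, allowed, active);

	/* Walk the surviving candidates, as for_each_cpu_mask_nr() would. */
	for (int cpu = 0; cpu < 32; cpu++)
		if (tmp & (1u << cpu))
			printf("candidate cpu %d\n", cpu);	/* prints 1 and 2 */
	return 0;
}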