about summary refs log tree commit diff stats
path: root/kernel/sched/fair.c
diff options
context:
space:
mode:
authorAlex Shi <alex.shi@intel.com>2013-06-26 01:05:39 -0400
committerIngo Molnar <mingo@kernel.org>2013-06-27 04:07:22 -0400
commit141965c7494d984b2bf24efd361a3125278869c6 (patch)
treead6808fcea54cf0b6e42915b09b07d9183817654 /kernel/sched/fair.c
parentbe7002e6c613d22976f2b8d4bae6121a5fc0433a (diff)
Revert "sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking"
Remove the CONFIG_FAIR_GROUP_SCHED guard that covers the runnable info, so that we can use the runnable load variables. Also remove 2 CONFIG_FAIR_GROUP_SCHED settings which are not in the reverted patch (they were introduced in 9ee474f) but also need to be reverted. Signed-off-by: Alex Shi <alex.shi@intel.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/51CA76A3.3050207@intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--kernel/sched/fair.c17
1 file changed, 4 insertions, 13 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c0ac2c3b56e1..36eadaaa4e5b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1128,8 +1128,7 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
1128} 1128}
1129#endif /* CONFIG_FAIR_GROUP_SCHED */ 1129#endif /* CONFIG_FAIR_GROUP_SCHED */
1130 1130
1131/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */ 1131#ifdef CONFIG_SMP
1132#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1133/* 1132/*
1134 * We choose a half-life close to 1 scheduling period. 1133 * We choose a half-life close to 1 scheduling period.
1135 * Note: The tables below are dependent on this value. 1134 * Note: The tables below are dependent on this value.
@@ -3431,12 +3430,6 @@ unlock:
3431} 3430}
3432 3431
3433/* 3432/*
3434 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
3435 * removed when useful for applications beyond shares distribution (e.g.
3436 * load-balance).
3437 */
3438#ifdef CONFIG_FAIR_GROUP_SCHED
3439/*
3440 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and 3433 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3441 * cfs_rq_of(p) references at time of call are still valid and identify the 3434 * cfs_rq_of(p) references at time of call are still valid and identify the
3442 * previous cpu. However, the caller only guarantees p->pi_lock is held; no 3435 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
@@ -3459,7 +3452,6 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3459 atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load); 3452 atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
3460 } 3453 }
3461} 3454}
3462#endif
3463#endif /* CONFIG_SMP */ 3455#endif /* CONFIG_SMP */
3464 3456
3465static unsigned long 3457static unsigned long
@@ -5861,7 +5853,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
5861 se->vruntime -= cfs_rq->min_vruntime; 5853 se->vruntime -= cfs_rq->min_vruntime;
5862 } 5854 }
5863 5855
5864#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) 5856#ifdef CONFIG_SMP
5865 /* 5857 /*
5866 * Remove our load from contribution when we leave sched_fair 5858 * Remove our load from contribution when we leave sched_fair
5867 * and ensure we don't carry in an old decay_count if we 5859 * and ensure we don't carry in an old decay_count if we
@@ -5920,7 +5912,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
5920#ifndef CONFIG_64BIT 5912#ifndef CONFIG_64BIT
5921 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; 5913 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5922#endif 5914#endif
5923#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) 5915#ifdef CONFIG_SMP
5924 atomic64_set(&cfs_rq->decay_counter, 1); 5916 atomic64_set(&cfs_rq->decay_counter, 1);
5925 atomic64_set(&cfs_rq->removed_load, 0); 5917 atomic64_set(&cfs_rq->removed_load, 0);
5926#endif 5918#endif
@@ -6162,9 +6154,8 @@ const struct sched_class fair_sched_class = {
6162 6154
6163#ifdef CONFIG_SMP 6155#ifdef CONFIG_SMP
6164 .select_task_rq = select_task_rq_fair, 6156 .select_task_rq = select_task_rq_fair,
6165#ifdef CONFIG_FAIR_GROUP_SCHED
6166 .migrate_task_rq = migrate_task_rq_fair, 6157 .migrate_task_rq = migrate_task_rq_fair,
6167#endif 6158
6168 .rq_online = rq_online_fair, 6159 .rq_online = rq_online_fair,
6169 .rq_offline = rq_offline_fair, 6160 .rq_offline = rq_offline_fair,
6170 6161