author		Alex Shi <alex.shi@intel.com>	2013-06-26 01:05:39 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-06-27 04:07:22 -0400
commit		141965c7494d984b2bf24efd361a3125278869c6 (patch)
tree		ad6808fcea54cf0b6e42915b09b07d9183817654 /kernel/sched
parent		be7002e6c613d22976f2b8d4bae6121a5fc0433a (diff)
Revert "sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking"
Remove the CONFIG_FAIR_GROUP_SCHED guard that covers the runnable info, so that the runnable load variables can be used. Also remove two CONFIG_FAIR_GROUP_SCHED guards that are not in the reverted patch (they were introduced in 9ee474f) but also need to be reverted.

Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/51CA76A3.3050207@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
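The point of the revert is that per-entity load tracking should depend on CONFIG_SMP alone, so the runnable-average fields and helpers stay available to the load balancer even when CONFIG_FAIR_GROUP_SCHED is off. A minimal standalone sketch of that guard change follows; it is not kernel code: the struct name, the field types and the hard-coded CONFIG_SMP define are assumptions made for illustration, and only the field names and the #ifdef shape come from the diff below.

/*
 * Minimal standalone sketch (not kernel code) of the guard relaxation:
 * with CONFIG_SMP set and CONFIG_FAIR_GROUP_SCHED left unset, the
 * load-tracking fields are still compiled in after this revert.
 */
#include <stdio.h>

#define CONFIG_SMP 1			/* assumed build configuration */
/* CONFIG_FAIR_GROUP_SCHED deliberately not defined */

struct sched_avg_sketch {		/* hypothetical stand-in for struct sched_avg */
/* was: #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED) */
#ifdef CONFIG_SMP
	unsigned int runnable_avg_period;
	unsigned int runnable_avg_sum;
#endif
};

int main(void)
{
	struct sched_avg_sketch avg = { 0 };

	/* Mirrors what __sched_fork() does in the first hunk below. */
	avg.runnable_avg_period = 0;
	avg.runnable_avg_sum = 0;

	/* Non-zero size shows the fields exist without FAIR_GROUP_SCHED. */
	printf("sizeof = %zu\n", sizeof(struct sched_avg_sketch));
	return 0;
}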
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	7
-rw-r--r--	kernel/sched/fair.c	17
-rw-r--r--	kernel/sched/sched.h	19
3 files changed, 7 insertions(+), 36 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ceeaf0f45be0..0241b1b55a04 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1611,12 +1611,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.vruntime = 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 	p->se.avg.runnable_avg_period = 0;
 	p->se.avg.runnable_avg_sum = 0;
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c0ac2c3b56e1..36eadaaa4e5b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1128,8 +1128,7 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 /*
  * We choose a half-life close to 1 scheduling period.
  * Note: The tables below are dependent on this value.
@@ -3431,12 +3430,6 @@ unlock:
 }
 
 /*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/*
  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
  * previous cpu. However, the caller only guarantees p->pi_lock is held; no
@@ -3459,7 +3452,6 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 		atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
 	}
 }
-#endif
 #endif /* CONFIG_SMP */
 
 static unsigned long
@@ -5861,7 +5853,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 		se->vruntime -= cfs_rq->min_vruntime;
 	}
 
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	/*
 	 * Remove our load from contribution when we leave sched_fair
 	 * and ensure we don't carry in an old decay_count if we
@@ -5920,7 +5912,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifndef CONFIG_64BIT
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
 #endif
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	atomic64_set(&cfs_rq->decay_counter, 1);
 	atomic64_set(&cfs_rq->removed_load, 0);
 #endif
@@ -6162,9 +6154,8 @@ const struct sched_class fair_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	.migrate_task_rq	= migrate_task_rq_fair,
-#endif
+
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 029601a61587..77ce668ba302 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -269,12 +269,6 @@ struct cfs_rq {
 #endif
 
 #ifdef CONFIG_SMP
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
 	 * CFS Load tracking
 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
@@ -284,9 +278,9 @@ struct cfs_rq {
 	u64 runnable_load_avg, blocked_load_avg;
 	atomic64_t decay_counter, removed_load;
 	u64 last_decay;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-/* These always depend on CONFIG_FAIR_GROUP_SCHED */
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	/* Required to track per-cpu representation of a task_group */
 	u32 tg_runnable_contrib;
 	u64 tg_load_contrib;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -1027,17 +1021,8 @@ extern void update_group_power(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq, int cpu);
 extern void idle_balance(int this_cpu, struct rq *this_rq);
 
-/*
- * Only depends on SMP, FAIR_GROUP_SCHED may be removed when runnable_avg
- * becomes useful in lb
- */
-#if defined(CONFIG_FAIR_GROUP_SCHED)
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
-#else
-static inline void idle_enter_fair(struct rq *this_rq) {}
-static inline void idle_exit_fair(struct rq *this_rq) {}
-#endif
 
 #else /* CONFIG_SMP */
 