author	Dietmar Eggemann <dietmar.eggemann@arm.com>	2014-02-26 06:19:33 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-02-27 06:41:00 -0500
commit	f5f9739d7a0ccbdcf913a0b3604b134129d14f7e (patch)
tree	0a9052c1fb1f503114acb18ebde3669a6be0701d
parent	6990566b535908905b4eccda7cc9e09c2db52187 (diff)
sched: Put rq's sched_avg under CONFIG_FAIR_GROUP_SCHED
The struct sched_avg of struct rq is only used in case group scheduling
is enabled, inside __update_tg_runnable_avg(), to update the per-cpu
representation of a task group. I.e. there is no need to maintain the
runnable avg of a rq in the !CONFIG_FAIR_GROUP_SCHED case.

This patch guards struct sched_avg of struct rq and
update_rq_runnable_avg() with CONFIG_FAIR_GROUP_SCHED.

There is an extra empty definition for update_rq_runnable_avg()
necessary for the !CONFIG_FAIR_GROUP_SCHED && CONFIG_SMP case.

The function print_cfs_group_stats() which prints out struct sched_avg
of struct rq is already guarded with CONFIG_FAIR_GROUP_SCHED.

Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/530DCDC5.1060406@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
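As a simplified, self-contained illustration of the guarding pattern the patch applies (not the kernel code itself), the sketch below shows how the per-rq sched_avg field and the real update function can exist only when the group-scheduling option is set, while an empty inline stub keeps callers compiling when it is not. The identifiers mirror the kernel names, but the types are stripped-down stand-ins and the runnable-avg math is reduced to a placeholder increment.

	#include <stdio.h>

	/* Toggle to mimic CONFIG_FAIR_GROUP_SCHED being set or unset. */
	#define CONFIG_FAIR_GROUP_SCHED 1

	struct sched_avg { unsigned long runnable_avg_sum; };

	struct rq {
		unsigned long clock_task;	/* keeps struct rq non-empty in either config */
	#ifdef CONFIG_FAIR_GROUP_SCHED
		/* Only needed to maintain the per-cpu task-group representation. */
		struct sched_avg avg;
	#endif
	};

	#ifdef CONFIG_FAIR_GROUP_SCHED
	static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
	{
		/* Stand-in for __update_entity_runnable_avg()/__update_tg_runnable_avg(). */
		rq->avg.runnable_avg_sum += runnable;
	}
	#else
	/* Empty stub so !CONFIG_FAIR_GROUP_SCHED && CONFIG_SMP callers still compile. */
	static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
	#endif

	int main(void)
	{
		struct rq rq = { .clock_task = 0 };

		update_rq_runnable_avg(&rq, 1);	/* becomes a no-op when the option is unset */
		puts("builds and runs with either configuration");
		return 0;
	}

Flipping the CONFIG_FAIR_GROUP_SCHED define off shows the point of the empty stub: call sites need no #ifdefs of their own, and the unused struct member disappears from struct rq.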
-rw-r--r--	kernel/sched/fair.c	13
-rw-r--r--	kernel/sched/sched.h	4
2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a3a41c61a2c9..be4f7d9eaf03 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2374,12 +2374,19 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
 	}
 }
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
+	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
+}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 						  struct cfs_rq *cfs_rq) {}
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void __update_task_entity_contrib(struct sched_entity *se)
@@ -2478,12 +2485,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
 }
 
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
-{
-	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
-	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
-}
-
 /* Add the load generated by se into cfs_rq's child load-average */
 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 					  struct sched_entity *se,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d608125b36ef..046084ebb1fb 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -541,6 +541,8 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
+
+	struct sched_avg avg;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 	/*
@@ -630,8 +632,6 @@ struct rq {
 #ifdef CONFIG_SMP
 	struct llist_head wake_list;
 #endif
-
-	struct sched_avg avg;
 };
 
 static inline int cpu_of(struct rq *rq)