Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 39d5495540d2..6a6b0139eb32 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -554,6 +554,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	unsigned long avg_load_per_task;
+
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
@@ -1427,9 +1429,18 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long cpu_avg_load_per_task(int cpu);
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
@@ -2011,18 +2022,6 @@ static unsigned long target_load(int cpu, int type)
 }
 
 /*
- * Return the average load per task on the cpu's run queue
- */
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
-	unsigned long n = rq->nr_running;
-
-	return n ? total / n : SCHED_LOAD_SCALE;
-}
-
-/*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
  */
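
Note (not part of the patch): besides moving cpu_avg_load_per_task() earlier in the file, the hunks above also change its semantics. The removed variant recomputed the average on every call and fell back to SCHED_LOAD_SCALE when the runqueue was empty; the new variant caches the last computed value in rq->avg_load_per_task and keeps returning that cached value once nr_running drops to zero. A minimal user-space sketch of that caching behaviour, using hypothetical stand-in names (struct rq_sketch, load_weight) rather than the real struct rq, is:

/* Standalone sketch of the caching behaviour; not kernel code. */
#include <stdio.h>

struct rq_sketch {
	unsigned long load_weight;       /* stand-in for rq->load.weight */
	unsigned long nr_running;        /* tasks currently queued */
	unsigned long avg_load_per_task; /* cached result */
};

static unsigned long cpu_avg_load_per_task_sketch(struct rq_sketch *rq)
{
	/* Recompute only while tasks are queued; otherwise return the cache. */
	if (rq->nr_running)
		rq->avg_load_per_task = rq->load_weight / rq->nr_running;

	return rq->avg_load_per_task;
}

int main(void)
{
	struct rq_sketch rq = { .load_weight = 3072, .nr_running = 3 };

	printf("%lu\n", cpu_avg_load_per_task_sketch(&rq)); /* 1024 */

	rq.nr_running = 0;                                  /* queue drains */
	printf("%lu\n", cpu_avg_load_per_task_sketch(&rq)); /* still 1024, from the cache */
	return 0;
}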