author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2008-06-27 07:41:26 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-06-27 08:31:39 -0400
commit     a8a51d5e59561aa5b4d66e19eca819b537783e8f (patch)
tree       b400bc77244a742e737bb9deb94a6911a769e082 /kernel/sched.c
parent     039a1c41b3a489e34593ea1e1687f6fdad6b13ab (diff)
sched: persistent average load per task
Remove the fall-back to SCHED_LOAD_SCALE by remembering the previous value of
cpu_avg_load_per_task(). This is useful under the hierarchical group model,
in which a task's weight can be much smaller than SCHED_LOAD_SCALE, so the old
fall-back overestimates the per-task load of an empty runqueue.
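
The effect is easiest to see when a runqueue momentarily empties. Below is a
minimal user-space sketch of the before/after behavior, not kernel code:
struct rq here is an illustrative stand-in with a flattened load_weight field,
the avg_old()/avg_new() helpers are hypothetical names mirroring the two
versions of cpu_avg_load_per_task(), and SCHED_LOAD_SCALE is hard-coded to its
usual value of 1024.

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

struct rq {
	unsigned long load_weight;        /* stand-in for rq->load.weight */
	unsigned long nr_running;
	unsigned long avg_load_per_task;  /* the new persistent field */
};

/* Old behavior: an empty runqueue reports a full SCHED_LOAD_SCALE. */
static unsigned long avg_old(struct rq *rq)
{
	return rq->nr_running ? rq->load_weight / rq->nr_running
			      : SCHED_LOAD_SCALE;
}

/* New behavior: an empty runqueue reports the last computed average. */
static unsigned long avg_new(struct rq *rq)
{
	if (rq->nr_running)
		rq->avg_load_per_task = rq->load_weight / rq->nr_running;
	return rq->avg_load_per_task;
}

int main(void)
{
	/* Two tasks from a small group: weight 128 each, not 1024. */
	struct rq rq = { .load_weight = 256, .nr_running = 2 };

	printf("busy:  old=%lu new=%lu\n", avg_old(&rq), avg_new(&rq));

	/* Both tasks dequeue; the runqueue is momentarily empty. */
	rq.load_weight = 0;
	rq.nr_running = 0;

	printf("empty: old=%lu new=%lu\n", avg_old(&rq), avg_new(&rq));
	return 0;
}

While the queue is busy, both versions report 128; once it empties, the old
version jumps to 1024 while the new one keeps reporting the last real average.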
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  25
1 file changed, 12 insertions, 13 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 39d5495540d2..6a6b0139eb32 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -554,6 +554,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	unsigned long avg_load_per_task;
+
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
@@ -1427,9 +1429,18 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long cpu_avg_load_per_task(int cpu);
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
@@ -2011,18 +2022,6 @@ static unsigned long target_load(int cpu, int type)
 }
 
 /*
- * Return the average load per task on the cpu's run queue
- */
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
-	unsigned long n = rq->nr_running;
-
-	return n ? total / n : SCHED_LOAD_SCALE;
-}
-
-/*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
  */