about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2007-10-15 11:00:03 -0400
committerIngo Molnar <mingo@elte.hu>2007-10-15 11:00:03 -0400
commit53df556e06d85245cf6aacedaba8e4da684859c3 (patch)
tree60095031fd9493b8da74a5ca8813ea48e617f86c /kernel/sched.c
parenta25707f3aef9cf68c341eba5960d580f364e4e6f (diff)
sched: remove precise CPU load calculations #2
continued removal of precise CPU load calculations. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Mike Galbraith <efault@gmx.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c32
1 file changed, 1 insertion(+), 31 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 25cc9b2a8c15..f6a81061fd50 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -173,8 +173,6 @@ struct rt_prio_array {
173 173
174struct load_stat { 174struct load_stat {
175 struct load_weight load; 175 struct load_weight load;
176 u64 load_update_start, load_update_last;
177 unsigned long delta_fair, delta_exec, delta_stat;
178}; 176};
179 177
180/* CFS-related fields in a runqueue */ 178/* CFS-related fields in a runqueue */
@@ -793,15 +791,6 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
793 791
794#define sched_class_highest (&rt_sched_class) 792#define sched_class_highest (&rt_sched_class)
795 793
796static void __update_curr_load(struct rq *rq, struct load_stat *ls)
797{
798 if (rq->curr != rq->idle && ls->load.weight) {
799 ls->delta_exec += ls->delta_stat;
800 ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
801 ls->delta_stat = 0;
802 }
803}
804
805/* 794/*
806 * Update delta_exec, delta_fair fields for rq. 795 * Update delta_exec, delta_fair fields for rq.
807 * 796 *
@@ -817,31 +806,13 @@ static void __update_curr_load(struct rq *rq, struct load_stat *ls)
817 * This function is called /before/ updating rq->ls.load 806 * This function is called /before/ updating rq->ls.load
818 * and when switching tasks. 807 * and when switching tasks.
819 */ 808 */
820static void update_curr_load(struct rq *rq)
821{
822 struct load_stat *ls = &rq->ls;
823 u64 start;
824
825 start = ls->load_update_start;
826 ls->load_update_start = rq->clock;
827 ls->delta_stat += rq->clock - start;
828 /*
829 * Stagger updates to ls->delta_fair. Very frequent updates
830 * can be expensive.
831 */
832 if (ls->delta_stat)
833 __update_curr_load(rq, ls);
834}
835
836static inline void inc_load(struct rq *rq, const struct task_struct *p) 809static inline void inc_load(struct rq *rq, const struct task_struct *p)
837{ 810{
838 update_curr_load(rq);
839 update_load_add(&rq->ls.load, p->se.load.weight); 811 update_load_add(&rq->ls.load, p->se.load.weight);
840} 812}
841 813
842static inline void dec_load(struct rq *rq, const struct task_struct *p) 814static inline void dec_load(struct rq *rq, const struct task_struct *p)
843{ 815{
844 update_curr_load(rq);
845 update_load_sub(&rq->ls.load, p->se.load.weight); 816 update_load_sub(&rq->ls.load, p->se.load.weight);
846} 817}
847 818
@@ -1972,8 +1943,7 @@ unsigned long nr_active(void)
1972 */ 1943 */
1973static void update_cpu_load(struct rq *this_rq) 1944static void update_cpu_load(struct rq *this_rq)
1974{ 1945{
1975 unsigned long total_load = this_rq->ls.load.weight; 1946 unsigned long this_load = this_rq->ls.load.weight;
1976 unsigned long this_load = total_load;
1977 int i, scale; 1947 int i, scale;
1978 1948
1979 this_rq->nr_load_updates++; 1949 this_rq->nr_load_updates++;