author    Ingo Molnar <mingo@elte.hu>  2007-08-02 11:41:40 -0400
committer Ingo Molnar <mingo@elte.hu>  2007-08-02 11:41:40 -0400
commit    9c2172459a47c99adf9c968180a8a57d9ff84efa (patch)
tree      24478a1c0f8d32b0b1f18661d42d26e644e40292 /kernel
parent    cad60d93e18ba52b6f069b2edb031c89bf603b07 (diff)
[PATCH] sched: move load-calculation functions
Move the load-calculation functions so that they can use the per-policy declarations and methods.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  132
1 file changed, 66 insertions, 66 deletions
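Background note (not part of the original commit): the reason the functions have to move at all is an ordinary C ordering constraint. Within a translation unit a function body can only use declarations that appear above it, so helpers that want to call the per-policy (scheduling-class) declarations and methods must be defined after them. The following standalone sketch only illustrates that constraint; policy_ops, fair_ops and account_load are made-up names, not kernel identifiers.

/* Minimal sketch of the ordering constraint, with made-up names. */
#include <stdio.h>

struct policy_ops {
        long (*scale_load)(long weight);
};

static long fair_scale_load(long weight)
{
        return weight * 2;      /* placeholder per-policy method */
}

static const struct policy_ops fair_ops = {
        .scale_load = fair_scale_load,
};

/*
 * This helper can call through fair_ops only because it is defined
 * *after* the per-policy table above; if it were defined earlier it
 * would need forward declarations instead.
 */
static long account_load(long weight)
{
        return fair_ops.scale_load(weight);
}

int main(void)
{
        printf("%ld\n", account_load(10));
        return 0;
}

The patch below applies the same idea: update_curr_load() and the inc/dec helpers are moved below the sched_class declarations (sched_class_highest) so that they can use the per-policy declarations directly.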
diff --git a/kernel/sched.c b/kernel/sched.c
index 915c75e5a276..a9d374061a46 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -678,46 +678,6 @@ static void update_load_sub(struct load_weight *lw, unsigned long dec)
         lw->inv_weight = 0;
 }
 
-static void __update_curr_load(struct rq *rq, struct load_stat *ls)
-{
-        if (rq->curr != rq->idle && ls->load.weight) {
-                ls->delta_exec += ls->delta_stat;
-                ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
-                ls->delta_stat = 0;
-        }
-}
-
-/*
- * Update delta_exec, delta_fair fields for rq.
- *
- * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->ls.load.weight) on the runqueue, while
- * delta_exec advances at the same rate as wall-clock (provided
- * cpu is not idle).
- *
- * delta_exec / delta_fair is a measure of the (smoothened) load on this
- * runqueue over any given interval. This (smoothened) load is used
- * during load balance.
- *
- * This function is called /before/ updating rq->ls.load
- * and when switching tasks.
- */
-static void update_curr_load(struct rq *rq, u64 now)
-{
-        struct load_stat *ls = &rq->ls;
-        u64 start;
-
-        start = ls->load_update_start;
-        ls->load_update_start = now;
-        ls->delta_stat += now - start;
-        /*
-         * Stagger updates to ls->delta_fair. Very frequent updates
-         * can be expensive.
-         */
-        if (ls->delta_stat >= sysctl_sched_stat_granularity)
-                __update_curr_load(rq, ls);
-}
-
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
@@ -768,32 +728,6 @@ static const u32 prio_to_wmult[40] = {
 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-static inline void
-inc_load(struct rq *rq, const struct task_struct *p, u64 now)
-{
-        update_curr_load(rq, now);
-        update_load_add(&rq->ls.load, p->se.load.weight);
-}
-
-static inline void
-dec_load(struct rq *rq, const struct task_struct *p, u64 now)
-{
-        update_curr_load(rq, now);
-        update_load_sub(&rq->ls.load, p->se.load.weight);
-}
-
-static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
-{
-        rq->nr_running++;
-        inc_load(rq, p, now);
-}
-
-static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
-{
-        rq->nr_running--;
-        dec_load(rq, p, now);
-}
-
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
 
 /*
@@ -824,6 +758,72 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 #define sched_class_highest (&rt_sched_class)
 
+static void __update_curr_load(struct rq *rq, struct load_stat *ls)
+{
+        if (rq->curr != rq->idle && ls->load.weight) {
+                ls->delta_exec += ls->delta_stat;
+                ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
+                ls->delta_stat = 0;
+        }
+}
+
+/*
+ * Update delta_exec, delta_fair fields for rq.
+ *
+ * delta_fair clock advances at a rate inversely proportional to
+ * total load (rq->ls.load.weight) on the runqueue, while
+ * delta_exec advances at the same rate as wall-clock (provided
+ * cpu is not idle).
+ *
+ * delta_exec / delta_fair is a measure of the (smoothened) load on this
+ * runqueue over any given interval. This (smoothened) load is used
+ * during load balance.
+ *
+ * This function is called /before/ updating rq->ls.load
+ * and when switching tasks.
+ */
+static void update_curr_load(struct rq *rq, u64 now)
+{
+        struct load_stat *ls = &rq->ls;
+        u64 start;
+
+        start = ls->load_update_start;
+        ls->load_update_start = now;
+        ls->delta_stat += now - start;
+        /*
+         * Stagger updates to ls->delta_fair. Very frequent updates
+         * can be expensive.
+         */
+        if (ls->delta_stat >= sysctl_sched_stat_granularity)
+                __update_curr_load(rq, ls);
+}
+
+static inline void
+inc_load(struct rq *rq, const struct task_struct *p, u64 now)
+{
+        update_curr_load(rq, now);
+        update_load_add(&rq->ls.load, p->se.load.weight);
+}
+
+static inline void
+dec_load(struct rq *rq, const struct task_struct *p, u64 now)
+{
+        update_curr_load(rq, now);
+        update_load_sub(&rq->ls.load, p->se.load.weight);
+}
+
+static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+{
+        rq->nr_running++;
+        inc_load(rq, p, now);
+}
+
+static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+{
+        rq->nr_running--;
+        dec_load(rq, p, now);
+}
+
 static void set_load_weight(struct task_struct *p)
 {
         task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
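For readers unfamiliar with the moved code, here is a rough, userspace-only sketch of the staggering pattern that update_curr_load() uses: accumulate a per-call time delta cheaply on every invocation, and only fold it into the load statistics once it has grown past a granularity threshold. The names load_stat_sketch, STAT_GRANULARITY and the timestamps are made-up illustrative values, not the kernel's; the sketch mirrors the structure, not the kernel's actual arithmetic.

/* Userspace sketch of the "stagger expensive updates" pattern. */
#include <stdint.h>
#include <stdio.h>

#define STAT_GRANULARITY 1000000ULL     /* hypothetical threshold, in ns */

struct load_stat_sketch {
        uint64_t load_update_start;     /* timestamp of the previous call */
        uint64_t delta_stat;            /* time accumulated since last flush */
        uint64_t delta_exec;            /* time actually folded into the stats */
};

/* "Expensive" part: only run once enough time has accumulated. */
static void flush_load_stat(struct load_stat_sketch *ls)
{
        ls->delta_exec += ls->delta_stat;
        ls->delta_stat = 0;
}

/* Cheap part: called frequently, usually just accumulates. */
static void update_load_stat(struct load_stat_sketch *ls, uint64_t now)
{
        ls->delta_stat += now - ls->load_update_start;
        ls->load_update_start = now;
        if (ls->delta_stat >= STAT_GRANULARITY)
                flush_load_stat(ls);
}

int main(void)
{
        struct load_stat_sketch ls = { .load_update_start = 0 };
        uint64_t t;

        /* Simulate a call every 300us: only some calls trigger a flush. */
        for (t = 300000; t <= 3000000; t += 300000) {
                update_load_stat(&ls, t);
                printf("now=%llu delta_stat=%llu delta_exec=%llu\n",
                       (unsigned long long)t,
                       (unsigned long long)ls.delta_stat,
                       (unsigned long long)ls.delta_exec);
        }
        return 0;
}

The same shape appears in the patch: update_curr_load() adds now - load_update_start to delta_stat on every call, but defers the calc_delta_fair() work in __update_curr_load() until delta_stat crosses sysctl_sched_stat_granularity. Over an interval, the resulting delta_exec / delta_fair ratio is what the comment block describes as the smoothed runqueue load used during load balancing.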