author:    Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>  2008-01-25 15:08:00 -0500
committer: Ingo Molnar <mingo@elte.hu>  2008-01-25 15:08:00 -0500
commit:    58e2d4ca581167c2a079f4ee02be2f0bc52e8729 (patch)
tree:      9a8c8324785800f3577fb897ca3e2ae21ad8c55a /kernel/sched.c
parent:    ec2c507fe8c8fa3c04fc6cb99a382a965c477379 (diff)
sched: group scheduling, change how cpu load is calculated
This patch changes how the cpu load exerted by fair_sched_class tasks is calculated. Load exerted by fair_sched_class tasks on a cpu is now a summation of the group weights, rather than a summation of task weights. The weight exerted by a group on a cpu depends on the shares allocated to it.

This version of the patch has a minor impact on code size, but should have no runtime/functional impact for !CONFIG_FAIR_GROUP_SCHED.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
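To make the accounting change concrete, here is a minimal standalone C sketch (illustrative only; the structs, field names, and numbers below are hypothetical userspace mocks, not kernel code) contrasting the two schemes the message describes: summing per-task weights versus summing per-group weights derived from shares.

#include <stdio.h>

struct task {
	unsigned long weight;
};

struct group {
	unsigned long shares;	/* weight the group exerts on a cpu */
	struct task tasks[4];
	int nr_tasks;
};

/* Old scheme: cpu load is the sum of all runnable task weights. */
static unsigned long cpu_load_by_tasks(const struct group *groups, int nr)
{
	unsigned long load = 0;

	for (int g = 0; g < nr; g++)
		for (int t = 0; t < groups[g].nr_tasks; t++)
			load += groups[g].tasks[t].weight;
	return load;
}

/* New scheme: cpu load is the sum of the group weights (shares). */
static unsigned long cpu_load_by_groups(const struct group *groups, int nr)
{
	unsigned long load = 0;

	for (int g = 0; g < nr; g++)
		load += groups[g].shares;
	return load;
}

int main(void)
{
	/* Two groups with equal shares; one runs three tasks, one runs one. */
	struct group groups[2] = {
		{ .shares = 1024, .nr_tasks = 3,
		  .tasks = { { 1024 }, { 1024 }, { 1024 } } },
		{ .shares = 1024, .nr_tasks = 1,
		  .tasks = { { 1024 } } },
	};

	printf("load by task weights:  %lu\n", cpu_load_by_tasks(groups, 2));	/* 4096 */
	printf("load by group weights: %lu\n", cpu_load_by_groups(groups, 2));	/* 2048 */
	return 0;
}

Under the old scheme the busier group inflates the cpu load; under the new scheme the two equal-shares groups exert equal weight regardless of how many tasks each runs.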
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 27
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cfa695819252..c915f3e6e593 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -886,6 +886,16 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #endif
 
+static inline void inc_cpu_load(struct rq *rq, unsigned long load)
+{
+	update_load_add(&rq->load, load);
+}
+
+static inline void dec_cpu_load(struct rq *rq, unsigned long load)
+{
+	update_load_sub(&rq->load, load);
+}
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -896,26 +906,14 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 
 #define sched_class_highest (&rt_sched_class)
 
-static inline void inc_load(struct rq *rq, const struct task_struct *p)
-{
-	update_load_add(&rq->load, p->se.load.weight);
-}
-
-static inline void dec_load(struct rq *rq, const struct task_struct *p)
-{
-	update_load_sub(&rq->load, p->se.load.weight);
-}
-
 static void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running++;
-	inc_load(rq, p);
 }
 
 static void dec_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running--;
-	dec_load(rq, p);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -4087,10 +4085,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		goto out_unlock;
 	}
 	on_rq = p->se.on_rq;
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, p, 0);
-		dec_load(rq, p);
-	}
 
 	p->static_prio = NICE_TO_PRIO(nice);
 	set_load_weight(p);
@@ -4100,7 +4096,6 @@ void set_user_nice(struct task_struct *p, long nice)
 
 	if (on_rq) {
 		enqueue_task(rq, p, 0);
-		inc_load(rq, p);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
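Since the diffstat above is limited to kernel/sched.c, the call sites of the new inc_cpu_load()/dec_cpu_load() helpers are not shown in this diff. The standalone sketch below (a userspace mock with simplified stand-in types, not the kernel's real definitions) only illustrates the pattern the helpers enable: callers pass an explicit weight, which lets a group's weight, rather than a task's, be charged against the runqueue.

#include <stdio.h>

/* Simplified stand-ins for the kernel's types; not the real definitions. */
struct load_weight {
	unsigned long weight;
};

struct rq {
	struct load_weight load;
};

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
}

static void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
}

/* Mirrors the helpers introduced in the first hunk above. */
static inline void inc_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_add(&rq->load, load);
}

static inline void dec_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_sub(&rq->load, load);
}

int main(void)
{
	struct rq rq = { .load = { .weight = 0 } };
	unsigned long group_weight = 2048;	/* hypothetical group shares */

	/* A caller charges an explicit weight (e.g. a group's) on enqueue... */
	inc_cpu_load(&rq, group_weight);
	printf("rq load after enqueue: %lu\n", rq.load.weight);

	/* ...and removes the same weight again on dequeue. */
	dec_cpu_load(&rq, group_weight);
	printf("rq load after dequeue: %lu\n", rq.load.weight);
	return 0;
}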