author     Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>   2008-01-25 15:08:00 -0500
committer  Ingo Molnar <mingo@elte.hu>                     2008-01-25 15:08:00 -0500
commit     58e2d4ca581167c2a079f4ee02be2f0bc52e8729 (patch)
tree       9a8c8324785800f3577fb897ca3e2ae21ad8c55a /kernel
parent     ec2c507fe8c8fa3c04fc6cb99a382a965c477379 (diff)
sched: group scheduling, change how cpu load is calculated
This patch changes how the cpu load exerted by fair_sched_class tasks is
calculated. Load exerted by fair_sched_class tasks on a cpu is now a
summation of the group weights, rather than a summation of task weights.
The weight exerted by a group on a cpu depends on the shares allocated
to that group.

This version of the patch has a minor impact on code size, but should have
no runtime/functional impact for !CONFIG_FAIR_GROUP_SCHED.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
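In other words, with CONFIG_FAIR_GROUP_SCHED a cpu's load no longer sums the
weight of every runnable task; each top-level group contributes one weight,
derived from its shares, no matter how many of its tasks are runnable on that
cpu. The following standalone sketch (illustration only, not kernel code; the
task weights and group weights are made-up example values) contrasts the two
summations:

/*
 * Illustrative sketch only -- not kernel code. Task weights and per-group
 * weights are arbitrary example values; the kernel derives real weights
 * from nice levels and from the shares assigned to each group.
 */
#include <stdio.h>

struct task {
        unsigned long weight;   /* this task's load weight */
        int group;              /* index of the group it belongs to */
};

int main(void)
{
        /* Three runnable tasks on one cpu: two in group 0, one in group 1. */
        struct task tasks[] = {
                { .weight = 1024, .group = 0 },
                { .weight = 1024, .group = 0 },
                { .weight = 1024, .group = 1 },
        };
        /* Weight each group exerts on this cpu, as set by its shares. */
        unsigned long group_weight[] = { 2048, 512 };

        unsigned long old_load = 0, new_load = 0;
        int seen[2] = { 0, 0 };
        int i;

        for (i = 0; i < 3; i++) {
                /* Old scheme: every runnable task's weight is added. */
                old_load += tasks[i].weight;
                /* New scheme: a group contributes its own weight once,
                 * regardless of how many of its tasks are runnable here. */
                if (!seen[tasks[i].group]) {
                        new_load += group_weight[tasks[i].group];
                        seen[tasks[i].group] = 1;
                }
        }

        printf("old per-task load:  %lu\n", old_load);  /* 3072 */
        printf("new per-group load: %lu\n", new_load);  /* 2560 */
        return 0;
}

This is the bookkeeping the patch implements below: inc_cpu_load()/dec_cpu_load()
are called with the weight of the highest-level entity ('topse') only when the
first task of a group is enqueued or its last task is dequeued on that cpu.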
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       27
-rw-r--r--  kernel/sched_fair.c  31
-rw-r--r--  kernel/sched_rt.c     2
3 files changed, 40 insertions, 20 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index cfa695819252..c915f3e6e593 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -886,6 +886,16 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #endif
 
+static inline void inc_cpu_load(struct rq *rq, unsigned long load)
+{
+        update_load_add(&rq->load, load);
+}
+
+static inline void dec_cpu_load(struct rq *rq, unsigned long load)
+{
+        update_load_sub(&rq->load, load);
+}
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -896,26 +906,14 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 
 #define sched_class_highest (&rt_sched_class)
 
-static inline void inc_load(struct rq *rq, const struct task_struct *p)
-{
-        update_load_add(&rq->load, p->se.load.weight);
-}
-
-static inline void dec_load(struct rq *rq, const struct task_struct *p)
-{
-        update_load_sub(&rq->load, p->se.load.weight);
-}
-
 static void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
         rq->nr_running++;
-        inc_load(rq, p);
 }
 
 static void dec_nr_running(struct task_struct *p, struct rq *rq)
 {
         rq->nr_running--;
-        dec_load(rq, p);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -4087,10 +4085,8 @@ void set_user_nice(struct task_struct *p, long nice)
                 goto out_unlock;
         }
         on_rq = p->se.on_rq;
-        if (on_rq) {
+        if (on_rq)
                 dequeue_task(rq, p, 0);
-                dec_load(rq, p);
-        }
 
         p->static_prio = NICE_TO_PRIO(nice);
         set_load_weight(p);
@@ -4100,7 +4096,6 @@ void set_user_nice(struct task_struct *p, long nice)
 
         if (on_rq) {
                 enqueue_task(rq, p, 0);
-                inc_load(rq, p);
                 /*
                  * If the task increased its priority or is running and
                  * lowered its priority, then reschedule its CPU:
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0c5fdce67228..30ae9c2a2861 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -760,15 +760,26 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
         struct cfs_rq *cfs_rq;
-        struct sched_entity *se = &p->se;
+        struct sched_entity *se = &p->se,
+                            *topse = NULL;      /* Highest schedulable entity */
+        int incload = 1;
 
         for_each_sched_entity(se) {
-                if (se->on_rq)
+                topse = se;
+                if (se->on_rq) {
+                        incload = 0;
                         break;
+                }
                 cfs_rq = cfs_rq_of(se);
                 enqueue_entity(cfs_rq, se, wakeup);
                 wakeup = 1;
         }
+        /* Increment cpu load if we just enqueued the first task of a group on
+         * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
+         * at the highest grouping level.
+         */
+        if (incload)
+                inc_cpu_load(rq, topse->load.weight);
 }
 
 /*
@@ -779,16 +790,28 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 {
         struct cfs_rq *cfs_rq;
-        struct sched_entity *se = &p->se;
+        struct sched_entity *se = &p->se,
+                            *topse = NULL;      /* Highest schedulable entity */
+        int decload = 1;
 
         for_each_sched_entity(se) {
+                topse = se;
                 cfs_rq = cfs_rq_of(se);
                 dequeue_entity(cfs_rq, se, sleep);
                 /* Don't dequeue parent if it has other entities besides us */
-                if (cfs_rq->load.weight)
+                if (cfs_rq->load.weight) {
+                        if (parent_entity(se))
+                                decload = 0;
                         break;
+                }
                 sleep = 1;
         }
+        /* Decrement cpu load if we just dequeued the last task of a group on
+         * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
+         * at the highest grouping level.
+         */
+        if (decload)
+                dec_cpu_load(rq, topse->load.weight);
 }
 
 /*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9ba3daa03475..cefcd5105146 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -32,6 +32,7 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 
         list_add_tail(&p->run_list, array->queue + p->prio);
         __set_bit(p->prio, array->bitmap);
+        inc_cpu_load(rq, p->se.load.weight);
 }
 
 /*
@@ -46,6 +47,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
         list_del(&p->run_list);
         if (list_empty(array->queue + p->prio))
                 __clear_bit(p->prio, array->bitmap);
+        dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*