author	Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>	2008-01-25 15:07:59 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:07:59 -0500
commit	93f992ccc008dd4030381caeebb252e85e66684b (patch)
tree	34c177cc9de4eee560aee07c08a1fde59b37ed37 /kernel/sched.c
parent	86faf39d0fc04272b05fab1db6d683f3ac7199d1 (diff)
sched: group scheduling code cleanup
Minor cleanups:

- Fix coding style
- remove obsolete comment

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e76b11ca6df3..7f827b70ae02 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -191,12 +191,12 @@ struct task_group init_task_group = {
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GRP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	2*NICE_0_LOAD
 #else
-# define INIT_TASK_GRP_LOAD	NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
-static int init_task_group_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 
 /* return group to which a task belongs */
 static inline struct task_group *task_group(struct task_struct *p)
@@ -881,21 +881,6 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 
 #define sched_class_highest (&rt_sched_class)
 
-/*
- * Update delta_exec, delta_fair fields for rq.
- *
- * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->load.weight) on the runqueue, while
- * delta_exec advances at the same rate as wall-clock (provided
- * cpu is not idle).
- *
- * delta_exec / delta_fair is a measure of the (smoothened) load on this
- * runqueue over any given interval. This (smoothened) load is used
- * during load balance.
- *
- * This function is called /before/ updating rq->load
- * and when switching tasks.
- */
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
 	update_load_add(&rq->load, p->se.load.weight);
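
For readers following the rq->load bookkeeping that inc_load() touches in the
context lines above: below is a minimal, stand-alone C sketch of the
update_load_add() accounting. The struct fields mirror the kernel's
load_weight, and NICE_0_LOAD is assumed to be 1024 (its value in this era of
the scheduler); this is an illustrative model, not kernel code.

/* Stand-alone sketch of the rq->load accounting done by
 * inc_load()/update_load_add(); names mirror kernel/sched.c
 * but this compiles as an ordinary userspace program. */
#include <stdio.h>

struct load_weight {
	unsigned long weight;     /* sum of queued entities' weights */
	unsigned long inv_weight; /* cached inverse, invalidated on change */
};

/* Bump the runqueue weight and clear the cached inverse so it
 * is recomputed lazily, as the kernel's update_load_add() does. */
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

int main(void)
{
	struct load_weight rq_load = { .weight = 0, .inv_weight = 0 };
	unsigned long nice_0_load = 1024; /* assumed NICE_0_LOAD */

	/* Enqueue two nice-0 tasks: rq->load.weight grows by each
	 * task's se.load.weight, which is what inc_load() does. */
	update_load_add(&rq_load, nice_0_load);
	update_load_add(&rq_load, nice_0_load);
	printf("rq->load.weight = %lu\n", rq_load.weight); /* prints 2048 */
	return 0;
}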