commit cad60d93e18ba52b6f069b2edb031c89bf603b07
tree   dfe74c165e7607c233d223614ef400163c6ba44c
parent 4e6f96f313561d86d248edf0eaff2336d8217e1b
author    Ingo Molnar <mingo@elte.hu>  2007-08-02 11:41:40 -0400
committer Ingo Molnar <mingo@elte.hu>  2007-08-02 11:41:40 -0400
[PATCH] sched: ->task_new cleanup

    Make sched_class.task_new == NULL a 'default method'; this allows
    the removal of task_new_rt.

    Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/sched.h |  2 +-
 kernel/sched.c        | 11 ++++++++---
 kernel/sched_fair.c   |  4 +---
 kernel/sched_rt.c     | 10 ----------
 4 files changed, 10 insertions(+), 17 deletions(-)
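
For illustration, here is a minimal standalone sketch of the "NULL hook means default behavior" pattern this patch applies to sched_class. The names here (struct ops, default_task_new, fancy_task_new, call_task_new) are hypothetical stand-ins, not identifiers from the kernel tree:

	#include <stdio.h>

	/* Hypothetical stand-in for sched_class: one optional hook. */
	struct ops {
		void (*task_new)(int id);	/* NULL selects the default path */
	};

	/* What the caller does when no hook is provided. */
	static void default_task_new(int id)
	{
		printf("default startup for task %d\n", id);
	}

	/* Caller-side pattern from the patch: treat NULL as a 'default method'. */
	static void call_task_new(const struct ops *o, int id)
	{
		if (!o->task_new) {
			default_task_new(id);	/* like the activate_task() branch */
			return;
		}
		o->task_new(id);		/* like the ->task_new() branch */
	}

	static void fancy_task_new(int id)
	{
		printf("class-specific startup for task %d\n", id);
	}

	int main(void)
	{
		struct ops plain = { .task_new = NULL };		/* like rt */
		struct ops fair  = { .task_new = fancy_task_new };	/* like fair */

		call_task_new(&plain, 1);	/* falls back to the default */
		call_task_new(&fair, 2);	/* dispatches to the hook */
		return 0;
	}

The kernel analogue: rt_sched_class simply leaves .task_new unset, and wake_up_new_task() falls back to activate_task(), so the class no longer needs a do-nothing wrapper.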
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81eec7e36c84..c9e0c2a6a950 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -874,7 +874,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p, u64 now);
 };
 
 struct load_weight {
diff --git a/kernel/sched.c b/kernel/sched.c
index 7bed2c58b986..915c75e5a276 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1641,22 +1641,27 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	unsigned long flags;
 	struct rq *rq;
 	int this_cpu;
+	u64 now;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	this_cpu = smp_processor_id(); /* parent's CPU */
+	now = rq_clock(rq);
 
 	p->prio = effective_prio(p);
 
-	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
-			task_cpu(p) != this_cpu || !current->se.on_rq) {
+	if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
+			(clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
+			!current->se.on_rq) {
+
 		activate_task(rq, p, 0);
 	} else {
 		/*
 		 * Let the scheduling class do new task startup
 		 * management (if any):
 		 */
-		p->sched_class->task_new(rq, p);
+		p->sched_class->task_new(rq, p, now);
+		inc_nr_running(p, rq, now);
 	}
 	check_preempt_curr(rq, p);
 	task_rq_unlock(rq, &flags);
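
Two caller-side details in the hunk above, not spelled out in the commit message: wake_up_new_task() now samples rq_clock() once and passes the timestamp into the hook, and inc_nr_running() moves from task_new_fair() into the caller (compare the kernel/sched_fair.c hunk below), keeping the runqueue accounting next to the hook invocation rather than inside each class's implementation.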
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6971db0a7160..243da6cae71c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1041,11 +1041,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
  * monopolize the CPU. Note: the parent runqueue is locked,
  * the child is not running yet.
  */
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 	struct sched_entity *se = &p->se;
-	u64 now = rq_clock(rq);
 
 	sched_info_queued(p);
 
@@ -1072,7 +1071,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	p->se.wait_runtime = -(sysctl_sched_granularity / 2);
 
 	__enqueue_entity(cfs_rq, se);
-	inc_nr_running(p, rq, now);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1192a2741b99..ade20dc422f1 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -229,15 +229,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	requeue_task_rt(rq, p);
 }
 
-/*
- * No parent/child timeslice management necessary for RT tasks,
- * just activate them:
- */
-static void task_new_rt(struct rq *rq, struct task_struct *p)
-{
-	activate_task(rq, p, 1);
-}
-
 static struct sched_class rt_sched_class __read_mostly = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
@@ -251,5 +242,4 @@ static struct sched_class rt_sched_class __read_mostly = {
 	.load_balance		= load_balance_rt,
 
 	.task_tick		= task_tick_rt,
-	.task_new		= task_new_rt,
 };