author     Bjoern B. Brandenburg <bbb@cs.unc.edu>    2008-05-10 04:31:43 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>    2008-05-10 04:31:43 -0400
commit     1939631637ed7ac662126a76ef7665e8295ac61d
tree       b75a670bb9abfc28e409af47b44c8c037dd5a900
parent     e424c53361798a31721734816ec53d29569dbbbd
two fixes and some logging improvements

- remove an outdated comment
- reorder the stack_in_use marker so it comes before finish_lock_switch()
- add TRACE()s to try_to_wake_up() (mirrored in the sketch below)
-rw-r--r--  kernel/sched.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
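The logging part of this change simply guards a per-task trace message behind a real-time check, so that only LITMUS^RT real-time tasks are traced on wake-up. The standalone C sketch below mirrors that pattern outside the kernel; trace_task, task_t, and the policy field are hypothetical stand-ins for the TRACE_TASK() macro and the is_realtime() helper, not the actual kernel API.

/* Standalone sketch (not kernel code): the guard-then-trace pattern
 * added around try_to_wake_up(). All names here are illustrative. */
#include <stdio.h>
#include <stdarg.h>

typedef struct {
	int pid;
	int policy;	/* stand-in for the task's scheduling class */
} task_t;

#define POLICY_REALTIME 1

static int is_realtime(const task_t *t)
{
	return t->policy == POLICY_REALTIME;
}

/* Emit a message tagged with the task's pid, loosely like TRACE_TASK(). */
static void trace_task(const task_t *t, const char *fmt, ...)
{
	va_list ap;

	printf("(%d) ", t->pid);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

static void try_to_wake_up(task_t *p)
{
	/* Trace only real-time tasks to keep the log volume manageable. */
	if (is_realtime(p))
		trace_task(p, "try_to_wake_up()\n");

	/* ... the actual wake-up work would happen here ... */

	if (is_realtime(p))
		trace_task(p, "try_to_wake_up() done\n");
}

int main(void)
{
	task_t p = { .pid = 42, .policy = POLICY_REALTIME };

	try_to_wake_up(&p);
	return 0;
}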
diff --git a/kernel/sched.c b/kernel/sched.c
index a6dbac6282..2a7e5c455f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1521,6 +1521,8 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	int new_cpu;
 #endif
 
+	if (is_realtime(p))
+		TRACE_TASK(p, "try_to_wake_up()\n");
 	rq = task_rq_lock(p, &flags);
 	old_state = p->state;
 	if (!(old_state & state))
@@ -1655,6 +1657,8 @@ out_activate:
 out_running:
 	p->state = TASK_RUNNING;
 out:
+	if (is_realtime(p))
+		TRACE_TASK(p, "try_to_wake_up() done, p->state=%d\n", p->state);
 	task_rq_unlock(rq, &flags);
 	tick_no_rqlock();
 	return success;
@@ -1896,8 +1900,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	prev_state = prev->state;
 	finish_arch_switch(prev);
 	litmus->finish_switch(prev);
-	finish_lock_switch(rq, prev);
 	prev->rt_param.stack_in_use = NO_CPU;
+	finish_lock_switch(rq, prev);
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
 		mmdrop(mm);
@@ -3651,10 +3655,6 @@ need_resched_nonpreemptible:
 	 */
 	local_irq_disable();
 	__update_rq_clock(rq);
-	/* do litmus scheduling outside of rq lock, so that we
-	 * can do proper migrations for global schedulers
-	 */
-
 	spin_lock(&rq->lock);
 	clear_tsk_need_resched(prev);
 	litmus_schedule(rq, prev);