author     Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-11-11 02:31:38 -0500
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-01-05 08:13:18 -0500
commit     69d8772cb575fe71cfb6a7fbc07ca21acfbb4665
tree       ecca820ac3435948695d1dcc06870d0281ac5b07
parent     a698e30c15dc3ce113f7f7c0c821a38f4f8cbab0
DBG: add additional tracing (wip-extra-debug)
This is not meant to be merged into master...
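
The instrumentation has three parts: a per-runqueue flag (rq->dbg_in_schedule)
marks the region of schedule() that runs under rq->lock, so that resched_task()
can report callers that set TIF_NEED_RESCHED while the scheduler is already
running; TRACE_TASK() probes before and after pre_schedule(), idle_balance(),
and put_prev_task() record which step (re)sets need_resched after
clear_tsk_need_resched(prev); and update_curr() in CFS reports runtime
accounting that is silently skipped while rq->skip_clock_update is set.

A minimal user-space sketch of the first pattern (simplified stand-ins, not
the actual kernel code; only the dbg_in_schedule name is taken from the patch):

    #include <stdio.h>

    struct rq {
        int dbg_in_schedule;    /* nonzero while schedule() runs on this rq */
        int nr_running;
    };

    /* Stand-in for resched_task(): log when a reschedule request arrives
     * while the scheduler itself is already running on this runqueue. */
    static void resched_task(struct rq *rq, const char *caller)
    {
        if (rq->dbg_in_schedule)
            printf("XXXX resched_task() during schedule() from %s [nr_running:%d]\n",
                   caller, rq->nr_running);
    }

    /* Stand-in for schedule(): the flag brackets the locked region. */
    static void schedule(struct rq *rq)
    {
        rq->dbg_in_schedule = 1;          /* set right after taking rq->lock */
        resched_task(rq, "wakeup path");  /* simulates a reentrant request */
        rq->dbg_in_schedule = 0;          /* cleared before rq->lock is dropped */
    }

    int main(void)
    {
        struct rq rq = { .dbg_in_schedule = 0, .nr_running = 1 };
        schedule(&rq);
        return 0;
    }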
-rw-r--r--   include/linux/sched.h |  1
-rw-r--r--   kernel/sched.c        | 31
-rw-r--r--   kernel/sched_fair.c   |  8
-rw-r--r--   litmus/preempt.c      |  4
4 files changed, 39 insertions(+), 5 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c9ac4fc837ba..0aaf530c9603 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2308,6 +2308,7 @@ static inline void set_tsk_need_resched(struct task_struct *tsk)
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
 {
         clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+        TRACE_TASK(tsk, "clear_tsk_need_resched\n");
 }
 
 static inline int test_tsk_need_resched(struct task_struct *tsk)
diff --git a/kernel/sched.c b/kernel/sched.c
index e4cfd193e76b..3682adadf6a7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -79,6 +79,7 @@
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
 
+#include <litmus/litmus.h>
 #include <litmus/debug_trace.h>
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
@@ -462,6 +463,8 @@ struct rq {
         /* runqueue lock: */
         raw_spinlock_t lock;
 
+        int dbg_in_schedule;
+
         /*
          * nr_running and cpu_load should be in the same cacheline because
          * remote CPUs use both these fields when doing load calculation.
@@ -573,6 +576,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static inline
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
+        if (test_tsk_need_resched(p))
+                TRACE_TASK(p, "already need_resched when resuming\n");
         rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 
         /*
@@ -1193,6 +1198,10 @@ static void resched_task(struct task_struct *p)
         if (test_tsk_need_resched(p))
                 return;
 
+        if (task_rq(p)->dbg_in_schedule)
+                TRACE_TASK(p, "XXXX calling resched_task() during schedule() from ret:0x%p [rt:%d nr_running:%d]\n",
+                           __builtin_return_address(0), is_realtime(p), task_rq(p)->nr_running);
+
         set_tsk_need_resched(p);
 
         cpu = task_cpu(p);
@@ -3823,6 +3832,7 @@ need_resched_nonpreemptible:
         hrtick_clear(rq);
 
         raw_spin_lock_irq(&rq->lock);
+        rq->dbg_in_schedule = 1;
         clear_tsk_need_resched(prev);
 
         switch_count = &prev->nivcsw;
@@ -3840,20 +3850,35 @@ need_resched_nonpreemptible:
                                 struct task_struct *to_wakeup;
 
                                 to_wakeup = wq_worker_sleeping(prev, cpu);
-                                if (to_wakeup)
+                                if (to_wakeup) {
+                                        TRACE_TASK(prev, "try_to_wake_up_local(%s/%d)\n", to_wakeup->comm, to_wakeup->pid);
                                         try_to_wake_up_local(to_wakeup);
+                                }
                         }
                         deactivate_task(rq, prev, DEQUEUE_SLEEP);
                 }
                 switch_count = &prev->nvcsw;
         }
 
+        if (test_tsk_need_resched(prev))
+                TRACE_TASK(prev, "need_resched before pre_schedule()\n");
+
         pre_schedule(rq, prev);
 
-        if (unlikely(!rq->nr_running))
+        if (test_tsk_need_resched(prev))
+                TRACE_TASK(prev, "need_resched after pre_schedule()\n");
+
+        if (unlikely(!rq->nr_running)) {
                 idle_balance(cpu, rq);
+                if (test_tsk_need_resched(prev))
+                        TRACE_TASK(prev, "need_resched after idle_balance\n");
+        }
 
         put_prev_task(rq, prev);
+
+        if (test_tsk_need_resched(prev))
+                TRACE_TASK(prev, "need_resched after put_prev\n");
+
         next = pick_next_task(rq);
 
         if (likely(prev != next)) {
@@ -3866,6 +3891,7 @@ need_resched_nonpreemptible:
 
                 TS_SCHED_END(next);
                 TS_CXS_START(next);
+                rq->dbg_in_schedule = 0;
                 context_switch(rq, prev, next); /* unlocks the rq */
                 TS_CXS_END(current);
                 /*
@@ -3878,6 +3904,7 @@ need_resched_nonpreemptible:
                 rq = cpu_rq(cpu);
         } else {
                 TS_SCHED_END(prev);
+                rq->dbg_in_schedule = 0;
                 raw_spin_unlock_irq(&rq->lock);
         }
 
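Note on the kernel/sched.c hunks above: the need_resched probes bracket every
step of schedule() that can set TIF_NEED_RESCHED after it was cleared for
prev, namely check_preempt_curr() on wakeup, pre_schedule(), idle_balance(),
and put_prev_task(). rq->dbg_in_schedule is set immediately after rq->lock is
taken and cleared on both exit paths, just before context_switch() releases
the lock and before the explicit unlock when no task switch happens, so any
resched_task() that fires inside this window is reported together with its
return address.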
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e0e8d5ca3c98..47a38af8b30f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -531,8 +531,14 @@ static void update_curr(struct cfs_rq *cfs_rq)
          * overflow on 32 bits):
          */
         delta_exec = (unsigned long)(now - curr->exec_start);
-        if (!delta_exec)
+        if (!delta_exec) {
+                if (rq_of(cfs_rq)->skip_clock_update) {
+                        TRACE_TASK(rq_of(cfs_rq)->curr,
+                                   "rq->skip_clock_update [CFS] => sum_exec_runtime=%llu delta=0\n",
+                                   curr->sum_exec_runtime);
+                }
                 return;
+        }
 
         __update_curr(cfs_rq, curr, delta_exec);
         curr->exec_start = now;
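The CFS hunk makes a silent case visible: when rq->skip_clock_update is set,
update_rq_clock() leaves rq->clock untouched, so now can equal
curr->exec_start, delta_exec collapses to zero, and no runtime is accounted
to the current task. A minimal user-space sketch of that zero-delta path
(simplified stand-ins, not the actual kernel code):

    #include <stdio.h>

    typedef unsigned long long u64;

    struct sched_entity {
        u64 exec_start;         /* timestamp of the last accounting update */
        u64 sum_exec_runtime;   /* total runtime accounted so far */
    };

    /* Stand-in for update_curr(): with a stale clock the delta is zero
     * and the accounting update is skipped, which the probe reports. */
    static void update_curr(struct sched_entity *curr, u64 now,
                            int skip_clock_update)
    {
        unsigned long delta_exec = (unsigned long)(now - curr->exec_start);

        if (!delta_exec) {
            if (skip_clock_update)
                printf("skip_clock_update => sum_exec_runtime=%llu delta=0\n",
                       curr->sum_exec_runtime);
            return;
        }
        curr->sum_exec_runtime += delta_exec;
        curr->exec_start = now;
    }

    int main(void)
    {
        struct sched_entity se = { .exec_start = 1000, .sum_exec_runtime = 0 };

        update_curr(&se, 1000, 1);  /* stale clock: now == exec_start */
        update_curr(&se, 1500, 0);  /* clock advanced: 500 units accounted */
        printf("sum_exec_runtime=%llu\n", se.sum_exec_runtime);
        return 0;
    }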
diff --git a/litmus/preempt.c b/litmus/preempt.c
index ebe2e3461895..ea13fd3bd3be 100644
--- a/litmus/preempt.c
+++ b/litmus/preempt.c
@@ -30,8 +30,8 @@ void sched_state_will_schedule(struct task_struct* tsk)
                 /* Litmus tasks should never be subject to a remote
                  * set_tsk_need_resched(). */
                 BUG_ON(is_realtime(tsk));
-        TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
-                   __builtin_return_address(0));
+        TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p task_cpu:%d\n",
+                   __builtin_return_address(0), task_cpu(tsk));
 }
 
 /* Called by the IPI handler after another CPU called smp_send_resched(). */
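Including task_cpu(tsk) in this trace records which CPU's runqueue the task
occupied when a set_tsk_need_resched() was observed, which helps correlate
these events with the per-CPU schedule() traces added in kernel/sched.c above.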