From 69d8772cb575fe71cfb6a7fbc07ca21acfbb4665 Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Thu, 11 Nov 2010 02:31:38 -0500
Subject: DBG: add additional tracing

This is not meant to be merged into master...
---
 include/linux/sched.h |  1 +
 kernel/sched.c        | 31 +++++++++++++++++++++++++++++--
 kernel/sched_fair.c   |  8 +++++++-
 litmus/preempt.c      |  4 ++--
 4 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index c9ac4fc837ba..0aaf530c9603 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2308,6 +2308,7 @@ static inline void set_tsk_need_resched(struct task_struct *tsk)
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
 {
         clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+        TRACE_TASK(tsk, "clear_tsk_need_resched\n");
 }
 
 static inline int test_tsk_need_resched(struct task_struct *tsk)
diff --git a/kernel/sched.c b/kernel/sched.c
index e4cfd193e76b..3682adadf6a7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -79,6 +79,7 @@
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
 
+#include
 #include
 #include
 #include
@@ -462,6 +463,8 @@ struct rq {
         /* runqueue lock: */
         raw_spinlock_t lock;
 
+        int dbg_in_schedule;
+
         /*
          * nr_running and cpu_load should be in the same cacheline because
          * remote CPUs use both these fields when doing load calculation.
          */
@@ -573,6 +576,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static inline
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
+        if (test_tsk_need_resched(p))
+                TRACE_TASK(p, "already need_resched when resuming\n");
         rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 
         /*
@@ -1193,6 +1198,10 @@ static void resched_task(struct task_struct *p)
         if (test_tsk_need_resched(p))
                 return;
 
+        if(task_rq(p)->dbg_in_schedule)
+                TRACE_TASK(p, "XXXX calling resched_task() during schedule() from ret:0x%p [rt:%d nr_running:%d]\n",
+                           __builtin_return_address(0), is_realtime(p), task_rq(p)->nr_running);
+
         set_tsk_need_resched(p);
         cpu = task_cpu(p);
@@ -3823,6 +3832,7 @@ need_resched_nonpreemptible:
         hrtick_clear(rq);
 
         raw_spin_lock_irq(&rq->lock);
+        rq->dbg_in_schedule = 1;
         clear_tsk_need_resched(prev);
 
         switch_count = &prev->nivcsw;
@@ -3840,20 +3850,35 @@ need_resched_nonpreemptible:
                                 struct task_struct *to_wakeup;
 
                                 to_wakeup = wq_worker_sleeping(prev, cpu);
-                                if (to_wakeup)
+                                if (to_wakeup) {
+                                        TRACE_TASK(prev, "try_to_wake_up_local(%s/%d)\n", to_wakeup->comm, to_wakeup->pid);
                                         try_to_wake_up_local(to_wakeup);
+                                }
                         }
                         deactivate_task(rq, prev, DEQUEUE_SLEEP);
                 }
                 switch_count = &prev->nvcsw;
         }
 
+        if (test_tsk_need_resched(prev))
+                TRACE_TASK(prev, "need_resched before pre_schedule()\n");
+
         pre_schedule(rq, prev);
 
-        if (unlikely(!rq->nr_running))
+        if (test_tsk_need_resched(prev))
+                TRACE_TASK(prev, "need_resched after pre_schedule()\n");
+
+        if (unlikely(!rq->nr_running)) {
                 idle_balance(cpu, rq);
+                if (test_tsk_need_resched(prev))
+                        TRACE_TASK(prev, "need_resched after idle_balance\n");
+        }
 
         put_prev_task(rq, prev);
+
+        if (test_tsk_need_resched(prev))
+                TRACE_TASK(prev, "need_resched after put_prev\n");
+
         next = pick_next_task(rq);
 
         if (likely(prev != next)) {
@@ -3866,6 +3891,7 @@ need_resched_nonpreemptible:
 
                 TS_SCHED_END(next);
                 TS_CXS_START(next);
+                rq->dbg_in_schedule = 0;
                 context_switch(rq, prev, next); /* unlocks the rq */
                 TS_CXS_END(current);
                 /*
@@ -3878,6 +3904,7 @@ need_resched_nonpreemptible:
                 rq = cpu_rq(cpu);
         } else {
                 TS_SCHED_END(prev);
+                rq->dbg_in_schedule = 0;
                 raw_spin_unlock_irq(&rq->lock);
         }
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e0e8d5ca3c98..47a38af8b30f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -531,8 +531,14 @@ static void update_curr(struct cfs_rq *cfs_rq)
          * overflow on 32 bits):
          */
         delta_exec = (unsigned long)(now - curr->exec_start);
-        if (!delta_exec)
+        if (!delta_exec) {
+                if (rq_of(cfs_rq)->skip_clock_update) {
+                        TRACE_TASK(rq_of(cfs_rq)->curr,
+                                   "rq->skip_clock_update [CFS] => sum_exec_runtime=%llu delta=0\n",
+                                   curr->sum_exec_runtime);
+                }
                 return;
+        }
 
         __update_curr(cfs_rq, curr, delta_exec);
         curr->exec_start = now;
diff --git a/litmus/preempt.c b/litmus/preempt.c
index ebe2e3461895..ea13fd3bd3be 100644
--- a/litmus/preempt.c
+++ b/litmus/preempt.c
@@ -30,8 +30,8 @@ void sched_state_will_schedule(struct task_struct* tsk)
                 /* Litmus tasks should never be subject to a remote
                  * set_tsk_need_resched(). */
                 BUG_ON(is_realtime(tsk));
-        TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
-                   __builtin_return_address(0));
+        TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p task_cpu:%d\n",
+                   __builtin_return_address(0), task_cpu(tsk));
 }
 
 /* Called by the IPI handler after another CPU called smp_send_resched(). */
-- 
cgit v1.2.2