From 841d6482cc4b0706f307f36ce1fe3f3f6963fd41 Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Thu, 28 Jul 2011 16:48:11 -0400
Subject: GSN-EDF & Core: improve debug TRACE'ing for NP sections

---
 include/litmus/litmus.h | 6 +++++-
 litmus/sched_gsn_edf.c  | 6 +++---
 litmus/sched_litmus.c   | 3 +++
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 28fb7b331cf3..31ac72eddef7 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -178,6 +178,7 @@ static inline int take_np(struct task_struct *t)
 static inline int request_exit_np_atomic(struct task_struct *t)
 {
 	union np_flag old, new;
+	int ok;
 
 	if (tsk_rt(t)->ctrl_page) {
 		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
@@ -186,17 +187,20 @@ static inline int request_exit_np_atomic(struct task_struct *t)
 			return 0;
 		} else if (old.np.preempt) {
 			/* already set, nothing for us to do */
+			TRACE_TASK(t, "not setting np.preempt flag again\n");
 			return 1;
 		} else {
 			/* non preemptive and flag not set */
 			new.raw = old.raw;
 			new.np.preempt = 1;
 			/* if we get old back, then we atomically set the flag */
-			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			ok = cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
			/* If we raced with a concurrent change, then so be
			 * it. Deliver it by IPI. We don't want an unbounded
			 * retry loop here since tasks might exploit that to
			 * keep the kernel busy indefinitely. */
+			TRACE_TASK(t, "request_exit_np => %d\n", ok);
+			return ok;
 		}
 	} else
 		return 0;
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index dffea128739f..9debea981419 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -266,8 +266,8 @@ static void check_for_preemptions(void)
 	     last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedf);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
+		TRACE_TASK(task, "attempting to link to P%d\n",
+			   last->cpu);
 		if (last->linked)
 			requeue(last->linked);
 		link_task_to_cpu(task, last);
@@ -409,7 +409,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 			     "state:%d sig:%d\n",
 			     blocks, out_of_time, np, sleep, preempt,
 			     prev->state, signal_pending(prev));
-	if (entry->linked && preempt)
+	if (entry->linked && preempt && !np)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
 
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 0687be0c8a78..faf2312b762e 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -197,6 +197,9 @@ static void yield_task_litmus(struct rq *rq)
 	TS_SYSCALL_IN_START;
 
 	TS_SYSCALL_OUT_END;
+
+	TRACE_CUR("yields\n");
+
 	BUG_ON(rq->curr != current);
 	/* sched_yield() is called to trigger delayed preemptions.
 	 * Thus, mark the current task as needing to be rescheduled.
-- 
cgit v1.2.2
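
The litmus.h hunk instruments a single-attempt compare-and-swap: the scheduler sets np.preempt in the np_flag word shared with user space only if the task still claims to be in a non-preemptive section, and on a lost race it gives up and (per the in-code comment) falls back to an IPI rather than retrying. The following is a minimal standalone sketch of that pattern using C11 atomics; the type, field layout, and function names (np_flag_t, sched_word, request_exit_np) are illustrative stand-ins, not the actual LITMUS^RT definitions.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the np_flag word kept in the task's
 * control page; the real layout lives in the kernel/user ctrl_page. */
typedef union {
	uint32_t raw;
	struct {
		uint16_t flag;    /* task claims to be non-preemptive */
		uint16_t preempt; /* scheduler asks the task to yield  */
	} np;
} np_flag_t;

static _Atomic uint32_t sched_word; /* stands in for ctrl_page->sched.raw */

/* Single-attempt request, mirroring the shape of request_exit_np_atomic():
 * returns 0 if the task is not non-preemptive (or the CAS raced and the
 * caller should fall back to an IPI), 1 if the preempt flag is (now) set. */
static int request_exit_np(void)
{
	np_flag_t old, new;

	old.raw = atomic_load(&sched_word);
	if (!old.np.flag)
		return 0;           /* no longer non-preemptive */
	if (old.np.preempt)
		return 1;           /* already set, nothing to do */

	new.raw = old.raw;
	new.np.preempt = 1;
	/* One shot only: if the word changed concurrently, do not loop --
	 * an unbounded retry loop could be exploited to keep the caller busy. */
	return atomic_compare_exchange_strong(&sched_word, &old.raw, new.raw);
}

int main(void)
{
	np_flag_t v = { .np = { .flag = 1, .preempt = 0 } };

	atomic_store(&sched_word, v.raw);
	printf("request_exit_np => %d\n", request_exit_np()); /* prints 1 */
	return 0;
}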