author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-07-28 16:48:11 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-07-28 16:48:11 -0400
commit	841d6482cc4b0706f307f36ce1fe3f3f6963fd41
tree	3627b2e561159e705245bb9b4ba012189960f3f3
parent	ac58d244d2a1830ba6a4f34ab31bc992d5c0e4aa
GSN-EDF & Core: improve debug TRACE'ing for NP sectionswip-bbb
 include/litmus/litmus.h | 6 +++++-
 litmus/sched_gsn_edf.c  | 6 +++---
 litmus/sched_litmus.c   | 3 +++
 3 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 28fb7b331cf3..31ac72eddef7 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -178,6 +178,7 @@ static inline int take_np(struct task_struct *t)
 static inline int request_exit_np_atomic(struct task_struct *t)
 {
 	union np_flag old, new;
+	int ok;
 
 	if (tsk_rt(t)->ctrl_page) {
 		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
@@ -186,17 +187,20 @@ static inline int request_exit_np_atomic(struct task_struct *t)
 			return 0;
 		} else if (old.np.preempt) {
 			/* already set, nothing for us to do */
+			TRACE_TASK(t, "not setting np.preempt flag again\n");
 			return 1;
 		} else {
 			/* non preemptive and flag not set */
 			new.raw = old.raw;
 			new.np.preempt = 1;
 			/* if we get old back, then we atomically set the flag */
-			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			ok = cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
 			/* If we raced with a concurrent change, then so be
 			 * it. Deliver it by IPI. We don't want an unbounded
 			 * retry loop here since tasks might exploit that to
 			 * keep the kernel busy indefinitely. */
+			TRACE_TASK(t, "request_exit_np => %d\n", ok);
+			return ok;
 		}
 	} else
 		return 0;
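
The hunk above only instruments an existing lock-free pattern: np.preempt is set with a single cmpxchg attempt, and a lost race is delivered by IPI instead of being retried, so a misbehaving task cannot keep the kernel spinning. A minimal userspace sketch of the same single-attempt pattern, using C11 atomics and hypothetical NP_ACTIVE/NP_PREEMPT bits in place of union np_flag:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical analogue of the np_flag word: the low bit marks a
 * non-preemptive section, the next bit a pending preemption request
 * (mirroring old.np.preempt in the patch above). */
#define NP_ACTIVE  0x1u
#define NP_PREEMPT 0x2u

/* Single-attempt flag set: try exactly once to turn on NP_PREEMPT.
 * As in request_exit_np_atomic(), a lost race is not retried; the
 * caller falls back to another delivery path (an IPI in the kernel). */
static int request_exit_np(atomic_uint *flags)
{
	unsigned int old = atomic_load(flags);

	if (!(old & NP_ACTIVE))
		return 0;		/* not in an NP section */
	if (old & NP_PREEMPT)
		return 1;		/* already requested */

	/* One cmpxchg attempt; 1 only if we installed the bit. */
	return atomic_compare_exchange_strong(flags, &old, old | NP_PREEMPT);
}

int main(void)
{
	atomic_uint flags = NP_ACTIVE;
	printf("request => %d, flags = %#x\n",
	       request_exit_np(&flags), atomic_load(&flags));
	return 0;
}
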
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index dffea128739f..9debea981419 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -266,8 +266,8 @@ static void check_for_preemptions(void)
 	     last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedf);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
+		TRACE_TASK(task, "attempting to link to P%d\n",
+			   last->cpu);
 		if (last->linked)
 			requeue(last->linked);
 		link_task_to_cpu(task, last);
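
Switching from TRACE() with a hand-formatted pid to TRACE_TASK() gives every message a uniform task prefix. A hedged sketch of how such a wrapper can be layered over a plain trace primitive (the real LITMUS^RT macros live in the kernel's trace headers; the exact prefix format here is an assumption):

#include <stdio.h>

/* Stand-in for the low-level trace sink. */
#define TRACE(fmt, ...) \
	fprintf(stderr, fmt, ##__VA_ARGS__)

/* Sketch of a TRACE_TASK-style wrapper: prepend the task's comm and
 * pid so every message is uniformly attributed (field order assumed). */
#define TRACE_TASK(t, fmt, ...) \
	TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##__VA_ARGS__)

struct task { const char *comm; int pid; };

int main(void)
{
	struct task t = { "rtspin", 1234 };
	TRACE_TASK(&t, "attempting to link to P%d\n", 2);
	return 0;
}
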
@@ -409,7 +409,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
409 "state:%d sig:%d\n", 409 "state:%d sig:%d\n",
410 blocks, out_of_time, np, sleep, preempt, 410 blocks, out_of_time, np, sleep, preempt,
411 prev->state, signal_pending(prev)); 411 prev->state, signal_pending(prev));
412 if (entry->linked && preempt) 412 if (entry->linked && preempt && !np)
413 TRACE_TASK(prev, "will be preempted by %s/%d\n", 413 TRACE_TASK(prev, "will be preempted by %s/%d\n",
414 entry->linked->comm, entry->linked->pid); 414 entry->linked->comm, entry->linked->pid);
415 415
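
The added !np guard keeps the trace honest: when prev is inside a non-preemptive section it is not descheduled at this point; the preemption is merely requested via np.preempt (see the litmus.h hunk above) and delivered once the section ends. A toy predicate distilling when the "will be preempted" message should fire (names hypothetical):

#include <stdio.h>

/* Hypothetical distillation of the condition in gsnedf_schedule():
 * trace "will be preempted" only when a higher-priority task is
 * linked, a preemption is pending, and prev is preemptable now. */
static int trace_preemption_now(int linked, int preempt, int np)
{
	return linked && preempt && !np;
}

int main(void)
{
	/* In an NP section (np = 1) the trace is suppressed: the
	 * preemption is deferred, not delivered at this reschedule. */
	printf("%d %d\n",
	       trace_preemption_now(1, 1, 0),	/* 1: preempted now */
	       trace_preemption_now(1, 1, 1));	/* 0: deferred */
	return 0;
}
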
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 0687be0c8a78..faf2312b762e 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -197,6 +197,9 @@ static void yield_task_litmus(struct rq *rq)
 	TS_SYSCALL_IN_START;
 
 	TS_SYSCALL_OUT_END;
+
+	TRACE_CUR("yields\n");
+
 	BUG_ON(rq->curr != current);
 	/* sched_yield() is called to trigger delayed preemptions.
 	 * Thus, mark the current task as needing to be rescheduled.
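
The new TRACE_CUR("yields\n") records the other half of the NP-section handshake: a task that leaves a non-preemptive section and finds the kernel's preempt request set calls sched_yield(), which lands here and marks the task for rescheduling. A hypothetical userspace sketch of that exit path (np_flag_preempt_set() stands in for reading the real control page):

#include <sched.h>
#include <stdio.h>

/* Placeholder: pretend the kernel set np.preempt while we were in
 * the NP section, i.e. it asked us to yield on exit. */
static int np_flag_preempt_set(void)
{
	return 1;
}

static void exit_np_section(void)
{
	if (np_flag_preempt_set()) {
		/* lands in yield_task_litmus(), which now TRACEs
		 * "yields" and marks the caller for rescheduling */
		sched_yield();
	}
}

int main(void)
{
	exit_np_section();
	puts("left NP section");
	return 0;
}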