author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-07-28 16:48:11 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-07-28 16:48:11 -0400
commit    841d6482cc4b0706f307f36ce1fe3f3f6963fd41 (patch)
tree      3627b2e561159e705245bb9b4ba012189960f3f3 /include/litmus/litmus.h
parent    ac58d244d2a1830ba6a4f34ab31bc992d5c0e4aa (diff)
GSN-EDF & Core: improve debug TRACE'ing for NP sections (wip-bbb)
Diffstat (limited to 'include/litmus/litmus.h')
-rw-r--r-- include/litmus/litmus.h | 6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 28fb7b331cf3..31ac72eddef7 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -178,6 +178,7 @@ static inline int take_np(struct task_struct *t)
 static inline int request_exit_np_atomic(struct task_struct *t)
 {
 	union np_flag old, new;
+	int ok;
 
 	if (tsk_rt(t)->ctrl_page) {
 		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
@@ -186,17 +187,20 @@ static inline int request_exit_np_atomic(struct task_struct *t)
 			return 0;
 		} else if (old.np.preempt) {
 			/* already set, nothing for us to do */
+			TRACE_TASK(t, "not setting np.preempt flag again\n");
 			return 1;
 		} else {
 			/* non preemptive and flag not set */
 			new.raw = old.raw;
 			new.np.preempt = 1;
 			/* if we get old back, then we atomically set the flag */
-			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			ok = cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
 			/* If we raced with a concurrent change, then so be
 			 * it. Deliver it by IPI. We don't want an unbounded
 			 * retry loop here since tasks might exploit that to
 			 * keep the kernel busy indefinitely. */
+			TRACE_TASK(t, "request_exit_np => %d\n", ok);
+			return ok;
 		}
 	} else
 		return 0;
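
The change preserves the key property of this code path: the cmpxchg() is attempted exactly once, with no retry loop, so a misbehaving task cannot keep the kernel spinning on its control page. The stand-alone sketch below (not part of the commit) illustrates that single-shot compare-and-swap pattern in user space; the np_flag bitfield layout is a simplified stand-in for the kernel's union np_flag, request_exit() stands in for request_exit_np_atomic(), and GCC's __atomic builtins replace the kernel's cmpxchg().

/*
 * Minimal user-space sketch of the single-shot compare-and-swap
 * pattern above (illustrative; names and layout are simplified
 * assumptions, not the kernel's exact definitions).
 */
#include <stdint.h>
#include <stdio.h>

union np_flag {
	uint32_t raw;
	struct {
		uint32_t flag:31;   /* task is in a non-preemptive section */
		uint32_t preempt:1; /* scheduler has requested an exit      */
	} np;
};

/* Try exactly once to set np.preempt. No retry loop: if we lose a
 * race we report failure (0) and let the caller fall back to an IPI,
 * mirroring the kernel code's reasoning. Returns 1 if the flag is
 * set (by us or already before). */
static int request_exit(union np_flag *sched)
{
	union np_flag old, new;

	old.raw = __atomic_load_n(&sched->raw, __ATOMIC_ACQUIRE);
	if (old.np.preempt)
		return 1; /* already set, nothing for us to do */

	new.raw = old.raw;
	new.np.preempt = 1;
	/* Single attempt: succeeds iff the word is unchanged since we
	 * read it, i.e. iff we "get old back". */
	return __atomic_compare_exchange_n(&sched->raw, &old.raw, new.raw,
					   0, __ATOMIC_ACQ_REL,
					   __ATOMIC_ACQUIRE);
}

int main(void)
{
	union np_flag f = { .raw = 0 };

	printf("first attempt:  %d\n", request_exit(&f)); /* sets the flag */
	printf("second attempt: %d\n", request_exit(&f)); /* already set   */
	return 0;
}

With the patch applied, both the "already set" early return and the cmpxchg() outcome are now visible in the trace log via TRACE_TASK(), which is what makes NP-section preemption requests debuggable without altering the one-shot semantics.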