author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-05-04 01:17:34 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-05-04 01:17:34 -0400
commit		2237cc63c52f8b80633d5ef04aa3b74957a68bab (patch)
tree		50f7d71c3691565e57363dd2e5002b5842bcfc43
parent		5b0c2aac1d78f1db36c85bd5cd1e215c7cbd2dd6 (diff)
rt domain: remove unused debugging code
-rw-r--r--	litmus/rt_domain.c	21
1 file changed, 4 insertions, 17 deletions
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index a46b7e8e72..dabf196d92 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -150,21 +150,6 @@ static void reinit_release_heap(struct task_struct* t)
 	/* use pre-allocated release heap */
 	rh = tsk_rt(t)->rel_heap;
 
-/*	{
-		lt_t start = litmus_clock();
-		int ret;
-		do {
-			if (lt_before(start + 1000000, litmus_clock())) {
-				TRACE_TASK(t, "BAD: timer still in use after 1ms! giving up.\n");
-				break;
-			}
-		} while ((ret = hrtimer_try_to_cancel(&rh->timer)) == -1);
-		if (ret != 0) {
-			TRACE_TASK(t, "BAD: cancelled timer and got %d.\n", ret);
-		}
-	}
-*/
-
 	/* Make sure it is safe to use.  The timer callback could still
 	 * be executing on another CPU; hrtimer_cancel() will wait
 	 * until the timer callback has completed.  However, under no
@@ -212,11 +197,13 @@ static void arm_release_timer(unsigned long _rt)
 		if (!rh) {
 			/* need to use our own, but drop lock first */
 			spin_unlock(&rt->release_lock);
-			TRACE_TASK(t, "Dropped release_lock 0x%p\n", &rt->release_lock);
+			TRACE_TASK(t, "Dropped release_lock 0x%p\n",
+				   &rt->release_lock);
 			reinit_release_heap(t);
 			TRACE_TASK(t, "release_heap ready\n");
 			spin_lock(&rt->release_lock);
-			TRACE_TASK(t, "Re-acquired release_lock 0x%p\n", &rt->release_lock);
+			TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
+				   &rt->release_lock);
 			rh = get_release_heap(rt, t, 1);
 		}
 		heap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
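
Note on the removed block: the debugging code deleted in the first hunk polled hrtimer_try_to_cancel() for up to 1 ms before giving up, while the comment kept in the file points out that hrtimer_cancel() already waits for a callback running on another CPU to complete. The sketch below is illustrative only and not part of the patch; the two helper names are hypothetical, and it merely contrasts the two cancellation patterns against the stock Linux hrtimer API.

/*
 * Illustrative sketch, not part of this patch. Function names are
 * hypothetical; only the hrtimer calls are real kernel API.
 */
#include <linux/hrtimer.h>

/* Pattern of the removed debug block: spin on hrtimer_try_to_cancel(),
 * which returns -1 while the timer callback is still executing on
 * another CPU, 0 if the timer was not queued, and 1 if it was dequeued
 * before firing.
 */
static void poll_cancel_timer(struct hrtimer *timer)
{
	int ret;

	do {
		ret = hrtimer_try_to_cancel(timer);
	} while (ret == -1);
}

/* Pattern the surrounding code relies on instead: hrtimer_cancel()
 * performs the same retry internally and returns only once the
 * callback is guaranteed not to be running.
 */
static void blocking_cancel_timer(struct hrtimer *timer)
{
	hrtimer_cancel(timer);
}

Since reinit_release_heap() presumably ends up calling hrtimer_cancel() anyway (as the retained comment describes), the hand-rolled polling loop added no safety and could be dropped.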