about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2008-10-06 11:36:10 -0400
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2008-10-06 11:36:10 -0400
commit32a53a4274c3cee072604869a8db5b761c8ea5c0 (patch)
tree20edd86277e55219d711a7401068713d56e7ec01
parenta810715b458ce4ed51fea9e017355d84e8990a6a (diff)
sched_trace: new implementation
This provides and hooks up a new made-from-scratch sched_trace() implementation based on Feather-Trace and ftdev.
-rw-r--r--include/litmus/sched_trace.h47
-rw-r--r--kernel/sched.c3
-rw-r--r--litmus/Makefile1
-rw-r--r--litmus/rt_domain.c1
-rwxr-xr-xlitmus/sched_cedf.c8
-rw-r--r--litmus/sched_gsn_edf.c8
-rw-r--r--litmus/sched_litmus.c28
-rw-r--r--litmus/sched_psn_edf.c2
-rw-r--r--litmus/sched_task_trace.c179
-rw-r--r--litmus/trace.c6
10 files changed, 239 insertions(+), 44 deletions(-)
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 3be349a69b..c5c55c6554 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -12,8 +12,9 @@ struct st_trace_header {
12 u32 job; /* The job sequence number. */ 12 u32 job; /* The job sequence number. */
13}; 13};
14 14
15#define ST_NAME_LEN 16
15struct st_name_data { 16struct st_name_data {
16 char cmd[16]; /* The name of the executable of this process. */ 17 char cmd[ST_NAME_LEN];/* The name of the executable of this process. */
17}; 18};
18 19
19struct st_param_data { /* regular params */ 20struct st_param_data { /* regular params */
@@ -104,31 +105,45 @@ struct st_event_record {
104#ifdef __KERNEL__ 105#ifdef __KERNEL__
105 106
106#include <linux/sched.h> 107#include <linux/sched.h>
107 108#include <litmus/feather_trace.h>
108/* dummies, need to be re-implemented */
109/* used in sched.c */
110#define sched_trace_task_arrival(t)
111#define sched_trace_task_departure(t)
112#define sched_trace_task_preemption(t, by)
113#define sched_trace_task_scheduled(t)
114
115/* used in scheduler plugins */
116#define sched_trace_job_release(t)
117#define sched_trace_job_completion(t)
118
119
120 109
121#ifdef CONFIG_SCHED_TASK_TRACE 110#ifdef CONFIG_SCHED_TASK_TRACE
122 111
123 112#define SCHED_TRACE(id, callback, task) \
124 113 ft_event1(id, callback, task)
114#define SCHED_TRACE2(id, callback, task, xtra) \
115 ft_event2(id, callback, task, xtra)
125 116
126#else 117#else
127 118
119#define SCHED_TRACE(id, callback, task) /* no tracing */
120#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
128 121
129#endif 122#endif
130 123
131 124
125#define SCHED_TRACE_BASE_ID 500
126
127
128#define sched_trace_task_name(t) \
129 SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t)
130#define sched_trace_task_param(t) \
131 SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t)
132#define sched_trace_task_release(t) \
133 SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t)
134#define sched_trace_task_switch_to(t) \
135 SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t)
136#define sched_trace_task_switch_away(t) \
137 SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
138#define sched_trace_task_completion(t, forced) \
139 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
140 forced)
141#define sched_trace_task_block(t) \
142 SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
143#define sched_trace_task_resume(t) \
144 SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
145
146
132#ifdef CONFIG_SCHED_DEBUG_TRACE 147#ifdef CONFIG_SCHED_DEBUG_TRACE
133void sched_trace_log_message(const char* fmt, ...); 148void sched_trace_log_message(const char* fmt, ...);
134 149
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ee07ba365..fdeced2628 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -67,6 +67,7 @@
67#include <asm/tlb.h> 67#include <asm/tlb.h>
68#include <asm/irq_regs.h> 68#include <asm/irq_regs.h>
69 69
70#include <litmus/sched_trace.h>
70#include <litmus/trace.h> 71#include <litmus/trace.h>
71 72
72#include <litmus/norqlock.h> 73#include <litmus/norqlock.h>
@@ -3691,6 +3692,8 @@ need_resched_nonpreemptible:
3691 rq->nr_switches++; 3692 rq->nr_switches++;
3692 rq->curr = next; 3693 rq->curr = next;
3693 ++*switch_count; 3694 ++*switch_count;
3695 sched_trace_task_switch_away(prev);
3696 sched_trace_task_switch_to(next);
3694 3697
3695 TS_SCHED_END(next); 3698 TS_SCHED_END(next);
3696 TS_CXS_START(next); 3699 TS_CXS_START(next);
diff --git a/litmus/Makefile b/litmus/Makefile
index 2b2ab5be1f..fa39a2b753 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -12,4 +12,5 @@ obj-y = sched_plugin.o litmus.o \
12 sched_pfair.o 12 sched_pfair.o
13 13
14obj-$(CONFIG_FEATHER_TRACE) += trace.o ft_event.o ftdev.o 14obj-$(CONFIG_FEATHER_TRACE) += trace.o ft_event.o ftdev.o
15obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
15obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o \ No newline at end of file 16obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o \ No newline at end of file
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 039e7c1374..be4ef5ea6a 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -185,6 +185,7 @@ static void arm_release_timer(unsigned long _rt)
185 185
186 list_for_each_safe(pos, safe, &list) { 186 list_for_each_safe(pos, safe, &list) {
187 t = list_entry(pos, struct task_struct, rt_param.list); 187 t = list_entry(pos, struct task_struct, rt_param.list);
188 sched_trace_task_release(t);
188 list_del(pos); 189 list_del(pos);
189 rh = get_release_heap(rt, get_release(t)); 190 rh = get_release_heap(rt, get_release(t));
190 heap_add(rt->order, &rh->heap, t, GFP_ATOMIC); 191 heap_add(rt->order, &rh->heap, t, GFP_ATOMIC);
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index bebb2c7ba5..f614430c6b 100755
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -409,11 +409,11 @@ static void cedf_tick(struct task_struct* t)
409} 409}
410 410
411/* caller holds cedf_lock */ 411/* caller holds cedf_lock */
412static noinline void job_completion(struct task_struct *t) 412static noinline void job_completion(struct task_struct *t, int forced)
413{ 413{
414 BUG_ON(!t); 414 BUG_ON(!t);
415 415
416 sched_trace_job_completion(t); 416 sched_trace_task_completion(t, forced);
417 417
418 TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state); 418 TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state);
419 419
@@ -504,7 +504,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
504 * running for blocked jobs). Preemption go first for the same reason. 504 * running for blocked jobs). Preemption go first for the same reason.
505 */ 505 */
506 if (!np && (out_of_time || sleep) && !blocks && !preempt) 506 if (!np && (out_of_time || sleep) && !blocks && !preempt)
507 job_completion(entry->scheduled); 507 job_completion(entry->scheduled, !sleep);
508 508
509 /* Link pending task if we became unlinked. 509 /* Link pending task if we became unlinked.
510 */ 510 */
@@ -602,7 +602,7 @@ static void cedf_task_wake_up(struct task_struct *task)
602 if (is_tardy(task, now)) { 602 if (is_tardy(task, now)) {
603 /* new sporadic release */ 603 /* new sporadic release */
604 release_at(task, now); 604 release_at(task, now);
605 sched_trace_job_release(task); 605 sched_trace_task_release(task);
606 } 606 }
607 else if (task->time_slice) 607 else if (task->time_slice)
608 /* came back in time before deadline 608 /* came back in time before deadline
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index a10c6cbe40..a93b6603ae 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -333,11 +333,11 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
333} 333}
334 334
335/* caller holds gsnedf_lock */ 335/* caller holds gsnedf_lock */
336static noinline void job_completion(struct task_struct *t) 336static noinline void job_completion(struct task_struct *t, int forced)
337{ 337{
338 BUG_ON(!t); 338 BUG_ON(!t);
339 339
340 sched_trace_job_completion(t); 340 sched_trace_task_completion(t, forced);
341 341
342 TRACE_TASK(t, "job_completion().\n"); 342 TRACE_TASK(t, "job_completion().\n");
343 343
@@ -458,7 +458,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
458 * for blocked jobs). Preemption go first for the same reason. 458 * for blocked jobs). Preemption go first for the same reason.
459 */ 459 */
460 if (!np && (out_of_time || sleep) && !blocks && !preempt) 460 if (!np && (out_of_time || sleep) && !blocks && !preempt)
461 job_completion(entry->scheduled); 461 job_completion(entry->scheduled, !sleep);
462 462
463 /* Link pending task if we became unlinked. 463 /* Link pending task if we became unlinked.
464 */ 464 */
@@ -558,7 +558,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
558 if (is_tardy(task, now)) { 558 if (is_tardy(task, now)) {
559 /* new sporadic release */ 559 /* new sporadic release */
560 release_at(task, now); 560 release_at(task, now);
561 sched_trace_job_release(task); 561 sched_trace_task_release(task);
562 } 562 }
563 else if (task->time_slice) 563 else if (task->time_slice)
564 /* came back in time before deadline 564 /* came back in time before deadline
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 1e541e4ac0..b4858f82a7 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -41,7 +41,7 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
41 other_rq = task_rq(rq->litmus_next); 41 other_rq = task_rq(rq->litmus_next);
42 TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu); 42 TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu);
43 43
44 /* while we drop the lock, the prev task could change its 44 /* while we drop the lock, the prev task could change its
45 * state 45 * state
46 */ 46 */
47 prev_state = prev->state; 47 prev_state = prev->state;
@@ -52,7 +52,7 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
52 * This could deadlock in the case of cross or circular migrations. 52 * This could deadlock in the case of cross or circular migrations.
53 * It's the job of the plugin to make sure that doesn't happen. 53 * It's the job of the plugin to make sure that doesn't happen.
54 */ 54 */
55 TRACE_TASK(rq->litmus_next, "stack_in_use=%d\n", 55 TRACE_TASK(rq->litmus_next, "stack_in_use=%d\n",
56 rq->litmus_next->rt_param.stack_in_use); 56 rq->litmus_next->rt_param.stack_in_use);
57 if (rq->litmus_next->rt_param.stack_in_use != NO_CPU) { 57 if (rq->litmus_next->rt_param.stack_in_use != NO_CPU) {
58 TRACE_TASK(rq->litmus_next, "waiting to deschedule\n"); 58 TRACE_TASK(rq->litmus_next, "waiting to deschedule\n");
@@ -67,14 +67,14 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
67 if (lt_before(_maybe_deadlock + 10000000, litmus_clock())) { 67 if (lt_before(_maybe_deadlock + 10000000, litmus_clock())) {
68 /* We've been spinning for 10ms. 68 /* We've been spinning for 10ms.
69 * Something can't be right! 69 * Something can't be right!
70 * Let's abandon the task and bail out; at least 70 * Let's abandon the task and bail out; at least
71 * we will have debug info instead of a hard 71 * we will have debug info instead of a hard
72 * deadlock. 72 * deadlock.
73 */ 73 */
74 TRACE_TASK(rq->litmus_next, 74 TRACE_TASK(rq->litmus_next,
75 "stack too long in use. Deadlock?\n"); 75 "stack too long in use. Deadlock?\n");
76 rq->litmus_next = NULL; 76 rq->litmus_next = NULL;
77 77
78 /* bail out */ 78 /* bail out */
79 spin_lock(&rq->lock); 79 spin_lock(&rq->lock);
80 return; 80 return;
@@ -91,13 +91,13 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
91 double_rq_lock(rq, other_rq); 91 double_rq_lock(rq, other_rq);
92 mb(); 92 mb();
93 if (prev->state != prev_state && is_realtime(prev)) { 93 if (prev->state != prev_state && is_realtime(prev)) {
94 TRACE_TASK(prev, 94 TRACE_TASK(prev,
95 "state changed while we dropped" 95 "state changed while we dropped"
96 " the lock: now=%d, old=%d\n", 96 " the lock: now=%d, old=%d\n",
97 prev->state, prev_state); 97 prev->state, prev_state);
98 if (prev_state && !prev->state) { 98 if (prev_state && !prev->state) {
99 /* prev task became unblocked 99 /* prev task became unblocked
100 * we need to simulate normal sequence of events 100 * we need to simulate normal sequence of events
101 * to scheduler plugins. 101 * to scheduler plugins.
102 */ 102 */
103 litmus->task_block(prev); 103 litmus->task_block(prev);
@@ -115,9 +115,9 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
115 if (!is_realtime(rq->litmus_next) || 115 if (!is_realtime(rq->litmus_next) ||
116 !is_running(rq->litmus_next)) { 116 !is_running(rq->litmus_next)) {
117 /* BAD BAD BAD */ 117 /* BAD BAD BAD */
118 TRACE_TASK(rq->litmus_next, 118 TRACE_TASK(rq->litmus_next,
119 "migration invariant FAILED: rt=%d running=%d\n", 119 "migration invariant FAILED: rt=%d running=%d\n",
120 is_realtime(rq->litmus_next), 120 is_realtime(rq->litmus_next),
121 is_running(rq->litmus_next)); 121 is_running(rq->litmus_next));
122 /* drop the task */ 122 /* drop the task */
123 rq->litmus_next = NULL; 123 rq->litmus_next = NULL;
@@ -131,17 +131,19 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
131 131
132static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, int wakeup) 132static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, int wakeup)
133{ 133{
134 if (wakeup) 134 if (wakeup) {
135 sched_trace_task_resume(p);
135 litmus->task_wake_up(p); 136 litmus->task_wake_up(p);
136 else 137 } else
137 TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); 138 TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n");
138} 139}
139 140
140static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep) 141static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep)
141{ 142{
142 if (sleep) 143 if (sleep) {
143 litmus->task_block(p); 144 litmus->task_block(p);
144 else 145 sched_trace_task_block(p);
146 } else
145 TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n"); 147 TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n");
146} 148}
147 149
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 71dcfa1fcd..9a2bdfcba0 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -261,7 +261,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
261 get_rt_flags(task) != RT_F_EXIT_SEM) { 261 get_rt_flags(task) != RT_F_EXIT_SEM) {
262 /* new sporadic release */ 262 /* new sporadic release */
263 release_at(task, now); 263 release_at(task, now);
264 sched_trace_job_release(task); 264 sched_trace_task_release(task);
265 } 265 }
266 requeue(task, edf); 266 requeue(task, edf);
267 spin_unlock_irqrestore(&pedf->slock, flags); 267 spin_unlock_irqrestore(&pedf->slock, flags);
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 0000000000..07155b82da
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,179 @@
1/* sched_task_trace.c -- record scheduling events to a byte stream
2 *
3 */
4
5#include <linux/module.h>
6#include <linux/sched.h>
7#include <linux/percpu.h>
8
9#include <litmus/ftdev.h>
10#include <litmus/litmus.h>
11
12#include <litmus/sched_trace.h>
13#include <litmus/feather_trace.h>
14#include <litmus/ftdev.h>
15
16#define FT_TASK_TRACE_MAJOR 253
17#define NO_EVENTS 4096 /* this is a buffer of 12 4k pages per CPU */
18
19struct local_buffer {
20 struct st_event_record record[NO_EVENTS];
21 char flag[NO_EVENTS];
22 struct ft_buffer ftbuf;
23};
24
25DEFINE_PER_CPU(struct local_buffer, st_event_buffer);
26
27static struct ftdev st_dev;
28
29static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
30{
31 return cpu_online(cpu) ? 0 : -ENODEV;
32}
33
34static int __init init_sched_task_trace(void)
35{
36 struct local_buffer* buf;
37 int i, ok = 0;
38 ftdev_init(&st_dev, THIS_MODULE);
39 for (i = 0; i < NR_CPUS; i++) {
40 buf = &per_cpu(st_event_buffer, i);
41 ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
42 sizeof(struct st_event_record),
43 buf->flag,
44 buf->record);
45 st_dev.minor[i].buf = &buf->ftbuf;
46 }
47 if (ok == NR_CPUS) {
48 st_dev.minor_cnt = NR_CPUS;
49 st_dev.can_open = st_dev_can_open;
50 return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR);
51 } else
52 return -EINVAL;
53}
54
55module_init(init_sched_task_trace);
56
57
58static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
59{
60 struct st_event_record* rec;
61 struct local_buffer* buf;
62
63 buf = &get_cpu_var(st_event_buffer);
64 if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
65 rec->hdr.type = type;
66 rec->hdr.cpu = smp_processor_id();
67 rec->hdr.pid = t ? t->pid : 0;
68 rec->hdr.job = t ? t->rt_param.job_params.job_no : -1;
69 } else
70 put_cpu_var(st_event_buffer);
71 /* rec will be NULL if it failed */
72 return rec;
73}
74
75static inline void put_record(struct st_event_record* rec)
76{
77 struct local_buffer* buf;
78 buf = &__get_cpu_var(st_event_buffer);
79 ft_buffer_finish_write(&buf->ftbuf, rec);
80 put_cpu_var(st_event_buffer);
81}
82
83feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
84{
85 struct task_struct *t = (struct task_struct*) _task;
86 struct st_event_record* rec = get_record(ST_NAME, t);
87 int i;
88 if (rec) {
89 for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
90 rec->data.name.cmd[i] = t->comm[i];
91 put_record(rec);
92 }
93}
94
95feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
96{
97 struct task_struct *t = (struct task_struct*) _task;
98 struct st_event_record* rec = get_record(ST_PARAM, t);
99 if (rec) {
100 rec->data.param.wcet = get_exec_cost(t);
101 rec->data.param.period = get_rt_period(t);
102 rec->data.param.phase = get_rt_phase(t);
103 rec->data.param.partition = get_partition(t);
104 put_record(rec);
105 }
106}
107
108feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
109{
110 struct task_struct *t = (struct task_struct*) _task;
111 struct st_event_record* rec = get_record(ST_RELEASE, t);
112 if (rec) {
113 rec->data.release.release = get_release(t);
114 rec->data.release.deadline = get_deadline(t);
115 put_record(rec);
116 }
117}
118
119/* skipped: st_assigned_data, we don't use it atm */
120
121feather_callback void do_sched_trace_task_switch_to(unsigned long id, unsigned long _task)
122{
123 struct task_struct *t = (struct task_struct*) _task;
124 struct st_event_record* rec;
125 if (is_realtime(t)) {
126 rec = get_record(ST_SWITCH_TO, t);
127 if (rec) {
128 rec->data.switch_to.when = sched_clock();
129 rec->data.switch_to.exec_time = get_exec_time(t);
130 put_record(rec);
131 }
132 }
133}
134
135feather_callback void do_sched_trace_task_switch_away(unsigned long id, unsigned long _task)
136{
137 struct task_struct *t = (struct task_struct*) _task;
138 struct st_event_record* rec;
139 if (is_realtime(t)) {
140 rec = get_record(ST_SWITCH_AWAY, t);
141 if (rec) {
142 rec->data.switch_away.when = sched_clock();
143 rec->data.switch_away.exec_time = get_exec_time(t);
144 put_record(rec);
145 }
146 }
147}
148
149feather_callback void do_sched_trace_task_completion(unsigned long id, unsigned long _task,
150 unsigned long forced)
151{
152 struct task_struct *t = (struct task_struct*) _task;
153 struct st_event_record* rec = get_record(ST_COMPLETION, t);
154 if (rec) {
155 rec->data.completion.when = sched_clock();
156 rec->data.completion.forced = forced;
157 put_record(rec);
158 }
159}
160
161feather_callback void do_sched_trace_task_block(unsigned long id, unsigned long _task)
162{
163 struct task_struct *t = (struct task_struct*) _task;
164 struct st_event_record* rec = get_record(ST_BLOCK, t);
165 if (rec) {
166 rec->data.block.when = sched_clock();
167 put_record(rec);
168 }
169}
170
171feather_callback void do_sched_trace_task_resume(unsigned long id, unsigned long _task)
172{
173 struct task_struct *t = (struct task_struct*) _task;
174 struct st_event_record* rec = get_record(ST_RESUME, t);
175 if (rec) {
176 rec->data.resume.when = sched_clock();
177 put_record(rec);
178 }
179}
diff --git a/litmus/trace.c b/litmus/trace.c
index 2ac79a2d8b..8851198330 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -1,12 +1,6 @@
1#include <linux/sched.h>
2#include <linux/fs.h>
3#include <linux/cdev.h>
4#include <asm/semaphore.h>
5#include <asm/uaccess.h>
6#include <linux/module.h> 1#include <linux/module.h>
7 2
8#include <litmus/ftdev.h> 3#include <litmus/ftdev.h>
9
10#include <litmus/litmus.h> 4#include <litmus/litmus.h>
11#include <litmus/trace.h> 5#include <litmus/trace.h>
12 6