path: root/litmus/sched_task_trace.c
Diffstat (limited to 'litmus/sched_task_trace.c')
-rw-r--r--	litmus/sched_task_trace.c	204
1 file changed, 204 insertions, 0 deletions
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..39a543e22d41
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,204 @@
/*
 * sched_task_trace.c -- record scheduling events to a byte stream
 */

#define NO_TASK_TRACE_DECLS

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>

#include <litmus/ftdev.h>
#include <litmus/litmus.h>

#include <litmus/sched_trace.h>
#include <litmus/feather_trace.h>

/* set MAJOR to 0 to have it dynamically assigned */
#define FT_TASK_TRACE_MAJOR	253
#define NO_EVENTS		4096 /* this is a buffer of 12 4k pages per CPU */

#define now() litmus_clock()

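/*
 * Per-CPU trace buffer: NO_EVENTS statically allocated event records,
 * one slot flag per record, and the ft_buffer bookkeeping that
 * feather-trace uses to hand out and commit slots.
 */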
struct local_buffer {
	struct st_event_record record[NO_EVENTS];
	char flag[NO_EVENTS];
	struct ft_buffer ftbuf;
};

DEFINE_PER_CPU(struct local_buffer, st_event_buffer);

static struct ftdev st_dev;

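/* Only the buffers of CPUs that are currently online may be opened. */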
static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
{
	return cpu_online(cpu) ? 0 : -ENODEV;
}

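/*
 * Set up one ft_buffer per possible CPU and register the "sched_trace"
 * character device.  init_ft_buffer() reports success with a non-zero
 * return value, so 'ok' counts how many per-CPU buffers were
 * initialized; registration only proceeds if all of them succeeded.
 */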
static int __init init_sched_task_trace(void)
{
	struct local_buffer* buf;
	int i, ok = 0;
	ftdev_init(&st_dev, THIS_MODULE);
	for (i = 0; i < NR_CPUS; i++) {
		buf = &per_cpu(st_event_buffer, i);
		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
				     sizeof(struct st_event_record),
				     buf->flag,
				     buf->record);
		st_dev.minor[i].buf = &buf->ftbuf;
	}
	if (ok == NR_CPUS) {
		st_dev.minor_cnt = NR_CPUS;
		st_dev.can_open = st_dev_can_open;
		return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR);
	} else {
		return -EINVAL;
	}
}

module_init(init_sched_task_trace);

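/*
 * Reserve a slot in the local CPU's buffer and fill in the event header.
 * On success, preemption stays disabled (via get_cpu_var()) until the
 * caller hands the record back with put_record(); on failure, preemption
 * is re-enabled immediately and NULL is returned.
 */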
static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
{
	struct st_event_record* rec = NULL;
	struct local_buffer* buf;

	buf = &get_cpu_var(st_event_buffer);
	if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
		rec->hdr.type = type;
		rec->hdr.cpu = smp_processor_id();
		rec->hdr.pid = t ? t->pid : 0;
		rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
	} else {
		put_cpu_var(st_event_buffer);
	}
	/* rec will be NULL if it failed */
	return rec;
}

static inline void put_record(struct st_event_record* rec)
{
	struct local_buffer* buf;
	buf = &__get_cpu_var(st_event_buffer);
	ft_buffer_finish_write(&buf->ftbuf, rec);
	put_cpu_var(st_event_buffer);
}

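/*
 * The handlers below are feather-trace callbacks.  They are not called
 * directly; the sched_trace_*() hooks declared in <litmus/sched_trace.h>
 * expand to feather-trace trigger points that dispatch to them.  'id' is
 * the feather-trace event id and '_task' the task the event refers to.
 */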
feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_NAME, t);
	int i;
	if (rec) {
		for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
			rec->data.name.cmd[i] = t->comm[i];
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_PARAM, t);
	if (rec) {
		rec->data.param.wcet = get_exec_cost(t);
		rec->data.param.period = get_rt_period(t);
		rec->data.param.phase = get_rt_phase(t);
		rec->data.param.partition = get_partition(t);
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RELEASE, t);
	if (rec) {
		rec->data.release.release = get_release(t);
		rec->data.release.deadline = get_deadline(t);
		put_record(rec);
	}
}

/* skipped: st_assigned_data, we don't use it atm */

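/* Context-switch events are only recorded for real-time tasks. */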
feather_callback void do_sched_trace_task_switch_to(unsigned long id,
						    unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t)) {
		rec = get_record(ST_SWITCH_TO, t);
		if (rec) {
			rec->data.switch_to.when = now();
			rec->data.switch_to.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

feather_callback void do_sched_trace_task_switch_away(unsigned long id,
						      unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t)) {
		rec = get_record(ST_SWITCH_AWAY, t);
		if (rec) {
			rec->data.switch_away.when = now();
			rec->data.switch_away.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

feather_callback void do_sched_trace_task_completion(unsigned long id,
						     unsigned long _task,
						     unsigned long forced)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_COMPLETION, t);
	if (rec) {
		rec->data.completion.when = now();
		rec->data.completion.forced = forced;
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_block(unsigned long id,
						unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_BLOCK, t);
	if (rec) {
		rec->data.block.when = now();
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_resume(unsigned long id,
						 unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RESUME, t);
	if (rec) {
		rec->data.resume.when = now();
		put_record(rec);
	}
}

feather_callback void do_sched_trace_sys_release(unsigned long id,
						 unsigned long _start)
{
	/* '_start' points to the release time supplied by the caller */
	lt_t *start = (lt_t*) _start;
	struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
	if (rec) {
		rec->data.sys_release.when = now();
		rec->data.sys_release.release = *start;
		put_record(rec);
	}
}
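For illustration, a minimal sketch of how a scheduler plugin typically reaches the handlers above, assuming the sched_trace_task_release() and sched_trace_task_completion() wrappers from <litmus/sched_trace.h>; the demo_* function names are hypothetical and not part of this patch:

/* Sketch only: hypothetical plugin snippet, not part of this patch. */
#include <linux/sched.h>
#include <litmus/sched_trace.h>

static void demo_on_job_release(struct task_struct *t)
{
	sched_trace_task_release(t);       /* records an ST_RELEASE event */
}

static void demo_on_job_completion(struct task_struct *t)
{
	sched_trace_task_completion(t, 0); /* 0: the completion was not forced */
}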