path: root/litmus/sched_task_trace.c
author		Bjoern Brandenburg <bbb@mpi-sws.org>	2015-08-09 07:18:47 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2015-08-09 06:21:18 -0400
commit		1921674066c4ccb534e357c69629e365be626e0e (patch)
tree		d8d6bdf12b6b71223a3afdea5561adc26458f7fd /litmus/sched_task_trace.c
parent		8bee6550e07cd89133cbae0c7a6f6097a2011d58 (diff)
Add schedule tracing support
This patch introduces the sched_trace infrastructure, which in principle allows tracing the generated schedule. However, this patch does not yet integrate the callbacks with the kernel.
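Since the callbacks below are not yet invoked from anywhere, here is a rough, hypothetical sketch (not part of this patch) of what an eventual call site could look like. It assumes the sched_trace_*() wrapper macros declared in litmus/sched_trace.h, which dispatch to the do_sched_trace_*() feather-trace callbacks added below; the function name and placement are illustrative only.

/* Hypothetical call site -- for illustration only, NOT part of this
 * patch. Assumes the sched_trace_*() wrapper macros declared in
 * litmus/sched_trace.h dispatch to the do_sched_trace_*() callbacks
 * defined in this file. */
static void on_job_release(struct task_struct *t)
{
        /* ... plugin-specific release handling ... */
        sched_trace_task_release(t);    /* records an ST_RELEASE event */
}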
Diffstat (limited to 'litmus/sched_task_trace.c')
-rw-r--r--	litmus/sched_task_trace.c	243
1 file changed, 243 insertions(+), 0 deletions(-)
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..4f95551df1b4
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,243 @@
/*
 * sched_task_trace.c -- record scheduling events to a byte stream
 */

#define NO_TASK_TRACE_DECLS

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>

#include <litmus/ftdev.h>
#include <litmus/litmus.h>

#include <litmus/sched_trace.h>
#include <litmus/feather_trace.h>
#include <litmus/ftdev.h>


#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT)

#define now() litmus_clock()

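/* Per-CPU ring buffer: NO_EVENTS preallocated event records and one flag
 * byte per slot, managed as a feather-trace ft_buffer. */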
struct local_buffer {
        struct st_event_record record[NO_EVENTS];
        char flag[NO_EVENTS];
        struct ft_buffer ftbuf;
};

DEFINE_PER_CPU(struct local_buffer, st_event_buffer);

static struct ftdev st_dev;

static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
{
        return cpu_online(cpu) ? 0 : -ENODEV;
}

static int __init init_sched_task_trace(void)
{
        struct local_buffer* buf;
        int i, ok = 0, err;
        printk("Allocated %u sched_trace_xxx() events per CPU "
               "(buffer size: %d bytes)\n",
               NO_EVENTS, (int) sizeof(struct local_buffer));

        err = ftdev_init(&st_dev, THIS_MODULE,
                         num_online_cpus(), "sched_trace");
        if (err)
                goto err_out;

        for (i = 0; i < st_dev.minor_cnt; i++) {
                buf = &per_cpu(st_event_buffer, i);
                ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
                                     sizeof(struct st_event_record),
                                     buf->flag,
                                     buf->record);
                st_dev.minor[i].buf = &buf->ftbuf;
        }
        if (ok == st_dev.minor_cnt) {
                st_dev.can_open = st_dev_can_open;
                err = register_ftdev(&st_dev);
                if (err)
                        goto err_dealloc;
        } else {
                err = -EINVAL;
                goto err_dealloc;
        }

        return 0;

err_dealloc:
        ftdev_exit(&st_dev);
err_out:
        printk(KERN_WARNING "Could not register sched_trace module\n");
        return err;
}

static void __exit exit_sched_task_trace(void)
{
        ftdev_exit(&st_dev);
}

module_init(init_sched_task_trace);
module_exit(exit_sched_task_trace);


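/* Reserve the next free slot in this CPU's buffer and fill in the common
 * header fields. On success, preemption stays disabled (via get_cpu_var())
 * until the matching put_record(); on failure, preemption is re-enabled
 * here and NULL is returned. */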
static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
{
        struct st_event_record* rec = NULL;
        struct local_buffer* buf;

        buf = &get_cpu_var(st_event_buffer);
        if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
                rec->hdr.type = type;
                rec->hdr.cpu = smp_processor_id();
                rec->hdr.pid = t ? t->pid : 0;
                rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
        } else {
                put_cpu_var(st_event_buffer);
        }
        /* rec will be NULL if it failed */
        return rec;
}

static inline void put_record(struct st_event_record* rec)
{
        struct local_buffer* buf;
        /* don't use get_cpu_var() here, get_record() did that already for us */
        buf = this_cpu_ptr(&st_event_buffer);
        ft_buffer_finish_write(&buf->ftbuf, rec);
        /* matches the get_cpu_var() in get_record() */
        put_cpu_var(st_event_buffer);
}

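/* The feather_callback functions below implement the individual event
 * types. Arguments arrive as unsigned longs because they are passed
 * through feather-trace's generic trigger mechanism. */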
feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec = get_record(ST_NAME, t);
        int i;
        if (rec) {
                for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
                        rec->data.name.cmd[i] = t->comm[i];
                put_record(rec);
        }
}

feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec = get_record(ST_PARAM, t);
        if (rec) {
                rec->data.param.wcet = get_exec_cost(t);
                rec->data.param.period = get_rt_period(t);
                rec->data.param.phase = get_rt_phase(t);
                rec->data.param.partition = get_partition(t);
                rec->data.param.class = get_class(t);
                put_record(rec);
        }
}

feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec = get_record(ST_RELEASE, t);
        if (rec) {
                rec->data.release.release = get_release(t);
                rec->data.release.deadline = get_deadline(t);
                put_record(rec);
        }
}

/* skipped: st_assigned_data, we don't use it atm */

feather_callback void do_sched_trace_task_switch_to(unsigned long id,
                                                    unsigned long _task)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec;
        if (is_realtime(t)) {
                rec = get_record(ST_SWITCH_TO, t);
                if (rec) {
                        rec->data.switch_to.when = now();
                        rec->data.switch_to.exec_time = get_exec_time(t);
                        put_record(rec);
                }
        }
}

feather_callback void do_sched_trace_task_switch_away(unsigned long id,
                                                      unsigned long _task)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec;
        if (is_realtime(t)) {
                rec = get_record(ST_SWITCH_AWAY, t);
                if (rec) {
                        rec->data.switch_away.when = now();
                        rec->data.switch_away.exec_time = get_exec_time(t);
                        put_record(rec);
                }
        }
}

feather_callback void do_sched_trace_task_completion(unsigned long id,
                                                     unsigned long _task,
                                                     unsigned long forced)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec = get_record(ST_COMPLETION, t);
        if (rec) {
                rec->data.completion.when = now();
                rec->data.completion.forced = forced;
                put_record(rec);
        }
}

feather_callback void do_sched_trace_task_block(unsigned long id,
                                                unsigned long _task)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec = get_record(ST_BLOCK, t);
        if (rec) {
                rec->data.block.when = now();
                put_record(rec);
        }
}

feather_callback void do_sched_trace_task_resume(unsigned long id,
                                                 unsigned long _task)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec = get_record(ST_RESUME, t);
        if (rec) {
                rec->data.resume.when = now();
                put_record(rec);
        }
}

feather_callback void do_sched_trace_sys_release(unsigned long id,
                                                 unsigned long _start)
{
        lt_t *start = (lt_t*) _start;
        struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
        if (rec) {
                rec->data.sys_release.when = now();
                rec->data.sys_release.release = *start;
                put_record(rec);
        }
}

feather_callback void do_sched_trace_action(unsigned long id,
                                            unsigned long _task,
                                            unsigned long action)
{
        struct task_struct *t = (struct task_struct*) _task;
        struct st_event_record* rec = get_record(ST_ACTION, t);

        if (rec) {
                rec->data.action.when = now();
                rec->data.action.action = action;
                put_record(rec);
        }
}
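Not part of this patch, but to make the intended data flow concrete: once register_ftdev() succeeds, each per-CPU buffer is exposed as a character device from which userspace can read raw st_event_record structs. A minimal reader might look like the sketch below. The device path and the mirrored record layout are assumptions (the authoritative definitions live in litmus/sched_trace.h) and must be verified against the actual headers before use.

/* Hedged sketch of a userspace reader for one per-CPU trace device.
 * ASSUMPTIONS: the device node path and the struct layouts mirrored
 * below must match litmus/sched_trace.h on the target system. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Mirrors the hdr fields written by get_record() above; the field
 * widths are assumptions. */
struct st_trace_header {
        uint8_t  type;  /* event type, e.g. ST_RELEASE */
        uint8_t  cpu;   /* CPU on which the event was recorded */
        uint16_t pid;   /* pid of the traced task (0 for system events) */
        uint32_t job;   /* job sequence number */
};

struct st_event_record {
        struct st_trace_header hdr;
        uint8_t payload[16];    /* assumed size of the event-data union */
};

int main(void)
{
        struct st_event_record rec;
        /* Assumed device node; actual naming depends on how ftdev
         * devices are created on the target system. */
        int fd = open("/dev/litmus/sched_trace0", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        while (read(fd, &rec, sizeof(rec)) == (ssize_t) sizeof(rec))
                printf("type=%u cpu=%u pid=%u job=%u\n",
                       rec.hdr.type, rec.hdr.cpu, rec.hdr.pid, rec.hdr.job);
        close(fd);
        return 0;
}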