author    Jeremy Erickson <jerickso@cs.unc.edu>  2012-08-30 21:01:47 -0400
committer Jeremy Erickson <jerickso@cs.unc.edu>  2012-08-30 21:01:47 -0400
commit    b1e1fea67bca3796d5f9133a92c300ec4fa93a4f (patch)
tree      5cc1336e1fe1d6f93b1067e73e43381dd20db690 /litmus/sched_task_trace.c
parent    f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)

Bjoern's Dissertation Code with Priority Donation (wip-splitting-omlp-jerickso)
Diffstat (limited to 'litmus/sched_task_trace.c')
-rw-r--r--  litmus/sched_task_trace.c  241
1 file changed, 241 insertions(+), 0 deletions(-)
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..5ef8d09ab41f
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,241 @@
/*
 * sched_task_trace.c -- record scheduling events to a byte stream
 */

#define NO_TASK_TRACE_DECLS

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>

#include <litmus/ftdev.h>
#include <litmus/litmus.h>

#include <litmus/sched_trace.h>
#include <litmus/feather_trace.h>

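/* Capacity of each per-CPU buffer; the shift is set via the
 * CONFIG_SCHED_TASK_TRACE_SHIFT Kconfig option. */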
#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT)

#define now() litmus_clock()

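/* Per-CPU trace buffer: backing storage for the event records, one flag
 * byte per slot, and the ft_buffer bookkeeping that manages them. */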
struct local_buffer {
	struct st_event_record record[NO_EVENTS];
	char flag[NO_EVENTS];
	struct ft_buffer ftbuf;
};

DEFINE_PER_CPU(struct local_buffer, st_event_buffer);

static struct ftdev st_dev;

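/* A per-CPU trace device may only be opened if its CPU is online. */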
static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
{
	return cpu_online(cpu) ? 0 : -ENODEV;
}

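/* Module init: set up one ftdev minor per CPU and register the device. */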
static int __init init_sched_task_trace(void)
{
	struct local_buffer* buf;
	int i, ok = 0, err;

	printk(KERN_INFO "Allocated %u sched_trace_xxx() events per CPU "
	       "(buffer size: %d bytes)\n",
	       NO_EVENTS, (int) sizeof(struct local_buffer));

	err = ftdev_init(&st_dev, THIS_MODULE,
			 num_online_cpus(), "sched_trace");
	if (err)
		goto err_out;

	for (i = 0; i < st_dev.minor_cnt; i++) {
		buf = &per_cpu(st_event_buffer, i);
		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
				     sizeof(struct st_event_record),
				     buf->flag,
				     buf->record);
		st_dev.minor[i].buf = &buf->ftbuf;
	}
	if (ok == st_dev.minor_cnt) {
		st_dev.can_open = st_dev_can_open;
		err = register_ftdev(&st_dev);
		if (err)
			goto err_dealloc;
	} else {
		err = -EINVAL;
		goto err_dealloc;
	}

	return 0;

err_dealloc:
	ftdev_exit(&st_dev);
err_out:
	printk(KERN_WARNING "Could not register sched_trace module\n");
	return err;
}

static void __exit exit_sched_task_trace(void)
{
	ftdev_exit(&st_dev);
}

module_init(init_sched_task_trace);
module_exit(exit_sched_task_trace);

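/*
 * Reserve the next free slot in this CPU's buffer and fill in the common
 * record header. On success, the per-CPU variable reference (and with it
 * preemption-disable) is held until the matching put_record(); on failure,
 * it is dropped here and NULL is returned.
 */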
static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
{
	struct st_event_record* rec = NULL;
	struct local_buffer* buf;

	buf = &get_cpu_var(st_event_buffer);
	if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
		rec->hdr.type = type;
		rec->hdr.cpu = smp_processor_id();
		rec->hdr.pid = t ? t->pid : 0;
		rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
	} else {
		put_cpu_var(st_event_buffer);
	}
	/* rec will be NULL if it failed */
	return rec;
}

static inline void put_record(struct st_event_record* rec)
{
	struct local_buffer* buf;
	buf = &__get_cpu_var(st_event_buffer);
	ft_buffer_finish_write(&buf->ftbuf, rec);
	put_cpu_var(st_event_buffer);
}

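/*
 * The feather_callback functions below are invoked through Feather-Trace
 * trigger points in the scheduler; each one records a single event type.
 */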
feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_NAME, t);
	int i;
	if (rec) {
		for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
			rec->data.name.cmd[i] = t->comm[i];
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_PARAM, t);
	if (rec) {
		rec->data.param.wcet = get_exec_cost(t);
		rec->data.param.period = get_rt_period(t);
		rec->data.param.phase = get_rt_phase(t);
		rec->data.param.partition = get_partition(t);
		rec->data.param.class = get_class(t);
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RELEASE, t);
	if (rec) {
		rec->data.release.release = get_release(t);
		rec->data.release.deadline = get_deadline(t);
		put_record(rec);
	}
}

/* skipped: st_assigned_data, we don't use it at the moment */

feather_callback void do_sched_trace_task_switch_to(unsigned long id,
						    unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t)) {
		rec = get_record(ST_SWITCH_TO, t);
		if (rec) {
			rec->data.switch_to.when = now();
			rec->data.switch_to.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

feather_callback void do_sched_trace_task_switch_away(unsigned long id,
						      unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t)) {
		rec = get_record(ST_SWITCH_AWAY, t);
		if (rec) {
			rec->data.switch_away.when = now();
			rec->data.switch_away.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

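/* 'forced' is nonzero when the job completion was forced (e.g. by budget
 * enforcement) rather than signaled by the task itself. */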
feather_callback void do_sched_trace_task_completion(unsigned long id,
						     unsigned long _task,
						     unsigned long forced)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_COMPLETION, t);
	if (rec) {
		rec->data.completion.when = now();
		rec->data.completion.forced = forced;
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_block(unsigned long id,
						unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_BLOCK, t);
	if (rec) {
		rec->data.block.when = now();
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_resume(unsigned long id,
						 unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RESUME, t);
	if (rec) {
		rec->data.resume.when = now();
		put_record(rec);
	}
}

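/* Unlike the callbacks above, _start is a pointer to the (lt_t) release
 * time of a synchronous system release, not a task pointer. */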
feather_callback void do_sched_trace_sys_release(unsigned long id,
						 unsigned long _start)
{
	lt_t *start = (lt_t*) _start;
	struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
	if (rec) {
		rec->data.sys_release.when = now();
		rec->data.sys_release.release = *start;
		put_record(rec);
	}
}

feather_callback void do_sched_trace_action(unsigned long id,
					    unsigned long _task,
					    unsigned long action)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_ACTION, t);

	if (rec) {
		rec->data.action.when = now();
		rec->data.action.action = action;
		put_record(rec);
	}
}