author    Felipe Cerqueira <felipec@mpi-sws.org>    2013-02-12 07:45:47 -0500
committer Bjoern Brandenburg <bbb@mpi-sws.org>      2013-08-07 03:46:46 -0400
commit    755f7bf22de30de260264c2f0b04c783952618f1 (patch)
tree      cdf44fe8eeb688c23fe2dd432dd113fa51a5c791 /litmus
parent    f88455305fc289bc2afe1f9c80598ac92f6363b9 (diff)
Add schedule tracing support
This patch introduces the sched_trace infrastructure, which in principle allows tracing the generated schedule. However, it does not yet integrate the tracing callbacks with the kernel.
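The callbacks added below are intended to be armed through feather-trace trigger points once they are hooked into the scheduler; that hook-up is not part of this patch. As an illustration only, a caller inside the kernel could invoke one of them directly using the signatures introduced here (the event id value is a placeholder, not a real feather-trace id):

    /* Hypothetical direct invocation, for illustration only; the actual
     * integration goes through feather-trace trigger points, which this
     * patch does not yet provide.  The first argument is the feather-trace
     * event id (0 is a placeholder), the second the task cast to
     * unsigned long, matching the callback signatures below.
     */
    static void example_trace_release(struct task_struct *t)
    {
            do_sched_trace_task_release(0 /* event id */, (unsigned long) t);
    }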
Diffstat (limited to 'litmus')
-rw-r--r--   litmus/Kconfig              34
-rw-r--r--   litmus/Makefile              1
-rw-r--r--   litmus/sched_task_trace.c   241
3 files changed, 276 insertions, 0 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig
index e4624ee20adc..19211ac1320a 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -24,6 +24,40 @@ config FEATHER_TRACE
           you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
           actually enable any events.
 
+config SCHED_TASK_TRACE
+        bool "Trace real-time tasks"
+        depends on FEATHER_TRACE
+        default y
+        help
+          Include support for the sched_trace_XXX() tracing functions. This
+          allows the collection of real-time task events such as job
+          completions, job releases, early completions, etc. This results in a
+          small overhead in the scheduling code. Disable if the overhead is not
+          acceptable (e.g., benchmarking).
+
+          Say Yes for debugging.
+          Say No for overhead tracing.
+
+config SCHED_TASK_TRACE_SHIFT
+        int "Buffer size for sched_trace_xxx() events"
+        depends on SCHED_TASK_TRACE
+        range 8 13
+        default 9
+        help
+
+          Select the buffer size of sched_trace_xxx() events as a power of two.
+          These buffers are statically allocated as per-CPU data. Each event
+          requires 24 bytes storage plus one additional flag byte. Too large
+          buffers can cause issues with the per-cpu allocator (and waste
+          memory). Too small buffers can cause scheduling events to be lost. The
+          "right" size is workload dependent and depends on the number of tasks,
+          each task's period, each task's number of suspensions, and how often
+          the buffer is flushed.
+
+          Examples: 12 =>   4k events
+                    10 =>   1k events
+                     8 =>  512 events
+
 config SCHED_OVERHEAD_TRACE
         bool "Record timestamps for overhead measurements"
         depends on FEATHER_TRACE
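Following the sizing guidance in the SCHED_TASK_TRACE_SHIFT help text above (24 bytes per event plus one flag byte, allocated per CPU), the rough footprint of the static buffers can be estimated with a small back-of-the-envelope sketch; this ignores the ft_buffer bookkeeping, whose exact size is not shown in this patch:

    #include <stdio.h>

    int main(void)
    {
            unsigned int shift    = 9;                 /* Kconfig default           */
            unsigned long events  = 1UL << shift;      /* 2^9 = 512 events          */
            unsigned long bytes   = events * (24 + 1); /* record plus flag byte     */
            printf("%lu events, ~%lu bytes per CPU\n", events, bytes);
            return 0;
    }

For the default shift of 9 this comes to 512 events and roughly 12.8 KB per CPU; at the maximum shift of 13 it grows to 8k events and roughly 200 KB per CPU.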
diff --git a/litmus/Makefile b/litmus/Makefile
index 07f065f9748f..6318f1c6fac8 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -3,5 +3,6 @@
 #
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
+obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
 obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
 obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..5ef8d09ab41f
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,241 @@
+/*
+ * sched_task_trace.c -- record scheduling events to a byte stream
+ */
+
+#define NO_TASK_TRACE_DECLS
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+
+#include <litmus/ftdev.h>
+#include <litmus/litmus.h>
+
+#include <litmus/sched_trace.h>
+#include <litmus/feather_trace.h>
+#include <litmus/ftdev.h>
+
+
+#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
+
+#define now() litmus_clock()
+
+struct local_buffer {
+        struct st_event_record record[NO_EVENTS];
+        char flag[NO_EVENTS];
+        struct ft_buffer ftbuf;
+};
+
+DEFINE_PER_CPU(struct local_buffer, st_event_buffer);
+
+static struct ftdev st_dev;
+
+static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
+{
+        return cpu_online(cpu) ? 0 : -ENODEV;
+}
+
+static int __init init_sched_task_trace(void)
+{
+        struct local_buffer* buf;
+        int i, ok = 0, err;
+        printk("Allocated %u sched_trace_xxx() events per CPU "
+               "(buffer size: %d bytes)\n",
+               NO_EVENTS, (int) sizeof(struct local_buffer));
+
+        err = ftdev_init(&st_dev, THIS_MODULE,
+                         num_online_cpus(), "sched_trace");
+        if (err)
+                goto err_out;
+
+        for (i = 0; i < st_dev.minor_cnt; i++) {
+                buf = &per_cpu(st_event_buffer, i);
+                ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
+                                     sizeof(struct st_event_record),
+                                     buf->flag,
+                                     buf->record);
+                st_dev.minor[i].buf = &buf->ftbuf;
+        }
+        if (ok == st_dev.minor_cnt) {
+                st_dev.can_open = st_dev_can_open;
+                err = register_ftdev(&st_dev);
+                if (err)
+                        goto err_dealloc;
+        } else {
+                err = -EINVAL;
+                goto err_dealloc;
+        }
+
+        return 0;
+
+err_dealloc:
+        ftdev_exit(&st_dev);
+err_out:
+        printk(KERN_WARNING "Could not register sched_trace module\n");
+        return err;
+}
+
+static void __exit exit_sched_task_trace(void)
+{
+        ftdev_exit(&st_dev);
+}
+
+module_init(init_sched_task_trace);
+module_exit(exit_sched_task_trace);
+
+
+static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
+{
+        struct st_event_record* rec = NULL;
+        struct local_buffer* buf;
+
+        buf = &get_cpu_var(st_event_buffer);
+        if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
+                rec->hdr.type = type;
+                rec->hdr.cpu = smp_processor_id();
+                rec->hdr.pid = t ? t->pid : 0;
+                rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
+        } else {
+                put_cpu_var(st_event_buffer);
+        }
+        /* rec will be NULL if it failed */
+        return rec;
+}
+
+static inline void put_record(struct st_event_record* rec)
+{
+        struct local_buffer* buf;
+        buf = &__get_cpu_var(st_event_buffer);
+        ft_buffer_finish_write(&buf->ftbuf, rec);
+        put_cpu_var(st_event_buffer);
+}
+
+feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec = get_record(ST_NAME, t);
+        int i;
+        if (rec) {
+                for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
+                        rec->data.name.cmd[i] = t->comm[i];
+                put_record(rec);
+        }
+}
+
+feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec = get_record(ST_PARAM, t);
+        if (rec) {
+                rec->data.param.wcet = get_exec_cost(t);
+                rec->data.param.period = get_rt_period(t);
+                rec->data.param.phase = get_rt_phase(t);
+                rec->data.param.partition = get_partition(t);
+                rec->data.param.class = get_class(t);
+                put_record(rec);
+        }
+}
+
+feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec = get_record(ST_RELEASE, t);
+        if (rec) {
+                rec->data.release.release = get_release(t);
+                rec->data.release.deadline = get_deadline(t);
+                put_record(rec);
+        }
+}
+
+/* skipped: st_assigned_data, we don't use it atm */
+
+feather_callback void do_sched_trace_task_switch_to(unsigned long id,
+                                                    unsigned long _task)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec;
+        if (is_realtime(t)) {
+                rec = get_record(ST_SWITCH_TO, t);
+                if (rec) {
+                        rec->data.switch_to.when = now();
+                        rec->data.switch_to.exec_time = get_exec_time(t);
+                        put_record(rec);
+                }
+        }
+}
+
+feather_callback void do_sched_trace_task_switch_away(unsigned long id,
+                                                      unsigned long _task)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec;
+        if (is_realtime(t)) {
+                rec = get_record(ST_SWITCH_AWAY, t);
+                if (rec) {
+                        rec->data.switch_away.when = now();
+                        rec->data.switch_away.exec_time = get_exec_time(t);
+                        put_record(rec);
+                }
+        }
+}
+
+feather_callback void do_sched_trace_task_completion(unsigned long id,
+                                                     unsigned long _task,
+                                                     unsigned long forced)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec = get_record(ST_COMPLETION, t);
+        if (rec) {
+                rec->data.completion.when = now();
+                rec->data.completion.forced = forced;
+                put_record(rec);
+        }
+}
+
+feather_callback void do_sched_trace_task_block(unsigned long id,
+                                                unsigned long _task)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec = get_record(ST_BLOCK, t);
+        if (rec) {
+                rec->data.block.when = now();
+                put_record(rec);
+        }
+}
+
+feather_callback void do_sched_trace_task_resume(unsigned long id,
+                                                 unsigned long _task)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec = get_record(ST_RESUME, t);
+        if (rec) {
+                rec->data.resume.when = now();
+                put_record(rec);
+        }
+}
+
+feather_callback void do_sched_trace_sys_release(unsigned long id,
+                                                 unsigned long _start)
+{
+        lt_t *start = (lt_t*) _start;
+        struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
+        if (rec) {
+                rec->data.sys_release.when = now();
+                rec->data.sys_release.release = *start;
+                put_record(rec);
+        }
+}
+
+feather_callback void do_sched_trace_action(unsigned long id,
+                                            unsigned long _task,
+                                            unsigned long action)
+{
+        struct task_struct *t = (struct task_struct*) _task;
+        struct st_event_record* rec = get_record(ST_ACTION, t);
+
+        if (rec) {
+                rec->data.action.when = now();
+                rec->data.action.action = action;
+                put_record(rec);
+        }
+}
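Once the module is initialized, the ftdev layer exposes one per-CPU buffer as a character device named after the "sched_trace" string passed to ftdev_init(). A minimal userspace consumer might look like the sketch below; the /dev path and the interpretation of the first header bytes are assumptions based on the 24-byte record size from the Kconfig help text and the assignments in get_record(), not something this patch defines:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    #define RECORD_SIZE 24  /* per the SCHED_TASK_TRACE_SHIFT help text */

    int main(void)
    {
            unsigned char rec[RECORD_SIZE];
            /* assumed device path; the actual node name depends on the ftdev setup */
            int fd = open("/dev/litmus/sched_trace0", O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return 1;
            while ((n = read(fd, rec, sizeof(rec))) == (ssize_t) sizeof(rec))
                    /* rec[0] is presumably hdr.type, rec[1] hdr.cpu */
                    printf("event type %u on cpu %u\n", rec[0], rec[1]);
            close(fd);
            return 0;
    }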