-rw-r--r--  include/litmus/sched_trace.h | 251
-rw-r--r--  litmus/Kconfig               |  34
-rw-r--r--  litmus/Makefile              |   1
-rw-r--r--  litmus/sched_task_trace.c    | 243
4 files changed, 529 insertions(+), 0 deletions(-)
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
new file mode 100644
index 000000000000..6044d9f0a05a
--- /dev/null
+++ b/include/litmus/sched_trace.h
@@ -0,0 +1,251 @@
1/*
2 * sched_trace.h -- record scheduler events to a byte stream for offline analysis.
3 */
4#ifndef _LINUX_SCHED_TRACE_H_
5#define _LINUX_SCHED_TRACE_H_
6
7/* all times in nanoseconds */
8
9struct st_trace_header {
10 u8 type; /* Of what type is this record? */
11 u8 cpu; /* On which CPU was it recorded? */
12 u16 pid; /* PID of the task. */
13 u32 job; /* The job sequence number. */
14};
15
16#define ST_NAME_LEN 16
17struct st_name_data {
18	char cmd[ST_NAME_LEN]; /* The name of the executable of this process. */
19};
20
21struct st_param_data { /* regular params */
22 u32 wcet;
23 u32 period;
24 u32 phase;
25 u8 partition;
26 u8 class;
27 u8 __unused[2];
28};
29
30struct st_release_data { /* A job was/is going to be released. */
31 u64 release; /* What's the release time? */
32 u64 deadline; /* By when must it finish? */
33};
34
35struct st_assigned_data { /* A job was assigned to a CPU. */
36 u64 when;
37 u8 target; /* Where should it execute? */
38 u8 __unused[7];
39};
40
41struct st_switch_to_data { /* A process was switched to on a given CPU. */
42 u64 when; /* When did this occur? */
43 u32 exec_time; /* Time the current job has executed. */
44 u8 __unused[4];
45
46};
47
48struct st_switch_away_data { /* A process was switched away from on a given CPU. */
49 u64 when;
50 u64 exec_time;
51};
52
53struct st_completion_data { /* A job completed. */
54 u64 when;
55	u8 forced:1; /* Set to 1 if the job overran and the kernel forced the
56	              * completion (advancing the task to its next job);
57	              * set to 0 otherwise. */
58 u8 __uflags:7;
59 u8 __unused[7];
60};
61
62struct st_block_data { /* A task blocks. */
63 u64 when;
64 u64 __unused;
65};
66
67struct st_resume_data { /* A task resumes. */
68 u64 when;
69 u64 __unused;
70};
71
72struct st_action_data {
73 u64 when;
74 u8 action;
75 u8 __unused[7];
76};
77
78struct st_sys_release_data {
79 u64 when;
80 u64 release;
81};
82
83#define DATA(x) struct st_ ## x ## _data x;
84
85typedef enum {
86 ST_NAME = 1, /* Start at one, so that we can spot
87 * uninitialized records. */
88 ST_PARAM,
89 ST_RELEASE,
90 ST_ASSIGNED,
91 ST_SWITCH_TO,
92 ST_SWITCH_AWAY,
93 ST_COMPLETION,
94 ST_BLOCK,
95 ST_RESUME,
96 ST_ACTION,
97 ST_SYS_RELEASE
98} st_event_record_type_t;
99
100struct st_event_record {
101 struct st_trace_header hdr;
102 union {
103 u64 raw[2];
104
105 DATA(name);
106 DATA(param);
107 DATA(release);
108 DATA(assigned);
109 DATA(switch_to);
110 DATA(switch_away);
111 DATA(completion);
112 DATA(block);
113 DATA(resume);
114 DATA(action);
115 DATA(sys_release);
116 } data;
117};
118
119#undef DATA
120
121#ifdef __KERNEL__
122
123#include <linux/sched.h>
124#include <litmus/feather_trace.h>
125
126#ifdef CONFIG_SCHED_TASK_TRACE
127
128#define SCHED_TRACE(id, callback, task) \
129 ft_event1(id, callback, task)
130#define SCHED_TRACE2(id, callback, task, xtra) \
131 ft_event2(id, callback, task, xtra)
132
133/* provide prototypes; needed on sparc64 */
134#ifndef NO_TASK_TRACE_DECLS
135feather_callback void do_sched_trace_task_name(unsigned long id,
136 struct task_struct* task);
137feather_callback void do_sched_trace_task_param(unsigned long id,
138 struct task_struct* task);
139feather_callback void do_sched_trace_task_release(unsigned long id,
140 struct task_struct* task);
141feather_callback void do_sched_trace_task_switch_to(unsigned long id,
142 struct task_struct* task);
143feather_callback void do_sched_trace_task_switch_away(unsigned long id,
144 struct task_struct* task);
145feather_callback void do_sched_trace_task_completion(unsigned long id,
146 struct task_struct* task,
147 unsigned long forced);
148feather_callback void do_sched_trace_task_block(unsigned long id,
149 struct task_struct* task);
150feather_callback void do_sched_trace_task_resume(unsigned long id,
151 struct task_struct* task);
152feather_callback void do_sched_trace_action(unsigned long id,
153 struct task_struct* task,
154 unsigned long action);
155feather_callback void do_sched_trace_sys_release(unsigned long id,
156 lt_t* start);
157
158#endif
159
160#else
161
162#define SCHED_TRACE(id, callback, task) /* no tracing */
163#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
164
165#endif
166
167#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
168
169#include <trace/events/litmus.h>
170
171#else
172
173/* Override trace macros to actually do nothing */
174#define trace_litmus_task_param(t)
175#define trace_litmus_task_release(t)
176#define trace_litmus_switch_to(t)
177#define trace_litmus_switch_away(prev)
178#define trace_litmus_task_completion(t, forced)
179#define trace_litmus_task_block(t)
180#define trace_litmus_task_resume(t)
181#define trace_litmus_sys_release(start)
182
183#endif
184
185
186#define SCHED_TRACE_BASE_ID 500
187
188
189#define sched_trace_task_name(t) \
190 SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, \
191 do_sched_trace_task_name, t)
192
193#define sched_trace_task_param(t) \
194 do { \
195 SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, \
196 do_sched_trace_task_param, t); \
197 } while (0)
198
199#define sched_trace_task_release(t) \
200 do { \
201 SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \
202 do_sched_trace_task_release, t); \
203 } while (0)
204
205#define sched_trace_task_switch_to(t) \
206 do { \
207 SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \
208 do_sched_trace_task_switch_to, t); \
209 } while (0)
210
211#define sched_trace_task_switch_away(t) \
212 do { \
213 SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \
214 do_sched_trace_task_switch_away, t); \
215 } while (0)
216
217#define sched_trace_task_completion(t, forced) \
218 do { \
219 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, \
220 do_sched_trace_task_completion, t, \
221 (unsigned long) forced); \
222 } while (0)
223
224#define sched_trace_task_block(t) \
225 do { \
226 SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \
227 do_sched_trace_task_block, t); \
228 } while (0)
229
230#define sched_trace_task_resume(t) \
231 do { \
232 SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \
233 do_sched_trace_task_resume, t); \
234 } while (0)
235
236#define sched_trace_action(t, action) \
237 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, \
238		do_sched_trace_action, t, (unsigned long) action)
239
240/* when is a pointer, it does not need an explicit cast to unsigned long */
241#define sched_trace_sys_release(when) \
242 do { \
243 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, \
244 do_sched_trace_sys_release, when); \
245 } while (0)
246
247#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
248
249#endif /* __KERNEL__ */
250
251#endif
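
For orientation, the sched_trace_*() macros above are what scheduler plugins call at the
corresponding scheduling events; every invocation funnels through Feather-Trace into the
per-CPU buffers added by the rest of this patch. A minimal caller sketch, assuming the
LITMUS^RT core helpers is_realtime() and prepare_for_next_period() (the my_plugin_*
names are hypothetical and not part of this patch):

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_trace.h>

/* Hypothetical example only: record a (possibly forced) job completion
 * followed by the release of the job that comes after it. */
static void my_plugin_job_completion(struct task_struct *t, int forced)
{
	sched_trace_task_completion(t, forced);
	/* set up the next job's release time and deadline */
	prepare_for_next_period(t);
	sched_trace_task_release(t);
}

/* Hypothetical example only: bracket a context switch. */
static void my_plugin_context_switch(struct task_struct *prev,
				     struct task_struct *next)
{
	if (is_realtime(prev))
		sched_trace_task_switch_away(prev);
	if (is_realtime(next))
		sched_trace_task_switch_to(next);
}

With CONFIG_SCHED_TASK_TRACE disabled, the macros expand to nothing, so such call sites
compile away entirely.
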
diff --git a/litmus/Kconfig b/litmus/Kconfig
index e4624ee20adc..19211ac1320a 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -24,6 +24,40 @@ config FEATHER_TRACE
24	  you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
25	  actually enable any events.
26
27config SCHED_TASK_TRACE
28 bool "Trace real-time tasks"
29 depends on FEATHER_TRACE
30 default y
31 help
32 Include support for the sched_trace_XXX() tracing functions. This
33 allows the collection of real-time task events such as job
34 completions, job releases, early completions, etc. This results in a
35 small overhead in the scheduling code. Disable if the overhead is not
36 acceptable (e.g., benchmarking).
37
38 Say Yes for debugging.
39	  Say No when measuring overheads (benchmarking).
40
41config SCHED_TASK_TRACE_SHIFT
42 int "Buffer size for sched_trace_xxx() events"
43 depends on SCHED_TASK_TRACE
44 range 8 13
45 default 9
46 help
47
48 Select the buffer size of sched_trace_xxx() events as a power of two.
49 These buffers are statically allocated as per-CPU data. Each event
50	  requires 24 bytes of storage plus one additional flag byte. Buffers that
51	  are too large can cause issues with the per-CPU allocator (and waste
52	  memory); buffers that are too small can cause scheduling events to be lost.
53	  The "right" size is workload-dependent: it depends on the number of tasks,
54	  each task's period, each task's number of suspensions, and how often
55	  the buffer is flushed.
56
57 Examples: 12 => 4k events
58 10 => 1k events
59	             8 => 256 events
60
61config SCHED_OVERHEAD_TRACE
62	bool "Record timestamps for overhead measurements"
63	depends on FEATHER_TRACE
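
To make the sizing guidance above concrete: a record is 24 bytes and ft_buffer keeps one
flag byte per slot, so the statically allocated per-CPU footprint is roughly
(24 + 1) * 2^SCHED_TASK_TRACE_SHIFT bytes, plus a small amount of ft_buffer bookkeeping.
The default shift of 9 therefore buys 512 events in about 12.5 KiB per CPU, while the
maximum shift of 13 buys 8192 events in about 200 KiB per CPU.
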
diff --git a/litmus/Makefile b/litmus/Makefile
index 07f065f9748f..6318f1c6fac8 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -3,5 +3,6 @@
3#
4
5obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
6obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
7obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
8obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..4f95551df1b4
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,243 @@
1/*
2 * sched_task_trace.c -- record scheduling events to a byte stream
3 */
4
5#define NO_TASK_TRACE_DECLS
6
7#include <linux/module.h>
8#include <linux/sched.h>
9#include <linux/percpu.h>
10
11#include <litmus/ftdev.h>
12#include <litmus/litmus.h>
13
14#include <litmus/sched_trace.h>
15#include <litmus/feather_trace.h>
16#include <litmus/ftdev.h>
17
18
19#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
20
21#define now() litmus_clock()
22
23struct local_buffer {
24 struct st_event_record record[NO_EVENTS];
25 char flag[NO_EVENTS];
26 struct ft_buffer ftbuf;
27};
28
29DEFINE_PER_CPU(struct local_buffer, st_event_buffer);
30
31static struct ftdev st_dev;
32
33static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
34{
35 return cpu_online(cpu) ? 0 : -ENODEV;
36}
37
38static int __init init_sched_task_trace(void)
39{
40 struct local_buffer* buf;
41 int i, ok = 0, err;
42	printk(KERN_INFO "Allocated %u sched_trace_xxx() events per CPU "
43 "(buffer size: %d bytes)\n",
44 NO_EVENTS, (int) sizeof(struct local_buffer));
45
46 err = ftdev_init(&st_dev, THIS_MODULE,
47 num_online_cpus(), "sched_trace");
48 if (err)
49 goto err_out;
50
51 for (i = 0; i < st_dev.minor_cnt; i++) {
52 buf = &per_cpu(st_event_buffer, i);
53 ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
54 sizeof(struct st_event_record),
55 buf->flag,
56 buf->record);
57 st_dev.minor[i].buf = &buf->ftbuf;
58 }
59 if (ok == st_dev.minor_cnt) {
60 st_dev.can_open = st_dev_can_open;
61 err = register_ftdev(&st_dev);
62 if (err)
63 goto err_dealloc;
64 } else {
65 err = -EINVAL;
66 goto err_dealloc;
67 }
68
69 return 0;
70
71err_dealloc:
72 ftdev_exit(&st_dev);
73err_out:
74 printk(KERN_WARNING "Could not register sched_trace module\n");
75 return err;
76}
77
78static void __exit exit_sched_task_trace(void)
79{
80 ftdev_exit(&st_dev);
81}
82
83module_init(init_sched_task_trace);
84module_exit(exit_sched_task_trace);
85
86
87static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
88{
89 struct st_event_record* rec = NULL;
90 struct local_buffer* buf;
91
92 buf = &get_cpu_var(st_event_buffer);
93 if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
94 rec->hdr.type = type;
95 rec->hdr.cpu = smp_processor_id();
96 rec->hdr.pid = t ? t->pid : 0;
97 rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
98 } else {
99 put_cpu_var(st_event_buffer);
100 }
101 /* rec will be NULL if it failed */
102 return rec;
103}
104
105static inline void put_record(struct st_event_record* rec)
106{
107 struct local_buffer* buf;
108 /* don't use get_cpu_var() here, get_record() did that already for us */
109 buf = this_cpu_ptr(&st_event_buffer);
110 ft_buffer_finish_write(&buf->ftbuf, rec);
111 /* matches the get_cpu_var() in get_record() */
112 put_cpu_var(st_event_buffer);
113}
114
115feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
116{
117 struct task_struct *t = (struct task_struct*) _task;
118 struct st_event_record* rec = get_record(ST_NAME, t);
119 int i;
120 if (rec) {
121 for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
122 rec->data.name.cmd[i] = t->comm[i];
123 put_record(rec);
124 }
125}
126
127feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
128{
129 struct task_struct *t = (struct task_struct*) _task;
130 struct st_event_record* rec = get_record(ST_PARAM, t);
131 if (rec) {
132 rec->data.param.wcet = get_exec_cost(t);
133 rec->data.param.period = get_rt_period(t);
134 rec->data.param.phase = get_rt_phase(t);
135 rec->data.param.partition = get_partition(t);
136 rec->data.param.class = get_class(t);
137 put_record(rec);
138 }
139}
140
141feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
142{
143 struct task_struct *t = (struct task_struct*) _task;
144 struct st_event_record* rec = get_record(ST_RELEASE, t);
145 if (rec) {
146 rec->data.release.release = get_release(t);
147 rec->data.release.deadline = get_deadline(t);
148 put_record(rec);
149 }
150}
151
152/* skipped: st_assigned_data, not used at the moment */
153
154feather_callback void do_sched_trace_task_switch_to(unsigned long id,
155 unsigned long _task)
156{
157 struct task_struct *t = (struct task_struct*) _task;
158 struct st_event_record* rec;
159 if (is_realtime(t)) {
160 rec = get_record(ST_SWITCH_TO, t);
161 if (rec) {
162 rec->data.switch_to.when = now();
163 rec->data.switch_to.exec_time = get_exec_time(t);
164 put_record(rec);
165 }
166 }
167}
168
169feather_callback void do_sched_trace_task_switch_away(unsigned long id,
170 unsigned long _task)
171{
172 struct task_struct *t = (struct task_struct*) _task;
173 struct st_event_record* rec;
174 if (is_realtime(t)) {
175 rec = get_record(ST_SWITCH_AWAY, t);
176 if (rec) {
177 rec->data.switch_away.when = now();
178 rec->data.switch_away.exec_time = get_exec_time(t);
179 put_record(rec);
180 }
181 }
182}
183
184feather_callback void do_sched_trace_task_completion(unsigned long id,
185 unsigned long _task,
186 unsigned long forced)
187{
188 struct task_struct *t = (struct task_struct*) _task;
189 struct st_event_record* rec = get_record(ST_COMPLETION, t);
190 if (rec) {
191 rec->data.completion.when = now();
192 rec->data.completion.forced = forced;
193 put_record(rec);
194 }
195}
196
197feather_callback void do_sched_trace_task_block(unsigned long id,
198 unsigned long _task)
199{
200 struct task_struct *t = (struct task_struct*) _task;
201 struct st_event_record* rec = get_record(ST_BLOCK, t);
202 if (rec) {
203 rec->data.block.when = now();
204 put_record(rec);
205 }
206}
207
208feather_callback void do_sched_trace_task_resume(unsigned long id,
209 unsigned long _task)
210{
211 struct task_struct *t = (struct task_struct*) _task;
212 struct st_event_record* rec = get_record(ST_RESUME, t);
213 if (rec) {
214 rec->data.resume.when = now();
215 put_record(rec);
216 }
217}
218
219feather_callback void do_sched_trace_sys_release(unsigned long id,
220 unsigned long _start)
221{
222 lt_t *start = (lt_t*) _start;
223 struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
224 if (rec) {
225 rec->data.sys_release.when = now();
226 rec->data.sys_release.release = *start;
227 put_record(rec);
228 }
229}
230
231feather_callback void do_sched_trace_action(unsigned long id,
232 unsigned long _task,
233 unsigned long action)
234{
235 struct task_struct *t = (struct task_struct*) _task;
236 struct st_event_record* rec = get_record(ST_ACTION, t);
237
238 if (rec) {
239 rec->data.action.when = now();
240 rec->data.action.action = action;
241 put_record(rec);
242 }
243}
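
Since the records are plain fixed-size structs, offline analysis amounts to reading
24-byte st_event_record entries from the per-CPU ftdev minors registered above and
switching on hdr.type. A hypothetical userspace reader sketch; the device path is an
assumption (it depends on where ftdev exposes its "sched_trace" nodes, e.g. under
/dev/litmus/), and a real tool would also decode the per-type payloads:

#include <stdio.h>
#include <stdint.h>

/* Userspace mirror of the 8-byte header plus 16-byte payload union. */
struct st_trace_header {
	uint8_t  type;
	uint8_t  cpu;
	uint16_t pid;
	uint32_t job;
};

struct st_event_record {
	struct st_trace_header hdr;
	uint64_t raw[2];	/* overlay of the per-type payloads */
};

int main(int argc, char **argv)
{
	/* hypothetical default path; pass the actual device node as argv[1] */
	const char *path = argc > 1 ? argv[1] : "/dev/litmus/sched_trace0";
	struct st_event_record rec;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fread(&rec, sizeof(rec), 1, f) == 1)
		printf("type=%u cpu=%u pid=%u job=%u\n",
		       (unsigned) rec.hdr.type, (unsigned) rec.hdr.cpu,
		       (unsigned) rec.hdr.pid, (unsigned) rec.hdr.job);
	fclose(f);
	return 0;
}

Note that the kernel writes the records in host byte order, so the stream should be
decoded on a machine with the same endianness as the traced kernel.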