author    Bjoern Brandenburg <bbb@mpi-sws.org>    2015-08-09 07:18:47 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>    2017-05-26 17:12:26 -0400
commit    b556571e81005147c0b72112b386e6820dcf30be (patch)
tree      5f47e78f48bd9a718ecb7cca205ee0be38fbf024
parent    abe0ce1ddd3cf6fc3300eb67a8f07089c6083a45 (diff)
Add schedule tracing support
This patch introduces the sched_trace infrastructure, which in principle
allows tracing the generated schedule. However, this patch does not yet
integrate the callbacks with the kernel.

sched_trace: record exec_time in ST_COMPLETION records

sched_trace: add sched_trace_last_suspension_as_completion()

New tracepoint for tracing the completion of sporadic jobs.
-rw-r--r--  include/litmus/sched_trace.h  259
-rw-r--r--  litmus/Kconfig                 34
-rw-r--r--  litmus/Makefile                 1
-rw-r--r--  litmus/sched_task_trace.c     259
4 files changed, 553 insertions(+), 0 deletions(-)
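For orientation, the macros introduced below are intended to be invoked from the scheduler plugins once the callbacks are wired up; as the commit message notes, this patch only defines them. A minimal sketch of such call sites follows; the hook functions on_job_release()/on_job_completion() are illustrative names and not part of this patch.

/* Hypothetical integration sketch -- not part of this patch. */
#include <litmus/sched_trace.h>

static void on_job_release(struct task_struct *t)
{
	/* expands to a Feather-Trace event when CONFIG_SCHED_TASK_TRACE=y
	 * and compiles away to nothing otherwise */
	sched_trace_task_release(t);
}

static void on_job_completion(struct task_struct *t, int forced)
{
	/* 'forced' distinguishes budget-enforced completions (1) from
	 * voluntary completions (0); recorded in st_completion_data */
	sched_trace_task_completion(t, forced);
}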
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
new file mode 100644
index 000000000000..589c63a142b5
--- /dev/null
+++ b/include/litmus/sched_trace.h
@@ -0,0 +1,259 @@
1/*
2 * sched_trace.h -- record scheduler events to a byte stream for offline analysis.
3 */
4#ifndef _LINUX_SCHED_TRACE_H_
5#define _LINUX_SCHED_TRACE_H_
6
7/* all times in nanoseconds */
8
9struct st_trace_header {
10 u8 type; /* Of what type is this record? */
11 u8 cpu; /* On which CPU was it recorded? */
12 u16 pid; /* PID of the task. */
13 u32 job; /* The job sequence number. */
14};
15
16#define ST_NAME_LEN 16
17struct st_name_data {
18 char cmd[ST_NAME_LEN];/* The name of the executable of this process. */
19};
20
21struct st_param_data { /* regular params */
22 u32 wcet;
23 u32 period;
24 u32 phase;
25 u8 partition;
26 u8 class;
27 u8 __unused[2];
28};
29
30struct st_release_data { /* A job was/is going to be released. */
31 u64 release; /* What's the release time? */
32 u64 deadline; /* By when must it finish? */
33};
34
35struct st_assigned_data { /* A job was assigned to a CPU. */
36 u64 when;
37 u8 target; /* Where should it execute? */
38 u8 __unused[7];
39};
40
41struct st_switch_to_data { /* A process was switched to on a given CPU. */
42 u64 when; /* When did this occur? */
43 u32 exec_time; /* Time the current job has executed. */
44 u8 __unused[4];
45
46};
47
48struct st_switch_away_data { /* A process was switched away from on a given CPU. */
49 u64 when;
50 u64 exec_time;
51};
52
53struct st_completion_data { /* A job completed. */
54 u64 when;
55 u64 forced:1; /* Set to 1 if job overran and kernel advanced to the
56 * next task automatically; set to 0 otherwise.
57 */
58 u64 exec_time:63; /* Actual execution time of job. */
59};
60
61struct st_block_data { /* A task blocks. */
62 u64 when;
63 u64 __unused;
64};
65
66struct st_resume_data { /* A task resumes. */
67 u64 when;
68 u64 __unused;
69};
70
71struct st_action_data {
72 u64 when;
73 u8 action;
74 u8 __unused[7];
75};
76
77struct st_sys_release_data {
78 u64 when;
79 u64 release;
80};
81
82#define DATA(x) struct st_ ## x ## _data x;
83
84typedef enum {
85 ST_NAME = 1, /* Start at one, so that we can spot
86 * uninitialized records. */
87 ST_PARAM,
88 ST_RELEASE,
89 ST_ASSIGNED,
90 ST_SWITCH_TO,
91 ST_SWITCH_AWAY,
92 ST_COMPLETION,
93 ST_BLOCK,
94 ST_RESUME,
95 ST_ACTION,
96 ST_SYS_RELEASE
97} st_event_record_type_t;
98
99struct st_event_record {
100 struct st_trace_header hdr;
101 union {
102 u64 raw[2];
103
104 DATA(name);
105 DATA(param);
106 DATA(release);
107 DATA(assigned);
108 DATA(switch_to);
109 DATA(switch_away);
110 DATA(completion);
111 DATA(block);
112 DATA(resume);
113 DATA(action);
114 DATA(sys_release);
115 } data;
116};
117
118#undef DATA
119
120#ifdef __KERNEL__
121
122#include <linux/sched.h>
123#include <litmus/feather_trace.h>
124
125#ifdef CONFIG_SCHED_TASK_TRACE
126
127#define SCHED_TRACE(id, callback, task) \
128 ft_event1(id, callback, task)
129#define SCHED_TRACE2(id, callback, task, xtra) \
130 ft_event2(id, callback, task, xtra)
131
132/* provide prototypes; needed on sparc64 */
133#ifndef NO_TASK_TRACE_DECLS
134feather_callback void do_sched_trace_task_name(unsigned long id,
135 struct task_struct* task);
136feather_callback void do_sched_trace_task_param(unsigned long id,
137 struct task_struct* task);
138feather_callback void do_sched_trace_task_release(unsigned long id,
139 struct task_struct* task);
140feather_callback void do_sched_trace_task_switch_to(unsigned long id,
141 struct task_struct* task);
142feather_callback void do_sched_trace_task_switch_away(unsigned long id,
143 struct task_struct* task);
144feather_callback void do_sched_trace_task_completion(unsigned long id,
145 struct task_struct* task,
146 unsigned long forced);
147feather_callback void do_sched_trace_last_suspension_as_completion(
148 unsigned long id,
149 struct task_struct* task);
150feather_callback void do_sched_trace_task_block(unsigned long id,
151 struct task_struct* task);
152feather_callback void do_sched_trace_task_resume(unsigned long id,
153 struct task_struct* task);
154feather_callback void do_sched_trace_action(unsigned long id,
155 struct task_struct* task,
156 unsigned long action);
157feather_callback void do_sched_trace_sys_release(unsigned long id,
158 lt_t* start);
159
160#endif
161
162#else
163
164#define SCHED_TRACE(id, callback, task) /* no tracing */
165#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
166
167#endif
168
169#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
170
171#include <trace/events/litmus.h>
172
173#else
174
175/* Override trace macros to actually do nothing */
176#define trace_litmus_task_param(t)
177#define trace_litmus_task_release(t)
178#define trace_litmus_switch_to(t)
179#define trace_litmus_switch_away(prev)
180#define trace_litmus_task_completion(t, forced)
181#define trace_litmus_task_block(t)
182#define trace_litmus_task_resume(t)
183#define trace_litmus_sys_release(start)
184
185#endif
186
187
188#define SCHED_TRACE_BASE_ID 500
189
190
191#define sched_trace_task_name(t) \
192 SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, \
193 do_sched_trace_task_name, t)
194
195#define sched_trace_task_param(t) \
196 do { \
197 SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, \
198 do_sched_trace_task_param, t); \
199 } while (0)
200
201#define sched_trace_task_release(t) \
202 do { \
203 SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \
204 do_sched_trace_task_release, t); \
205 } while (0)
206
207#define sched_trace_task_switch_to(t) \
208 do { \
209 SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \
210 do_sched_trace_task_switch_to, t); \
211 } while (0)
212
213#define sched_trace_task_switch_away(t) \
214 do { \
215 SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \
216 do_sched_trace_task_switch_away, t); \
217 } while (0)
218
219#define sched_trace_task_completion(t, forced) \
220 do { \
221 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, \
222 do_sched_trace_task_completion, t, \
223 (unsigned long) forced); \
224 } while (0)
225
226#define sched_trace_task_block(t) \
227 do { \
228 SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \
229 do_sched_trace_task_block, t); \
230 } while (0)
231
232#define sched_trace_task_resume(t) \
233 do { \
234 SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \
235 do_sched_trace_task_resume, t); \
236 } while (0)
237
238#define sched_trace_action(t, action) \
239 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, \
240 do_sched_trace_action, t, (unsigned long) action);
241
242/* when is a pointer, it does not need an explicit cast to unsigned long */
243#define sched_trace_sys_release(when) \
244 do { \
245 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, \
246 do_sched_trace_sys_release, when); \
247 } while (0)
248
249#define sched_trace_last_suspension_as_completion(t) \
250 do { \
251 SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, \
252 do_sched_trace_last_suspension_as_completion, t); \
253 } while (0)
254
255#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
256
257#endif /* __KERNEL__ */
258
259#endif
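A note on the record layout above: struct st_trace_header packs into 8 bytes (u8 + u8 + u16 + u32) and the data union is 16 bytes (u64 raw[2]), so each st_event_record occupies 24 bytes, matching the "24 bytes storage plus one additional flag byte" figure in the Kconfig help below. A compile-time sanity check, assuming a C11 toolchain, could look like the following sketch (not part of this patch):

/* Illustrative size checks; not part of this patch. */
#include <litmus/sched_trace.h>

_Static_assert(sizeof(struct st_trace_header) == 8,
	       "8-byte header: type + cpu + pid + job");
_Static_assert(sizeof(struct st_event_record) == 24,
	       "24-byte record: 8-byte header + 16-byte payload union");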
diff --git a/litmus/Kconfig b/litmus/Kconfig
index e4624ee20adc..19211ac1320a 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -24,6 +24,40 @@ config FEATHER_TRACE
24 you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
25 actually enable any events.
26
27config SCHED_TASK_TRACE
28 bool "Trace real-time tasks"
29 depends on FEATHER_TRACE
30 default y
31 help
32 Include support for the sched_trace_XXX() tracing functions. This
33 allows the collection of real-time task events such as job
34 completions, job releases, early completions, etc. This results in a
35 small overhead in the scheduling code. Disable if the overhead is not
36 acceptable (e.g., benchmarking).
37
38 Say Yes for debugging.
39 Say No for overhead tracing.
40
41config SCHED_TASK_TRACE_SHIFT
42 int "Buffer size for sched_trace_xxx() events"
43 depends on SCHED_TASK_TRACE
44 range 8 13
45 default 9
46 help
47
48 Select the buffer size of sched_trace_xxx() events as a power of two.
49 These buffers are statically allocated as per-CPU data. Each event
50 requires 24 bytes storage plus one additional flag byte. Too large
51 buffers can cause issues with the per-cpu allocator (and waste
52 memory). Too small buffers can cause scheduling events to be lost. The
53 "right" size is workload dependent and depends on the number of tasks,
54 each task's period, each task's number of suspensions, and how often
55 the buffer is flushed.
56
57 Examples: 12 => 4k events
58 10 => 1k events
59 8 => 512 events
60
61config SCHED_OVERHEAD_TRACE
62	bool "Record timestamps for overhead measurements"
63	depends on FEATHER_TRACE
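To make the SCHED_TASK_TRACE_SHIFT sizing above concrete: a shift of k yields 2^k records per CPU, each needing 24 bytes plus a one-byte flag, so the default of 9 costs roughly 512 * 25 = 12,800 bytes per CPU and the maximum of 13 roughly 200 KiB per CPU, in addition to the fixed ft_buffer bookkeeping in struct local_buffer (defined in litmus/sched_task_trace.c below). A quick way to tabulate this, as a stand-alone user-space sketch:

/* Back-of-the-envelope per-CPU footprint for each allowed shift value;
 * ignores struct padding and the ft_buffer bookkeeping fields. */
#include <stdio.h>

int main(void)
{
	for (int shift = 8; shift <= 13; shift++) {
		unsigned long events = 1UL << shift;
		unsigned long bytes  = events * (24 + 1); /* record + flag byte */
		printf("shift %2d -> %5lu events, ~%3lu KiB per CPU\n",
		       shift, events, bytes >> 10);
	}
	return 0;
}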
diff --git a/litmus/Makefile b/litmus/Makefile
index 07f065f9748f..6318f1c6fac8 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -3,5 +3,6 @@
3#
4
5obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
6obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
7obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
8obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..cfbc1c77383a
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,259 @@
1/*
2 * sched_task_trace.c -- record scheduling events to a byte stream
3 */
4
5#define NO_TASK_TRACE_DECLS
6
7#include <linux/module.h>
8#include <linux/sched.h>
9#include <linux/percpu.h>
10
11#include <litmus/ftdev.h>
12#include <litmus/litmus.h>
13
14#include <litmus/sched_trace.h>
15#include <litmus/feather_trace.h>
16#include <litmus/ftdev.h>
17
18
19#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
20
21#define now() litmus_clock()
22
23struct local_buffer {
24 struct st_event_record record[NO_EVENTS];
25 char flag[NO_EVENTS];
26 struct ft_buffer ftbuf;
27};
28
29DEFINE_PER_CPU(struct local_buffer, st_event_buffer);
30
31static struct ftdev st_dev;
32
33static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
34{
35 return cpu_online(cpu) ? 0 : -ENODEV;
36}
37
38static int __init init_sched_task_trace(void)
39{
40 struct local_buffer* buf;
41 int i, ok = 0, err;
42 printk("Allocated %u sched_trace_xxx() events per CPU "
43 "(buffer size: %d bytes)\n",
44 NO_EVENTS, (int) sizeof(struct local_buffer));
45
46 err = ftdev_init(&st_dev, THIS_MODULE,
47 num_online_cpus(), "sched_trace");
48 if (err)
49 goto err_out;
50
51 for (i = 0; i < st_dev.minor_cnt; i++) {
52 buf = &per_cpu(st_event_buffer, i);
53 ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
54 sizeof(struct st_event_record),
55 buf->flag,
56 buf->record);
57 st_dev.minor[i].buf = &buf->ftbuf;
58 }
59 if (ok == st_dev.minor_cnt) {
60 st_dev.can_open = st_dev_can_open;
61 err = register_ftdev(&st_dev);
62 if (err)
63 goto err_dealloc;
64 } else {
65 err = -EINVAL;
66 goto err_dealloc;
67 }
68
69 return 0;
70
71err_dealloc:
72 ftdev_exit(&st_dev);
73err_out:
74 printk(KERN_WARNING "Could not register sched_trace module\n");
75 return err;
76}
77
78static void __exit exit_sched_task_trace(void)
79{
80 ftdev_exit(&st_dev);
81}
82
83module_init(init_sched_task_trace);
84module_exit(exit_sched_task_trace);
85
86
87static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
88{
89 struct st_event_record* rec = NULL;
90 struct local_buffer* buf;
91
92 buf = &get_cpu_var(st_event_buffer);
93 if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
94 rec->hdr.type = type;
95 rec->hdr.cpu = smp_processor_id();
96 rec->hdr.pid = t ? t->pid : 0;
97 rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
98 } else {
99 put_cpu_var(st_event_buffer);
100 }
101 /* rec will be NULL if it failed */
102 return rec;
103}
104
105static inline void put_record(struct st_event_record* rec)
106{
107 struct local_buffer* buf;
108 /* don't use get_cpu_var() here, get_record() did that already for us */
109 buf = this_cpu_ptr(&st_event_buffer);
110 ft_buffer_finish_write(&buf->ftbuf, rec);
111 /* matches the get_cpu_var() in get_record() */
112 put_cpu_var(st_event_buffer);
113}
114
115feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
116{
117 struct task_struct *t = (struct task_struct*) _task;
118 struct st_event_record* rec = get_record(ST_NAME, t);
119 int i;
120 if (rec) {
121 for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
122 rec->data.name.cmd[i] = t->comm[i];
123 put_record(rec);
124 }
125}
126
127feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
128{
129 struct task_struct *t = (struct task_struct*) _task;
130 struct st_event_record* rec = get_record(ST_PARAM, t);
131 if (rec) {
132 rec->data.param.wcet = get_exec_cost(t);
133 rec->data.param.period = get_rt_period(t);
134 rec->data.param.phase = get_rt_phase(t);
135 rec->data.param.partition = get_partition(t);
136 rec->data.param.class = get_class(t);
137 put_record(rec);
138 }
139}
140
141feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
142{
143 struct task_struct *t = (struct task_struct*) _task;
144 struct st_event_record* rec = get_record(ST_RELEASE, t);
145 if (rec) {
146 rec->data.release.release = get_release(t);
147 rec->data.release.deadline = get_deadline(t);
148 put_record(rec);
149 }
150}
151
152/* skipped: st_assigned_data, we don't use it atm */
153
154feather_callback void do_sched_trace_task_switch_to(unsigned long id,
155 unsigned long _task)
156{
157 struct task_struct *t = (struct task_struct*) _task;
158 struct st_event_record* rec;
159 if (is_realtime(t)) {
160 rec = get_record(ST_SWITCH_TO, t);
161 if (rec) {
162 rec->data.switch_to.when = now();
163 rec->data.switch_to.exec_time = get_exec_time(t);
164 put_record(rec);
165 }
166 }
167}
168
169feather_callback void do_sched_trace_task_switch_away(unsigned long id,
170 unsigned long _task)
171{
172 struct task_struct *t = (struct task_struct*) _task;
173 struct st_event_record* rec;
174 if (is_realtime(t)) {
175 rec = get_record(ST_SWITCH_AWAY, t);
176 if (rec) {
177 rec->data.switch_away.when = now();
178 rec->data.switch_away.exec_time = get_exec_time(t);
179 put_record(rec);
180 }
181 }
182}
183
184feather_callback void do_sched_trace_task_completion(unsigned long id,
185 unsigned long _task,
186 unsigned long forced)
187{
188 struct task_struct *t = (struct task_struct*) _task;
189 struct st_event_record* rec = get_record(ST_COMPLETION, t);
190 if (rec) {
191 rec->data.completion.when = now();
192 rec->data.completion.forced = forced;
193 rec->data.completion.exec_time = get_exec_time(t);
194 put_record(rec);
195 }
196}
197
198feather_callback void do_sched_trace_last_suspension_as_completion(
199 unsigned long id,
200 unsigned long _task)
201{
202 struct task_struct *t = (struct task_struct*) _task;
203 struct st_event_record* rec = get_record(ST_COMPLETION, t);
204 if (rec) {
205 rec->data.completion.when
206 = tsk_rt(t)->job_params.last_suspension;
207 rec->data.completion.forced = 0;
208 rec->data.completion.exec_time = get_exec_time(t);
209 put_record(rec);
210 }
211}
212
213feather_callback void do_sched_trace_task_block(unsigned long id,
214 unsigned long _task)
215{
216 struct task_struct *t = (struct task_struct*) _task;
217 struct st_event_record* rec = get_record(ST_BLOCK, t);
218 if (rec) {
219 rec->data.block.when = now();
220 put_record(rec);
221 }
222}
223
224feather_callback void do_sched_trace_task_resume(unsigned long id,
225 unsigned long _task)
226{
227 struct task_struct *t = (struct task_struct*) _task;
228 struct st_event_record* rec = get_record(ST_RESUME, t);
229 if (rec) {
230 rec->data.resume.when = now();
231 put_record(rec);
232 }
233}
234
235feather_callback void do_sched_trace_sys_release(unsigned long id,
236 unsigned long _start)
237{
238 lt_t *start = (lt_t*) _start;
239 struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
240 if (rec) {
241 rec->data.sys_release.when = now();
242 rec->data.sys_release.release = *start;
243 put_record(rec);
244 }
245}
246
247feather_callback void do_sched_trace_action(unsigned long id,
248 unsigned long _task,
249 unsigned long action)
250{
251 struct task_struct *t = (struct task_struct*) _task;
252 struct st_event_record* rec = get_record(ST_ACTION, t);
253
254 if (rec) {
255 rec->data.action.when = now();
256 rec->data.action.action = action;
257 put_record(rec);
258 }
259}
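For completeness, the records emitted by this module are consumed in user space as raw struct st_event_record values read from the per-CPU ftdev character devices. The sketch below shows a minimal offline reader; the device path /dev/litmus/sched_trace0 and the stand-alone struct definitions are assumptions for illustration and are not established by this patch.

/* Hypothetical user-space reader: dumps record headers from one per-CPU
 * trace device. Layout mirrors include/litmus/sched_trace.h; the device
 * path is an assumption, not defined in this patch. */
#include <stdint.h>
#include <stdio.h>

struct st_trace_header {
	uint8_t  type;		/* st_event_record_type_t value */
	uint8_t  cpu;
	uint16_t pid;
	uint32_t job;
};

struct st_event_record {
	struct st_trace_header hdr;
	uint64_t data[2];	/* event-specific payload */
};

int main(void)
{
	struct st_event_record rec;
	FILE *f = fopen("/dev/litmus/sched_trace0", "rb");

	if (!f)
		return 1;
	while (fread(&rec, sizeof(rec), 1, f) == 1)
		printf("type=%u cpu=%u pid=%u job=%u\n",
		       rec.hdr.type, rec.hdr.cpu, rec.hdr.pid, rec.hdr.job);
	fclose(f);
	return 0;
}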