author     Andrea Bastoni <bastoni@sprg.uniroma2.it>   2012-02-11 15:41:19 -0500
committer  Bjoern Brandenburg <bbb@mpi-sws.org>        2012-03-30 09:39:33 -0400
commit     26bafa3b7880a323d83b8ea71bdb8e2118a5cba0 (patch)
tree       0ca24b2d23aa5588bbe0f14ad50318d6da3479b6
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Add kernel-style events for sched_trace_XXX() functions
Enable kernel-style events (tracepoint) for Litmus. Litmus events trace the
same functions as the sched_trace_XXX(), but can be enabled independently.

So, why another tracing infrastructure then:

- Litmus tracepoints can be recorded and analyzed together (single time
  reference) with all other kernel tracing events (e.g., sched:sched_switch,
  etc.). It's easier to correlate the effects of kernel events on litmus
  tasks.

- It enables a quick way to visualize and process schedule traces using the
  trace-cmd utility and the kernelshark visualizer. Kernelshark lacks
  unit-trace's schedule-correctness checks, but it enables a fast view of
  schedule traces and it has several filtering options (for all kernel
  events, not only Litmus').
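As a rough sketch of what this enables (not part of the patch below: the
call site release_jobs() and its sync_release argument are hypothetical,
while the sched_trace_*() macros, lt_t, and the litmus:* event names come
from this commit), a single sched_trace_*() call now feeds both trace
streams, so a capture along the lines of "trace-cmd record -e litmus -e
sched" shows Litmus job events next to the ordinary scheduler events:

#include <litmus/sched_trace.h>

/* Hypothetical plugin call site: each macro still writes the Feather-Trace
 * binary record and, when CONFIG_SCHED_LITMUS_TRACEPOINT=y, additionally
 * fires the matching kernel-style tracepoint (litmus:litmus_task_release,
 * litmus:litmus_sys_release). */
static void release_jobs(struct task_struct *t, lt_t *sync_release)
{
        sched_trace_task_release(t);
        sched_trace_sys_release(sync_release);
}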
-rw-r--r--   include/litmus/sched_trace.h    105
-rw-r--r--   include/trace/events/litmus.h   231
-rw-r--r--   kernel/sched.c                    6
-rw-r--r--   litmus/Kconfig                   18
4 files changed, 334 insertions, 26 deletions
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 7ca34cb1388..82bde824129 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -164,34 +164,93 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 
 #endif
 
+#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
+
+#include <trace/events/litmus.h>
+
+#else
+
+/* Override trace macros to actually do nothing */
+#define trace_litmus_task_param(t)
+#define trace_litmus_task_release(t)
+#define trace_litmus_switch_to(t)
+#define trace_litmus_switch_away(prev)
+#define trace_litmus_task_completion(t, forced)
+#define trace_litmus_task_block(t)
+#define trace_litmus_task_resume(t)
+#define trace_litmus_sys_release(start)
+
+#endif
+
 
 #define SCHED_TRACE_BASE_ID 500
 
 
 #define sched_trace_task_name(t) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t)
-#define sched_trace_task_param(t) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t)
-#define sched_trace_task_release(t) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t)
-#define sched_trace_task_switch_to(t) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t)
-#define sched_trace_task_switch_away(t) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
-#define sched_trace_task_completion(t, forced) \
-        SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
-                (unsigned long) forced)
-#define sched_trace_task_block(t) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
-#define sched_trace_task_resume(t) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
-#define sched_trace_action(t, action) \
-        SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \
-                (unsigned long) action);
-/* when is a pointer, it does not need an explicit cast to unsigned long */
-#define sched_trace_sys_release(when) \
-        SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
+        SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, \
+                do_sched_trace_task_name, t)
+
+#define sched_trace_task_param(t) \
+        do { \
+                SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, \
+                        do_sched_trace_task_param, t); \
+                trace_litmus_task_param(t); \
+        } while (0)
+
+#define sched_trace_task_release(t) \
+        do { \
+                SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \
+                        do_sched_trace_task_release, t); \
+                trace_litmus_task_release(t); \
+        } while (0)
+
+#define sched_trace_task_switch_to(t) \
+        do { \
+                SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \
+                        do_sched_trace_task_switch_to, t); \
+                trace_litmus_switch_to(t); \
+        } while (0)
+
+#define sched_trace_task_switch_away(t) \
+        do { \
+                SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \
+                        do_sched_trace_task_switch_away, t); \
+                trace_litmus_switch_away(t); \
+        } while (0)
+
+#define sched_trace_task_completion(t, forced) \
+        do { \
+                SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, \
+                        do_sched_trace_task_completion, t, \
+                        (unsigned long) forced); \
+                trace_litmus_task_completion(t, forced); \
+        } while (0)
+
+#define sched_trace_task_block(t) \
+        do { \
+                SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \
+                        do_sched_trace_task_block, t); \
+                trace_litmus_task_block(t); \
+        } while (0)
+
+#define sched_trace_task_resume(t) \
+        do { \
+                SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \
+                        do_sched_trace_task_resume, t); \
+                trace_litmus_task_resume(t); \
+        } while (0)
+
+#define sched_trace_action(t, action) \
+        SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, \
+                do_sched_trace_action, t, (unsigned long) action);
 
+/* when is a pointer, it does not need an explicit cast to unsigned long */
+#define sched_trace_sys_release(when) \
+        do { \
+                SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, \
+                        do_sched_trace_sys_release, when); \
+                trace_litmus_sys_release(when); \
+        } while (0)
 
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
new file mode 100644
index 00000000000..0fffcee02be
--- /dev/null
+++ b/include/trace/events/litmus.h
@@ -0,0 +1,231 @@
+/*
+ * LITMUS^RT kernel style scheduling tracepoints
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM litmus
+
+#if !defined(_SCHED_TASK_TRACEPOINT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _SCHED_TASK_TRACEPOINT_H
+
+#include <linux/tracepoint.h>
+
+#include <litmus/litmus.h>
+#include <litmus/rt_param.h>
+
+/*
+ * Tracing task admission
+ */
+TRACE_EVENT(litmus_task_param,
+
+        TP_PROTO(struct task_struct *t),
+
+        TP_ARGS(t),
+
+        TP_STRUCT__entry(
+                __field( pid_t,         pid )
+                __field( unsigned int,  job )
+                __field( lt_t,          wcet )
+                __field( lt_t,          period )
+                __field( lt_t,          phase )
+                __field( int,           partition )
+        ),
+
+        TP_fast_assign(
+                __entry->pid = t ? t->pid : 0;
+                __entry->job = t ? t->rt_param.job_params.job_no : 0;
+                __entry->wcet = get_exec_cost(t);
+                __entry->period = get_rt_period(t);
+                __entry->phase = get_rt_phase(t);
+                __entry->partition = get_partition(t);
+        ),
+
+        TP_printk("period(%d, %Lu).\nwcet(%d, %Lu).\n",
+                __entry->pid, __entry->period,
+                __entry->pid, __entry->wcet)
+);
+
+/*
+ * Tracing jobs release
+ */
+TRACE_EVENT(litmus_task_release,
+
+        TP_PROTO(struct task_struct *t),
+
+        TP_ARGS(t),
+
+        TP_STRUCT__entry(
+                __field( pid_t,         pid )
+                __field( unsigned int,  job )
+                __field( lt_t,          release )
+                __field( lt_t,          deadline )
+        ),
+
+        TP_fast_assign(
+                __entry->pid = t ? t->pid : 0;
+                __entry->job = t ? t->rt_param.job_params.job_no : 0;
+                __entry->release = get_release(t);
+                __entry->deadline = get_deadline(t);
+        ),
+
+        TP_printk("release(job(%u, %u)): %Lu\ndeadline(job(%u, %u)): %Lu\n",
+                __entry->pid, __entry->job, __entry->release,
+                __entry->pid, __entry->job, __entry->deadline)
+);
+
+/*
+ * Tracepoint for switching to new task
+ */
+TRACE_EVENT(litmus_switch_to,
+
+        TP_PROTO(struct task_struct *t),
+
+        TP_ARGS(t),
+
+        TP_STRUCT__entry(
+                __field( pid_t,         pid )
+                __field( unsigned int,  job )
+                __field( lt_t,          when )
+                __field( lt_t,          exec_time )
+        ),
+
+        TP_fast_assign(
+                __entry->pid = is_realtime(t) ? t->pid : 0;
+                __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0;
+                __entry->when = litmus_clock();
+                __entry->exec_time = get_exec_time(t);
+        ),
+
+        TP_printk("switch_to(job(%u, %u)): %Lu (exec: %Lu)\n",
+                __entry->pid, __entry->job,
+                __entry->when, __entry->exec_time)
+);
+
+/*
+ * Tracepoint for switching away previous task
+ */
+TRACE_EVENT(litmus_switch_away,
+
+        TP_PROTO(struct task_struct *t),
+
+        TP_ARGS(t),
+
+        TP_STRUCT__entry(
+                __field( pid_t,         pid )
+                __field( unsigned int,  job )
+                __field( lt_t,          when )
+                __field( lt_t,          exec_time )
+        ),
+
+        TP_fast_assign(
+                __entry->pid = is_realtime(t) ? t->pid : 0;
+                __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0;
+                __entry->when = litmus_clock();
+                __entry->exec_time = get_exec_time(t);
+        ),
+
+        TP_printk("switch_away(job(%u, %u)): %Lu (exec: %Lu)\n",
+                __entry->pid, __entry->job,
+                __entry->when, __entry->exec_time)
+);
+
+/*
+ * Tracing jobs completion
+ */
+TRACE_EVENT(litmus_task_completion,
+
+        TP_PROTO(struct task_struct *t, unsigned long forced),
+
+        TP_ARGS(t, forced),
+
+        TP_STRUCT__entry(
+                __field( pid_t,         pid )
+                __field( unsigned int,  job )
+                __field( lt_t,          when )
+                __field( unsigned long, forced )
+        ),
+
+        TP_fast_assign(
+                __entry->pid = t ? t->pid : 0;
+                __entry->job = t ? t->rt_param.job_params.job_no : 0;
+                __entry->when = litmus_clock();
+                __entry->forced = forced;
+        ),
+
+        TP_printk("completed(job(%u, %u)): %Lu (forced: %lu)\n",
+                __entry->pid, __entry->job,
+                __entry->when, __entry->forced)
+);
+
+/*
+ * Trace blocking tasks.
+ */
+TRACE_EVENT(litmus_task_block,
+
+        TP_PROTO(struct task_struct *t),
+
+        TP_ARGS(t),
+
+        TP_STRUCT__entry(
+                __field( pid_t,         pid )
+                __field( lt_t,          when )
+        ),
+
+        TP_fast_assign(
+                __entry->pid = t ? t->pid : 0;
+                __entry->when = litmus_clock();
+        ),
+
+        TP_printk("(%u) blocks: %Lu\n", __entry->pid, __entry->when)
+);
+
+/*
+ * Tracing jobs resume
+ */
+TRACE_EVENT(litmus_task_resume,
+
+        TP_PROTO(struct task_struct *t),
+
+        TP_ARGS(t),
+
+        TP_STRUCT__entry(
+                __field( pid_t,         pid )
+                __field( unsigned int,  job )
+                __field( lt_t,          when )
+        ),
+
+        TP_fast_assign(
+                __entry->pid = t ? t->pid : 0;
+                __entry->job = t ? t->rt_param.job_params.job_no : 0;
+                __entry->when = litmus_clock();
+        ),
+
+        TP_printk("resume(job(%u, %u)): %Lu\n",
+                __entry->pid, __entry->job, __entry->when)
+);
+
+/*
+ * Trace synchronous release
+ */
+TRACE_EVENT(litmus_sys_release,
+
+        TP_PROTO(lt_t *start),
+
+        TP_ARGS(start),
+
+        TP_STRUCT__entry(
+                __field( lt_t,          rel )
+                __field( lt_t,          when )
+        ),
+
+        TP_fast_assign(
+                __entry->rel = *start;
+                __entry->when = litmus_clock();
+        ),
+
+        TP_printk("SynRelease(%Lu) at %Lu\n", __entry->rel, __entry->when)
+);
+
+#endif /* _SCHED_TASK_TRACEPOINT_H */
+
+/* Must stay outside the protection */
+#include <trace/define_trace.h>
diff --git a/kernel/sched.c b/kernel/sched.c
index baaca61bc3a..2229d0deec4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -80,14 +80,14 @@
 #include "workqueue_sched.h"
 #include "sched_autogroup.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
 static void litmus_tick(struct rq*, struct task_struct*);
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/sched.h>
-
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 94b48e19957..68459d4dca4 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -138,6 +138,24 @@ config SCHED_TASK_TRACE_SHIFT
           10 => 1k events
            8 => 512 events
 
+config SCHED_LITMUS_TRACEPOINT
+        bool "Enable Event/Tracepoint Tracing for real-time task tracing"
+        depends on TRACEPOINTS
+        default n
+        help
+          Enable kernel-style events (tracepoint) for Litmus. Litmus events
+          trace the same functions as the above sched_trace_XXX(), but can
+          be enabled independently.
+          Litmus tracepoints can be recorded and analyzed together (single
+          time reference) with all other kernel tracing events (e.g.,
+          sched:sched_switch, etc.).
+
+          This also enables a quick way to visualize schedule traces using
+          trace-cmd utility and kernelshark visualizer.
+
+          Say Yes for debugging and visualization purposes.
+          Say No for overhead tracing.
+
 config SCHED_OVERHEAD_TRACE
         bool "Record timestamps for overhead measurements"
         depends on FEATHER_TRACE
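The "Say No for overhead tracing" advice in the help text above follows from
the fallback stubs added to include/litmus/sched_trace.h: with
SCHED_LITMUS_TRACEPOINT=n every trace_litmus_*() invocation expands to
nothing, so only the Feather-Trace path remains. A minimal sketch of the
resulting behavior (example_complete_job() is a hypothetical call site; the
macros and the tracepoint name are from this patch):

#include <litmus/sched_trace.h>

/* With CONFIG_SCHED_LITMUS_TRACEPOINT unset, trace_litmus_task_completion()
 * is the empty stub, so this call compiles down to the Feather-Trace
 * SCHED_TRACE2() record alone; with it set, litmus:litmus_task_completion
 * fires as well and can be captured and filtered by trace-cmd/kernelshark. */
static void example_complete_job(struct task_struct *t)
{
        sched_trace_task_completion(t, 0 /* not a forced completion */);
}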