Diffstat (limited to 'include/litmus/sched_trace.h')
 include/litmus/sched_trace.h | 174 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 156 insertions(+), 18 deletions(-)
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 7ca34cb13881..232c7588d103 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -11,12 +11,12 @@ struct st_trace_header {
 	u8	cpu;		/* On which CPU was it recorded? */
 	u16	pid;		/* PID of the task.              */
 	u32	job;		/* The job sequence number.      */
-};
+} __attribute__((packed));
 
 #define ST_NAME_LEN 16
 struct st_name_data {
 	char	cmd[ST_NAME_LEN];/* The name of the executable of this process. */
-};
+} __attribute__((packed));
 
 struct st_param_data { /* regular params */
 	u32	wcet;
@@ -25,30 +25,29 @@ struct st_param_data { /* regular params */
 	u8	partition;
 	u8	class;
 	u8	__unused[2];
-};
+} __attribute__((packed));
 
 struct st_release_data { /* A job is was/is going to be released. */
 	u64	release;	/* What's the release time?  */
 	u64	deadline;	/* By when must it finish?   */
-};
+} __attribute__((packed));
 
 struct st_assigned_data { /* A job was asigned to a CPU. */
 	u64	when;
 	u8	target;		/* Where should it execute?  */
 	u8	__unused[7];
-};
+} __attribute__((packed));
 
 struct st_switch_to_data { /* A process was switched to on a given CPU. */
 	u64	when;		/* When did this occur?      */
 	u32	exec_time;	/* Time the current job has executed. */
 	u8	__unused[4];
-
-};
+} __attribute__((packed));
 
 struct st_switch_away_data { /* A process was switched away from on a given CPU. */
 	u64	when;
 	u64	exec_time;
-};
+} __attribute__((packed));
 
 struct st_completion_data { /* A job completed. */
 	u64	when;
@@ -56,35 +55,92 @@ struct st_completion_data { /* A job completed. */
 			 * next task automatically; set to 0 otherwise.
 			 */
 	u8	__uflags:7;
-	u8	__unused[7];
-};
+	u16	nv_int_count;
+	u8	__unused[5];
+} __attribute__((packed));
 
 struct st_block_data { /* A task blocks. */
 	u64	when;
 	u64	__unused;
-};
+} __attribute__((packed));
 
 struct st_resume_data { /* A task resumes. */
 	u64	when;
 	u64	__unused;
-};
+} __attribute__((packed));
 
 struct st_action_data {
 	u64	when;
 	u8	action;
 	u8	__unused[7];
-};
+} __attribute__((packed));
 
 struct st_sys_release_data {
 	u64	when;
 	u64	release;
-};
+} __attribute__((packed));
+
+
+struct st_tasklet_release_data {
+	u64	when;
+	u64	__unused;
+} __attribute__((packed));
+
+struct st_tasklet_begin_data {
+	u64	when;
+	u16	exe_pid;
+	u8	__unused[6];
+} __attribute__((packed));
+
+struct st_tasklet_end_data {
+	u64	when;
+	u16	exe_pid;
+	u8	flushed;
+	u8	__unused[5];
+} __attribute__((packed));
+
+
+struct st_work_release_data {
+	u64	when;
+	u64	__unused;
+} __attribute__((packed));
+
+struct st_work_begin_data {
+	u64	when;
+	u16	exe_pid;
+	u8	__unused[6];
+} __attribute__((packed));
+
+struct st_work_end_data {
+	u64	when;
+	u16	exe_pid;
+	u8	flushed;
+	u8	__unused[5];
+} __attribute__((packed));
+
+struct st_effective_priority_change_data {
+	u64	when;
+	u16	inh_pid;
+	u8	__unused[6];
+} __attribute__((packed));
+
+struct st_nv_interrupt_begin_data {
+	u64	when;
+	u32	device;
+	u32	serialNumber;
+} __attribute__((packed));
+
+struct st_nv_interrupt_end_data {
+	u64	when;
+	u32	device;
+	u32	serialNumber;
+} __attribute__((packed));
 
 #define DATA(x) struct st_ ## x ## _data x;
 
 typedef enum {
 	ST_NAME = 1,		/* Start at one, so that we can spot
 				 * uninitialized records. */
 	ST_PARAM,
 	ST_RELEASE,
 	ST_ASSIGNED,
@@ -94,7 +150,16 @@ typedef enum {
 	ST_BLOCK,
 	ST_RESUME,
 	ST_ACTION,
-	ST_SYS_RELEASE
+	ST_SYS_RELEASE,
+	ST_TASKLET_RELEASE,
+	ST_TASKLET_BEGIN,
+	ST_TASKLET_END,
+	ST_WORK_RELEASE,
+	ST_WORK_BEGIN,
+	ST_WORK_END,
+	ST_EFF_PRIO_CHANGE,
+	ST_NV_INTERRUPT_BEGIN,
+	ST_NV_INTERRUPT_END,
 } st_event_record_type_t;
 
 struct st_event_record {
@@ -113,8 +178,17 @@ struct st_event_record {
 		DATA(resume);
 		DATA(action);
 		DATA(sys_release);
+		DATA(tasklet_release);
+		DATA(tasklet_begin);
+		DATA(tasklet_end);
+		DATA(work_release);
+		DATA(work_begin);
+		DATA(work_end);
+		DATA(effective_priority_change);
+		DATA(nv_interrupt_begin);
+		DATA(nv_interrupt_end);
 	} data;
-};
+} __attribute__((packed));
 
 #undef DATA
 
@@ -129,6 +203,8 @@ struct st_event_record {
 	ft_event1(id, callback, task)
 #define SCHED_TRACE2(id, callback, task, xtra) \
 	ft_event2(id, callback, task, xtra)
+#define SCHED_TRACE3(id, callback, task, xtra1, xtra2) \
+	ft_event3(id, callback, task, xtra1, xtra2)
 
 /* provide prototypes; needed on sparc64 */
 #ifndef NO_TASK_TRACE_DECLS
@@ -155,12 +231,45 @@ feather_callback void do_sched_trace_action(unsigned long id,
 feather_callback void do_sched_trace_sys_release(unsigned long id,
 						  lt_t* start);
 
+
+feather_callback void do_sched_trace_tasklet_release(unsigned long id,
+						      struct task_struct* owner);
+feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
+						    struct task_struct* owner);
+feather_callback void do_sched_trace_tasklet_end(unsigned long id,
+						  struct task_struct* owner,
+						  unsigned long flushed);
+
+feather_callback void do_sched_trace_work_release(unsigned long id,
+						   struct task_struct* owner);
+feather_callback void do_sched_trace_work_begin(unsigned long id,
+						 struct task_struct* owner,
+						 struct task_struct* exe);
+feather_callback void do_sched_trace_work_end(unsigned long id,
+					       struct task_struct* owner,
+					       struct task_struct* exe,
+					       unsigned long flushed);
+
+feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
+						      struct task_struct* task,
+						      struct task_struct* inh);
+
+feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
+							 u32 device);
+feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id,
+						       unsigned long unused);
+
+
+/* returns true if we're tracing an interrupt on current CPU */
+/* int is_interrupt_tracing_active(void); */
+
 #endif
 
 #else
 
 #define SCHED_TRACE(id, callback, task)        /* no tracing */
 #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
+#define SCHED_TRACE3(id, callback, task, xtra1, xtra2)
 
 #endif
 
@@ -193,6 +302,35 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
 
 
+#define sched_trace_tasklet_release(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t)
+
+#define sched_trace_tasklet_begin(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, do_sched_trace_tasklet_begin, t)
+
+#define sched_trace_tasklet_end(t, flushed) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 13, do_sched_trace_tasklet_end, t, flushed)
+
+
+#define sched_trace_work_release(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 14, do_sched_trace_work_release, t)
+
+#define sched_trace_work_begin(t, e) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 15, do_sched_trace_work_begin, t, e)
+
+#define sched_trace_work_end(t, e, flushed) \
+	SCHED_TRACE3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end, t, e, flushed)
+
+
+#define sched_trace_eff_prio_change(t, inh) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 17, do_sched_trace_eff_prio_change, t, inh)
+
+
+#define sched_trace_nv_interrupt_begin(d) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 18, do_sched_trace_nv_interrupt_begin, d)
+#define sched_trace_nv_interrupt_end(d) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d)
+
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
 #endif /* __KERNEL__ */
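
Note on the record layout: with every record type declared __attribute__((packed)), each entry in the binary trace stream has a fixed layout, namely the 8-byte st_trace_header (type, cpu, pid, job) followed by a 16-byte type-specific payload, 24 bytes in total and independent of compiler padding. The following is a minimal userspace sketch (not part of the patch) that decodes such records from a capture file; the local struct simply mirrors the header plus a raw payload, and the capture-file workflow and hex dump are illustrative assumptions rather than the actual LITMUS^RT tooling.

/*
 * Sketch only: decode packed 24-byte sched_trace records from a capture
 * file given on the command line.  Field layout follows sched_trace.h as
 * patched above; the capture format and matching host byte order are
 * assumptions made for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct st_record {
	uint8_t  type;      /* st_event_record_type_t value */
	uint8_t  cpu;       /* CPU that logged the record   */
	uint16_t pid;       /* task the record refers to    */
	uint32_t job;       /* job sequence number          */
	uint8_t  data[16];  /* type-specific payload        */
} __attribute__((packed));

int main(int argc, char **argv)
{
	struct st_record rec;
	FILE *f;
	int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <sched_trace capture file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	/* sizeof(rec) is exactly 24 because the struct is packed */
	while (fread(&rec, sizeof(rec), 1, f) == 1) {
		printf("type=%2u cpu=%u pid=%5u job=%4u payload=",
		       rec.type, rec.cpu, rec.pid, rec.job);
		for (i = 0; i < 16; i++)
			printf("%02x", rec.data[i]);
		printf("\n");
	}
	fclose(f);
	return 0;
}

A record such as the new st_completion_data, which now carries a u16 nv_int_count between byte-sized fields, is exactly the kind of layout an unpacked build could pad differently; forcing packed on every struct presumably keeps the on-disk format stable for trace parsers built outside the kernel tree.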