Diffstat (limited to 'litmus/sched_task_trace.c')
-rw-r--r--  litmus/sched_task_trace.c | 232
1 file changed, 225 insertions, 7 deletions
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 5ef8d09ab41f..d079df2b292a 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/hardirq.h>
 
 #include <litmus/ftdev.h>
 #include <litmus/litmus.h>
@@ -16,13 +17,13 @@
 #include <litmus/ftdev.h>
 
 
-#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
+#define NUM_EVENTS (1 << (CONFIG_SCHED_TASK_TRACE_SHIFT+11))
 
 #define now() litmus_clock()
 
 struct local_buffer {
-	struct st_event_record record[NO_EVENTS];
-	char flag[NO_EVENTS];
+	struct st_event_record record[NUM_EVENTS];
+	char flag[NUM_EVENTS];
 	struct ft_buffer ftbuf;
 };
 
@@ -41,7 +42,7 @@ static int __init init_sched_task_trace(void)
 	int i, ok = 0, err;
 	printk("Allocated %u sched_trace_xxx() events per CPU "
 	       "(buffer size: %d bytes)\n",
-	       NO_EVENTS, (int) sizeof(struct local_buffer));
+	       NUM_EVENTS, (int) sizeof(struct local_buffer));
 
 	err = ftdev_init(&st_dev, THIS_MODULE,
 			num_online_cpus(), "sched_trace");
@@ -50,7 +51,7 @@ static int __init init_sched_task_trace(void)
 
 	for (i = 0; i < st_dev.minor_cnt; i++) {
 		buf = &per_cpu(st_event_buffer, i);
-		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
+		ok += init_ft_buffer(&buf->ftbuf, NUM_EVENTS,
 				     sizeof(struct st_event_record),
 				     buf->flag,
 				     buf->record);
@@ -154,7 +155,8 @@ feather_callback void do_sched_trace_task_switch_to(unsigned long id,
 {
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec;
-	if (is_realtime(t)) {
+	//if (is_realtime(t))  /* comment out to trace EVERYTHING */
+	{
 		rec = get_record(ST_SWITCH_TO, t);
 		if (rec) {
 			rec->data.switch_to.when = now();
@@ -169,7 +171,8 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id,
 {
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec;
-	if (is_realtime(t)) {
+	//if (is_realtime(t))  /* comment out to trace EVERYTHING */
+	{
 		rec = get_record(ST_SWITCH_AWAY, t);
 		if (rec) {
 			rec->data.switch_away.when = now();
@@ -188,6 +191,9 @@ feather_callback void do_sched_trace_task_completion(unsigned long id,
 	if (rec) {
 		rec->data.completion.when = now();
 		rec->data.completion.forced = forced;
+#ifdef LITMUS_NVIDIA
+		rec->data.completion.nv_int_count = (u16)atomic_read(&tsk_rt(t)->nv_int_count);
+#endif
 		put_record(rec);
 	}
 }
@@ -239,3 +245,215 @@ feather_callback void do_sched_trace_action(unsigned long id,
 		put_record(rec);
 	}
 }
+
+
+feather_callback void do_sched_trace_tasklet_release(unsigned long id,
+						      unsigned long _owner)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t);
+
+	if (rec) {
+		rec->data.tasklet_release.when = now();
+		put_record(rec);
+	}
+}
+
+
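+/* For the tasklet begin/end events below, exe_pid records the PID of the
+ * task (current) in whose context the tasklet executes when it runs outside
+ * interrupt context; it is 0 when the tasklet runs in interrupt context. */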
+feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
+						    unsigned long _owner)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t);
+
+	if (rec) {
+		rec->data.tasklet_begin.when = now();
+
+		if (!in_interrupt())
+			rec->data.tasklet_begin.exe_pid = current->pid;
+		else
+			rec->data.tasklet_begin.exe_pid = 0;
+
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_tasklet_begin);
+
+
+feather_callback void do_sched_trace_tasklet_end(unsigned long id,
+						  unsigned long _owner,
+						  unsigned long _flushed)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_TASKLET_END, t);
+
+	if (rec) {
+		rec->data.tasklet_end.when = now();
+		rec->data.tasklet_end.flushed = _flushed;
+
+		if (!in_interrupt())
+			rec->data.tasklet_end.exe_pid = current->pid;
+		else
+			rec->data.tasklet_end.exe_pid = 0;
+
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_tasklet_end);
+
+
+feather_callback void do_sched_trace_work_release(unsigned long id,
+						   unsigned long _owner)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_WORK_RELEASE, t);
+
+	if (rec) {
+		rec->data.work_release.when = now();
+		put_record(rec);
+	}
+}
+
+
+feather_callback void do_sched_trace_work_begin(unsigned long id,
+						 unsigned long _owner,
+						 unsigned long _exe)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_WORK_BEGIN, t);
+
+	if (rec) {
+		struct task_struct *exe = (struct task_struct*) _exe;
+		rec->data.work_begin.exe_pid = exe->pid;
+		rec->data.work_begin.when = now();
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_work_begin);
+
+
+feather_callback void do_sched_trace_work_end(unsigned long id,
+					       unsigned long _owner,
+					       unsigned long _exe,
+					       unsigned long _flushed)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_WORK_END, t);
+
+	if (rec) {
+		struct task_struct *exe = (struct task_struct*) _exe;
+		rec->data.work_end.exe_pid = exe->pid;
+		rec->data.work_end.flushed = _flushed;
+		rec->data.work_end.when = now();
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_work_end);
+
+
+feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
+						      unsigned long _task,
+						      unsigned long _inh)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t);
+
+	if (rec) {
+		struct task_struct *inh = (struct task_struct*) _inh;
+		rec->data.effective_priority_change.when = now();
+		rec->data.effective_priority_change.inh_pid = (inh != NULL) ?
+							      inh->pid :
+							      0xffff;
+
+		put_record(rec);
+	}
+}
+
+/* pray for no nesting of nv interrupts on same CPU... */
+struct tracing_interrupt_map
+{
+	int active;
+	int count;
+	unsigned long data[128];    // assume nesting less than 128...
+	unsigned long serial[128];
+};
+DEFINE_PER_CPU(struct tracing_interrupt_map, active_interrupt_tracing);
+
+
+DEFINE_PER_CPU(u32, intCounter);
+
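+/* The begin/end hooks below track NV interrupt nesting depth per CPU and
+ * stamp each begin event with a serial number drawn from the per-CPU
+ * intCounter, which is advanced by num_online_cpus() on every begin. */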
+feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
+							 unsigned long _device)
+{
+	struct st_event_record *rec;
+	u32 serialNum;
+
+	{
+		u32* serial;
+		struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
+		if (int_map->active == 0xcafebabe)
+		{
+			int_map->count++;
+		}
+		else
+		{
+			int_map->active = 0xcafebabe;
+			int_map->count = 1;
+		}
+		//int_map->data[int_map->count-1] = _device;
+
+		serial = &per_cpu(intCounter, smp_processor_id());
+		*serial += num_online_cpus();
+		serialNum = *serial;
+		int_map->serial[int_map->count-1] = serialNum;
+	}
+
+	rec = get_record(ST_NV_INTERRUPT_BEGIN, NULL);
+	if (rec) {
+		u32 device = _device;
+		rec->data.nv_interrupt_begin.when = now();
+		rec->data.nv_interrupt_begin.device = device;
+		rec->data.nv_interrupt_begin.serialNumber = serialNum;
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_nv_interrupt_begin);
+
+/*
+int is_interrupt_tracing_active(void)
+{
+	struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
+	if(int_map->active == 0xcafebabe)
+		return 1;
+	return 0;
+}
+*/
+
+feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, unsigned long _device)
+{
+	struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
+	if (int_map->active == 0xcafebabe)
+	{
+		struct st_event_record *rec = get_record(ST_NV_INTERRUPT_END, NULL);
+
+		int_map->count--;
+		if (int_map->count == 0)
+			int_map->active = 0;
+
+		if (rec) {
+			u32 device = _device;
+			rec->data.nv_interrupt_end.when = now();
+			//rec->data.nv_interrupt_end.device = int_map->data[int_map->count];
+			rec->data.nv_interrupt_end.device = device;
+			rec->data.nv_interrupt_end.serialNumber = int_map->serial[int_map->count];
+			put_record(rec);
+		}
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_nv_interrupt_end);
+
+
+
+
+
+
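For illustration only, the following minimal, self-contained C sketch (userspace, single CPU; all names are illustrative and not part of LITMUS^RT) mirrors the nesting/serial bookkeeping that do_sched_trace_nv_interrupt_begin() and do_sched_trace_nv_interrupt_end() perform on per-CPU state: a magic value marks an active interrupt context, a counter tracks nesting depth, and each begin is stamped with a serial number advanced by the number of online CPUs.

/* Sketch of the per-CPU nesting/serial bookkeeping used by the nv_interrupt
 * trace hooks above; userspace, single CPU, illustrative only. */
#include <stdio.h>

#define ACTIVE_MAGIC 0xcafebabe
#define NUM_CPUS     4                          /* stand-in for num_online_cpus() */

struct tracing_interrupt_map {
	unsigned int active;                    /* ACTIVE_MAGIC while inside an interrupt */
	int count;                              /* current nesting depth */
	unsigned long serial[128];              /* serial number per nesting level */
};

static struct tracing_interrupt_map int_map;    /* one instance per CPU in the kernel */
static unsigned long int_counter;               /* per-CPU serial counter */

static unsigned long interrupt_begin(void)
{
	if (int_map.active == ACTIVE_MAGIC)
		int_map.count++;                /* nested interrupt */
	else {
		int_map.active = ACTIVE_MAGIC;  /* outermost interrupt */
		int_map.count = 1;
	}
	int_counter += NUM_CPUS;                /* advance serial by CPU count */
	int_map.serial[int_map.count - 1] = int_counter;
	return int_counter;
}

static void interrupt_end(void)
{
	if (int_map.active != ACTIVE_MAGIC)
		return;                         /* end without a matching begin */
	int_map.count--;
	if (int_map.count == 0)
		int_map.active = 0;
	printf("end   of interrupt, serial %lu\n", int_map.serial[int_map.count]);
}

int main(void)
{
	printf("begin of interrupt, serial %lu\n", interrupt_begin());
	printf("begin of interrupt, serial %lu\n", interrupt_begin()); /* nested */
	interrupt_end();        /* ends the nested interrupt (serial 8) */
	interrupt_end();        /* ends the outer interrupt  (serial 4) */
	return 0;
}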