From bcbc4c0486323cdc6d377253b4fc70ab9d030540 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Sat, 11 Feb 2012 21:41:19 +0100 Subject: Add kernel-style events for sched_trace_XXX() functions Enable kernel-style events (tracepoint) for Litmus. Litmus events trace the same functions as the sched_trace_XXX(), but can be enabled independently. So, why another tracing infrastructure then: - Litmus tracepoints can be recorded and analyzed together (single time reference) with all other kernel tracing events (e.g., sched:sched_switch, etc.). It's easier to correlate the effects of kernel events on litmus tasks. - It enables a quick way to visualize and process schedule traces using trace-cmd utility and kernelshark visualizer. Kernelshark lacks unit-trace's schedule-correctness checks, but it enables a fast view of schedule traces and it has several filtering options (for all kernel events, not only Litmus'). --- include/litmus/sched_trace.h | 105 ++++++++++++++----- include/trace/events/litmus.h | 231 ++++++++++++++++++++++++++++++++++++++++++ kernel/sched.c | 6 +- litmus/Kconfig | 18 ++++ 4 files changed, 334 insertions(+), 26 deletions(-) create mode 100644 include/trace/events/litmus.h diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index 7ca34cb13881..82bde8241298 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h @@ -164,34 +164,93 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, #endif +#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT + +#include <trace/events/litmus.h> + +#else + +/* Override trace macros to actually do nothing */ +#define trace_litmus_task_param(t) +#define trace_litmus_task_release(t) +#define trace_litmus_switch_to(t) +#define trace_litmus_switch_away(prev) +#define trace_litmus_task_completion(t, forced) +#define trace_litmus_task_block(t) +#define trace_litmus_task_resume(t) +#define trace_litmus_sys_release(start) + +#endif + #define SCHED_TRACE_BASE_ID 500 -#define sched_trace_task_name(t) \ - 
SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t) -#define sched_trace_task_param(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t) -#define sched_trace_task_release(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t) -#define sched_trace_task_switch_to(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t) -#define sched_trace_task_switch_away(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t) -#define sched_trace_task_completion(t, forced) \ - SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \ - (unsigned long) forced) -#define sched_trace_task_block(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t) -#define sched_trace_task_resume(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t) -#define sched_trace_action(t, action) \ - SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \ - (unsigned long) action); -/* when is a pointer, it does not need an explicit cast to unsigned long */ -#define sched_trace_sys_release(when) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) +#define sched_trace_task_name(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, \ + do_sched_trace_task_name, t) + +#define sched_trace_task_param(t) \ + do { \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, \ + do_sched_trace_task_param, t); \ + trace_litmus_task_param(t); \ + } while (0) + +#define sched_trace_task_release(t) \ + do { \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \ + do_sched_trace_task_release, t); \ + trace_litmus_task_release(t); \ + } while (0) + +#define sched_trace_task_switch_to(t) \ + do { \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \ + do_sched_trace_task_switch_to, t); \ + trace_litmus_switch_to(t); \ + } while (0) + +#define sched_trace_task_switch_away(t) \ + do { \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \ + do_sched_trace_task_switch_away, t); \ + 
trace_litmus_switch_away(t); \ + } while (0) + +#define sched_trace_task_completion(t, forced) \ + do { \ + SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, \ + do_sched_trace_task_completion, t, \ + (unsigned long) forced); \ + trace_litmus_task_completion(t, forced); \ + } while (0) + +#define sched_trace_task_block(t) \ + do { \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \ + do_sched_trace_task_block, t); \ + trace_litmus_task_block(t); \ + } while (0) + +#define sched_trace_task_resume(t) \ + do { \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \ + do_sched_trace_task_resume, t); \ + trace_litmus_task_resume(t); \ + } while (0) + +#define sched_trace_action(t, action) \ + SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, \ + do_sched_trace_action, t, (unsigned long) action); /* when is a pointer, it does not need an explicit cast to unsigned long */ #define sched_trace_sys_release(when) \ + do { \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, \ + do_sched_trace_sys_release, when); \ + trace_litmus_sys_release(when); \ + } while (0) #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h new file mode 100644 index 000000000000..0fffcee02be0 --- /dev/null +++ b/include/trace/events/litmus.h @@ -0,0 +1,231 @@ +/* + * LITMUS^RT kernel style scheduling tracepoints + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM litmus + +#if !defined(_SCHED_TASK_TRACEPOINT_H) || defined(TRACE_HEADER_MULTI_READ) +#define _SCHED_TASK_TRACEPOINT_H + +#include <linux/tracepoint.h> + +#include <litmus/litmus.h> +#include <litmus/rt_param.h> + +/* + * Tracing task admission + */ +TRACE_EVENT(litmus_task_param, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( unsigned int, job ) + __field( lt_t, wcet ) + __field( lt_t, period ) + __field( lt_t, phase ) + __field( int, partition ) + ), + + TP_fast_assign( + __entry->pid = t ? t->pid : 0; + __entry->job = t ? 
t->rt_param.job_params.job_no : 0; + __entry->wcet = get_exec_cost(t); + __entry->period = get_rt_period(t); + __entry->phase = get_rt_phase(t); + __entry->partition = get_partition(t); + ), + + TP_printk("period(%d, %Lu).\nwcet(%d, %Lu).\n", + __entry->pid, __entry->period, + __entry->pid, __entry->wcet) +); + +/* + * Tracing jobs release + */ +TRACE_EVENT(litmus_task_release, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( unsigned int, job ) + __field( lt_t, release ) + __field( lt_t, deadline ) + ), + + TP_fast_assign( + __entry->pid = t ? t->pid : 0; + __entry->job = t ? t->rt_param.job_params.job_no : 0; + __entry->release = get_release(t); + __entry->deadline = get_deadline(t); + ), + + TP_printk("release(job(%u, %u)): %Lu\ndeadline(job(%u, %u)): %Lu\n", + __entry->pid, __entry->job, __entry->release, + __entry->pid, __entry->job, __entry->deadline) +); + +/* + * Tracepoint for switching to new task + */ +TRACE_EVENT(litmus_switch_to, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( unsigned int, job ) + __field( lt_t, when ) + __field( lt_t, exec_time ) + ), + + TP_fast_assign( + __entry->pid = is_realtime(t) ? t->pid : 0; + __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0; + __entry->when = litmus_clock(); + __entry->exec_time = get_exec_time(t); + ), + + TP_printk("switch_to(job(%u, %u)): %Lu (exec: %Lu)\n", + __entry->pid, __entry->job, + __entry->when, __entry->exec_time) +); + +/* + * Tracepoint for switching away previous task + */ +TRACE_EVENT(litmus_switch_away, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( unsigned int, job ) + __field( lt_t, when ) + __field( lt_t, exec_time ) + ), + + TP_fast_assign( + __entry->pid = is_realtime(t) ? t->pid : 0; + __entry->job = is_realtime(t) ? 
t->rt_param.job_params.job_no : 0; + __entry->when = litmus_clock(); + __entry->exec_time = get_exec_time(t); + ), + + TP_printk("switch_away(job(%u, %u)): %Lu (exec: %Lu)\n", + __entry->pid, __entry->job, + __entry->when, __entry->exec_time) +); + +/* + * Tracing jobs completion + */ +TRACE_EVENT(litmus_task_completion, + + TP_PROTO(struct task_struct *t, unsigned long forced), + + TP_ARGS(t, forced), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( unsigned int, job ) + __field( lt_t, when ) + __field( unsigned long, forced ) + ), + + TP_fast_assign( + __entry->pid = t ? t->pid : 0; + __entry->job = t ? t->rt_param.job_params.job_no : 0; + __entry->when = litmus_clock(); + __entry->forced = forced; + ), + + TP_printk("completed(job(%u, %u)): %Lu (forced: %lu)\n", + __entry->pid, __entry->job, + __entry->when, __entry->forced) +); + +/* + * Trace blocking tasks. + */ +TRACE_EVENT(litmus_task_block, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( lt_t, when ) + ), + + TP_fast_assign( + __entry->pid = t ? t->pid : 0; + __entry->when = litmus_clock(); + ), + + TP_printk("(%u) blocks: %Lu\n", __entry->pid, __entry->when) +); + +/* + * Tracing jobs resume + */ +TRACE_EVENT(litmus_task_resume, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( unsigned int, job ) + __field( lt_t, when ) + ), + + TP_fast_assign( + __entry->pid = t ? t->pid : 0; + __entry->job = t ? 
t->rt_param.job_params.job_no : 0; + __entry->when = litmus_clock(); + ), + + TP_printk("resume(job(%u, %u)): %Lu\n", + __entry->pid, __entry->job, __entry->when) +); + +/* + * Trace synchronous release + */ +TRACE_EVENT(litmus_sys_release, + + TP_PROTO(lt_t *start), + + TP_ARGS(start), + + TP_STRUCT__entry( + __field( lt_t, rel ) + __field( lt_t, when ) + ), + + TP_fast_assign( + __entry->rel = *start; + __entry->when = litmus_clock(); + ), + + TP_printk("SynRelease(%Lu) at %Lu\n", __entry->rel, __entry->when) +); + +#endif /* _SCHED_TASK_TRACEPOINT_H */ + +/* Must stay outside the protection */ +#include <trace/define_trace.h> diff --git a/kernel/sched.c b/kernel/sched.c index baaca61bc3a3..2229d0deec4b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -80,14 +80,14 @@ #include "workqueue_sched.h" #include "sched_autogroup.h" +#define CREATE_TRACE_POINTS +#include <trace/events/sched.h> + #include <litmus/sched_trace.h> #include <litmus/trace.h> static void litmus_tick(struct rq*, struct task_struct*); -#define CREATE_TRACE_POINTS -#include <trace/events/sched.h> - /* * Convert user-nice values [ -20 ... 0 ... 19 ] * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], diff --git a/litmus/Kconfig b/litmus/Kconfig index 94b48e199577..68459d4dca41 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -138,6 +138,24 @@ config SCHED_TASK_TRACE_SHIFT 10 => 1k events 8 => 512 events +config SCHED_LITMUS_TRACEPOINT + bool "Enable Event/Tracepoint Tracing for real-time task tracing" + depends on TRACEPOINTS + default n + help + Enable kernel-style events (tracepoint) for Litmus. Litmus events + trace the same functions as the above sched_trace_XXX(), but can + be enabled independently. + Litmus tracepoints can be recorded and analyzed together (single + time reference) with all other kernel tracing events (e.g., + sched:sched_switch, etc.). + + This also enables a quick way to visualize schedule traces using + trace-cmd utility and kernelshark visualizer. + + Say Yes for debugging and visualization purposes. + Say No for overhead tracing. 
+ config SCHED_OVERHEAD_TRACE bool "Record timestamps for overhead measurements" depends on FEATHER_TRACE -- cgit v1.2.2