From 2f41251f9febad2e54fa338ea63a62818970af0c Mon Sep 17 00:00:00 2001
From: Andrea Bastoni
Date: Fri, 18 Dec 2009 09:08:44 -0500
Subject: Test kernel tracing events capabilities

Can events be applied to LITMUS code instead of sched_task_trace?

PROS:
- architecture independence
- easy porting to newer kernel versions
- a lock-free ring buffer implementation is already there

CONS:
- userspace tools need to be converted to a slightly different format
- is it possible to replicate all the previous functionality?
- only the sched_trace_* functions can be implemented through events;
  the TRACE() debugging features are still implemented in the old way
  (can't we simply use the kernel's tracing features for debugging
  purposes as well?)
---
 include/trace/events/litmus.h | 147 ++++++++++++++++++++++++++++++++++++++++++
 include/trace/events/sched.h  |   3 +
 kernel/sched.c                |   3 +-
 litmus/litmus.c               |   8 ++-
 litmus/rt_domain.c            |   5 +-
 litmus/sched_gsn_edf.c        |  11 +++-
 litmus/sched_litmus.c         |   3 +-
 7 files changed, 172 insertions(+), 8 deletions(-)
 create mode 100644 include/trace/events/litmus.h
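Regarding the userspace-tools CON above: with TRACE_EVENT the records are pulled
from the generic ftrace ring buffer through debugfs instead of the current
sched_trace interface, so the existing tools would need a new reader. A rough
userspace sketch of what that could look like; the debugfs mount point and the
events/sched/ location are assumptions (litmus.h is only pulled in through
trace/events/sched.h here), not something this patch guarantees:

/* Hypothetical reader, not part of this patch: enables one of the new events
 * and dumps the textual trace. Assumes debugfs is mounted at /sys/kernel/debug
 * and that the events land in the "sched" subsystem.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACING "/sys/kernel/debug/tracing"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* enable only the litmus_task_release event */
	if (write_str(TRACING "/events/sched/litmus_task_release/enable", "1"))
		perror("enable");

	/* trace_pipe blocks until new records are available */
	fd = open(TRACING "/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		fwrite(buf, 1, n, stdout);
		fflush(stdout);
	}
	close(fd);
	return 0;
}

Each event also exposes its binary record layout in a per-event "format" file
under the same directory, which is what a binary reader would parse instead of
the fixed record layout used by the current sched_trace tools.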
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
new file mode 100644
index 000000000000..e5216a8502c2
--- /dev/null
+++ b/include/trace/events/litmus.h
@@ -0,0 +1,147 @@
+/*
+ * LITMUS^RT scheduling events
+ * included from sched.
+ */
+#include
+#include
+
+/*
+ * Tracing task admission
+ */
+TRACE_EVENT(litmus_task_param,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( lt_t, wcet )
+		__field( lt_t, period )
+		__field( lt_t, phase )
+		__field( int, partition )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->wcet = get_exec_cost(t);
+		__entry->period = get_rt_period(t);
+		__entry->phase = get_rt_phase(t);
+		__entry->partition = get_partition(t);
+	),
+
+	TP_printk("period(%d, %Lu).\nwcet(%d, %Lu).\n",
+		__entry->pid, __entry->period,
+		__entry->pid, __entry->wcet)
+);
+
+/*
+ * Tracing jobs release
+ */
+TRACE_EVENT(litmus_task_release,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( lt_t, release )
+		__field( lt_t, deadline )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->release = get_release(t);
+		__entry->deadline = get_deadline(t);
+	),
+
+	TP_printk("released(job(%u, %u), %Lu).\ndeadline(job(%u, %u), %Lu).\n",
+		__entry->pid, __entry->job, __entry->release,
+		__entry->pid, __entry->job, __entry->deadline)
+);
+
+/*
+ * Tracing jobs completion
+ */
+TRACE_EVENT(litmus_task_completion,
+
+	TP_PROTO(struct task_struct *t, unsigned long forced),
+
+	TP_ARGS(t, forced),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( lt_t, when )
+		__field( unsigned long, forced )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->when = litmus_clock();
+		__entry->forced = forced;
+	),
+
+	TP_printk("completed(job(%u, %u), %Lu).\n",
+		__entry->pid, __entry->job, __entry->when)
+);
+
+/*
+ * Tracing jobs resume
+ */
+TRACE_EVENT(litmus_task_resume,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( lt_t, when )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->when = litmus_clock();
+	),
+
+	TP_printk("resumed(job(%u, %u), %Lu).\n",
+		__entry->pid, __entry->job, __entry->when)
+);
+
+/*
+ * Tracepoint for switching away previous task
+ */
+TRACE_EVENT(litmus_switch_away,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( lt_t, when )
+		__field( lt_t, exec_time )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->when = litmus_clock();
+		__entry->exec_time = get_exec_time(t);
+	),
+
+	TP_printk("scheduled(job(%u, %u), %Lu, %Lu).\n",
+		__entry->pid, __entry->job,
+		__entry->when, __entry->exec_time)
+);
+
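On the portability PRO: as written, litmus.h only works because it is textually
included from trace/events/sched.h inside that header's own protection. If it
were later turned into a standalone event header, it would need the usual
tracepoint boilerplate so that define_trace.h can generate the event code. A
rough sketch of that form; the _TRACE_LITMUS_H guard and the litmus
TRACE_SYSTEM name are assumptions, not part of this patch:

/* Hypothetical standalone version of include/trace/events/litmus.h
 * (not what this patch does; it piggybacks on trace/events/sched.h).
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM litmus

#if !defined(_TRACE_LITMUS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LITMUS_H

#include <linux/tracepoint.h>

/* TRACE_EVENT(litmus_task_param, ...), TRACE_EVENT(litmus_task_release, ...)
 * and the other events would go here, exactly as defined above.
 */

#endif /* _TRACE_LITMUS_H */

/* This part must be outside the multi-read protection */
#include <trace/define_trace.h>

Exactly one compilation unit would then define CREATE_TRACE_POINTS before
including the header, and the events would appear under their own
events/litmus/ directory rather than under events/sched/.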
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4069c43f4187..0e1d542ca961 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -472,6 +472,9 @@ TRACE_EVENT(sched_stat_iowait,
 		(unsigned long long)__entry->delay)
 );
 
+/* include LITMUS^RT scheduling events */
+#include "./litmus.h"
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched.c b/kernel/sched.c
index ee894ee8a0bb..5ff48213781c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5459,7 +5459,8 @@ need_resched:
 	release_kernel_lock(prev);
 need_resched_nonpreemptible:
 	TS_SCHED_START;
-	sched_trace_task_switch_away(prev);
+//	sched_trace_task_switch_away(prev);
+	trace_litmus_switch_away(prev);
 
 	schedule_debug(prev);
 
diff --git a/litmus/litmus.c b/litmus/litmus.c
index de751a14d77c..c4c74fe299fc 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -19,6 +19,8 @@
 #include
 
+#include
+
 #include
 
 /* Number of RT tasks that exist in the system */
@@ -342,7 +344,8 @@ long litmus_admit_task(struct task_struct* tsk)
 
 	if (!retval) {
 		sched_trace_task_name(tsk);
-		sched_trace_task_param(tsk);
+//		sched_trace_task_param(tsk);
+		trace_litmus_task_param(tsk);
 		atomic_inc(&rt_task_count);
 	}
 
@@ -355,7 +358,8 @@ out:
 void litmus_exit_task(struct task_struct* tsk)
 {
 	if (is_realtime(tsk)) {
-		sched_trace_task_completion(tsk, 1);
+//		sched_trace_task_completion(tsk, 1);
+		trace_litmus_task_completion(tsk, 1);
 
 		litmus->task_exit(tsk);
 
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 62c9fdcd22be..d5ece53fa61e 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -19,6 +19,8 @@
 #include
 
+#include
+
 #include
 
 static int dummy_resched(rt_domain_t *rt)
@@ -181,7 +183,8 @@ static void arm_release_timer(rt_domain_t *_rt)
 	list_for_each_safe(pos, safe, &list) {
 		/* pick task of work list */
 		t = list_entry(pos, struct task_struct, rt_param.list);
-		sched_trace_task_release(t);
+//		sched_trace_task_release(t);
+		trace_litmus_task_release(t);
 		list_del(pos);
 
 		/* put into release heap while holding release_lock */
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index a223e69f2efb..5386f0a24587 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -18,6 +18,8 @@
 #include
 #include
 
+#include
+
 #include
 #include
 
@@ -321,7 +323,8 @@ static noinline void job_completion(struct task_struct *t, int forced)
 {
 	BUG_ON(!t);
 
-	sched_trace_task_completion(t, forced);
+//	sched_trace_task_completion(t, forced);
+	trace_litmus_task_completion(t, forced);
 
 	TRACE_TASK(t, "job_completion().\n");
 
@@ -330,7 +333,8 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	/* prepare for next period */
 	prepare_for_next_period(t);
 	if (is_released(t, litmus_clock()))
-		sched_trace_task_release(t);
+		trace_litmus_task_release(t);
+//		sched_trace_task_release(t);
 	/* unlink */
 	unlink(t);
 	/* requeue
@@ -558,7 +562,8 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 	if (is_tardy(task, now)) {
 		/* new sporadic release */
 		release_at(task, now);
-		sched_trace_task_release(task);
+//		sched_trace_task_release(task);
+		trace_litmus_task_release(task);
 	} else {
 		if (task->rt.time_slice) {
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 4d1cdacbeb9f..2aa609466cde 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -145,7 +145,8 @@ static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
 				int wakeup)
 {
 	if (wakeup) {
-		sched_trace_task_resume(p);
+//		sched_trace_task_resume(p);
+		trace_litmus_task_resume(p);
 		tsk_rt(p)->present = 1;
 		litmus->task_wake_up(p);
 	} else
-- 
cgit v1.2.2
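One possible way to reduce the call-site churn seen above (each sched_trace_*
call commented out and replaced by the corresponding trace_litmus_* call) would
be a thin compatibility layer, so the scheduler plugins keep their existing
calls while the backend is swapped underneath. A hypothetical sketch, not part
of this patch:

/* Hypothetical compatibility shim: map the existing LITMUS^RT trace hooks
 * onto the new trace events so call sites need not change while the
 * event-based backend is being evaluated.
 */
#define sched_trace_task_param(t)		trace_litmus_task_param(t)
#define sched_trace_task_release(t)		trace_litmus_task_release(t)
#define sched_trace_task_completion(t, forced)	\
	trace_litmus_task_completion(t, forced)
#define sched_trace_task_resume(t)		trace_litmus_task_resume(t)
#define sched_trace_task_switch_away(t)		trace_litmus_switch_away(t)

This would keep the per-plugin diffs minimal, at the cost of hiding which
tracing backend a given call site ends up in.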