From 26bafa3b7880a323d83b8ea71bdb8e2118a5cba0 Mon Sep 17 00:00:00 2001
From: Andrea Bastoni
Date: Sat, 11 Feb 2012 21:41:19 +0100
Subject: Add kernel-style events for sched_trace_XXX() functions

Enable kernel-style events (tracepoints) for Litmus. Litmus events trace
the same functions as sched_trace_XXX(), but can be enabled independently.

Why add another tracing infrastructure:

- Litmus tracepoints can be recorded and analyzed together (on a single
  time reference) with all other kernel tracing events (e.g.,
  sched:sched_switch), which makes it easier to correlate the effects of
  kernel events on Litmus tasks.

- They provide a quick way to visualize and process schedule traces with
  the trace-cmd utility and the kernelshark visualizer. Kernelshark lacks
  unit-trace's schedule-correctness checks, but it gives a fast view of
  schedule traces and offers several filtering options (for all kernel
  events, not only Litmus').
---
 kernel/sched.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index baaca61bc3a3..2229d0deec4b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -80,14 +80,14 @@
 #include "workqueue_sched.h"
 #include "sched_autogroup.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
 static void litmus_tick(struct rq*, struct task_struct*);
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/sched.h>
-
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
--
cgit v1.2.2

From 9434d607f4ddaf2374dafd86c20ed4336230ecfa Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Mon, 17 Sep 2012 09:57:37 -0400
Subject: Do processor state transitions in schedule_tail().

Fixes a bug in Litmus where processor scheduling states could become
corrupted. Corruption can occur when a just-forked thread is externally
forced to be scheduled by SCHED_LITMUS before it has completed its
post-fork processing, specifically before schedule_tail() has finished.
---
 kernel/sched.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 2229d0deec4b..65aba7ec564d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3163,16 +3163,26 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
-	struct rq *rq = this_rq();
-
+	struct rq *rq;
+
+	preempt_disable();
+
+	rq = this_rq();
 	finish_task_switch(rq, prev);
 
+	sched_trace_task_switch_to(current);
+
 	/*
 	 * FIXME: do we need to worry about rq being invalidated by the
 	 * task_switch?
 	 */
 	post_schedule(rq);
 
+	if (sched_state_validate_switch())
+		litmus_reschedule_local();
+
+	preempt_enable();
+
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
 	preempt_enable();
--
cgit v1.2.2
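
Note on the first patch: kernel-style events are declared with the
TRACE_EVENT() macro in a header under trace/events/, and exactly one
compilation unit must define CREATE_TRACE_POINTS before including that
header so the tracepoint bodies are emitted there. Moving the #define above
the Litmus includes in kernel/sched.c presumably ensures that any Litmus
event headers pulled in through those includes are instantiated in sched.c
as well. The sketch below is a minimal, illustrative tracepoint header; the
event name, fields, and file path are hypothetical and do not reproduce the
actual Litmus definitions.

/* trace/events/litmus.h -- hypothetical, minimal kernel-style event header */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM litmus

#if !defined(_TRACE_LITMUS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LITMUS_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

/* Fires when a task is switched to; the fields are illustrative only. */
TRACE_EVENT(litmus_switch_to,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(int,   prio)
	),

	TP_fast_assign(
		__entry->pid  = t->pid;
		__entry->prio = t->prio;
	),

	TP_printk("switch_to pid=%d prio=%d", __entry->pid, __entry->prio)
);

#endif /* _TRACE_LITMUS_H */

/* Must stay outside the include guard: expands the events into real code
 * in the one file that defined CREATE_TRACE_POINTS before including us. */
#include <trace/define_trace.h>

Callers would then invoke trace_litmus_switch_to(next) at the corresponding
scheduling point, and the resulting litmus:* events appear alongside
sched:* events, so they can be captured with trace-cmd and browsed in
kernelshark as described in the commit message.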