aboutsummaryrefslogtreecommitdiffstats
path: root/litmus
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-03-02 12:04:55 -0500
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-04-12 14:58:40 -0400
commit5cdc24ae24b6517d594072b27873b1d46416eae3 (patch)
treebab9fcb6fde1bae2b5857cd4838fe5847f927de5 /litmus
parentad92e346f66397c431b8856fb1eb15be29415b04 (diff)
Add preemption-and-migration overhead tracing support (branch: wip-pm-ovd)
Add per-task accounting (based on shared control page) of information needed to trace preemption and migration overheads from userspace
Diffstat (limited to 'litmus')
-rw-r--r--litmus/Kconfig10
-rw-r--r--litmus/sched_litmus.c54
2 files changed, 64 insertions, 0 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 874794f64af1..77f65e512136 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -67,6 +67,16 @@ config SCHED_OVERHEAD_TRACE
67 Export event stream for overhead tracing. 67 Export event stream for overhead tracing.
68 Say Yes for overhead tracing. 68 Say Yes for overhead tracing.
69 69
70config PREEMPT_MIGRAT_OVD
71 bool "Preemption and migration overhead measurements"
72 default n
73 help
74 Include support for tracing preemption and migration overheads
75 in userspace. Unless you know the mess you are going into,
76 the safe choice is No. If you _really_ want to have some fun,
77	   then say Yes and have a look in the liblitmus2010 library for
78 some program examples.
79
70config SCHED_DEBUG_TRACE 80config SCHED_DEBUG_TRACE
71 bool "TRACE() debugging" 81 bool "TRACE() debugging"
72 default y 82 default y
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index c1fc7748e590..be29cbe15fc5 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -316,3 +316,57 @@ const struct sched_class litmus_sched_class = {
316 .prio_changed = prio_changed_litmus, 316 .prio_changed = prio_changed_litmus,
317 .switched_to = switched_to_litmus, 317 .switched_to = switched_to_litmus,
318}; 318};
319
320#ifdef CONFIG_PREEMPT_MIGRAT_OVD
321/* trace_preempt_migrat_ovd() executes during finish_task_switch(),
322 * before (possibly) giving control back to userspace.
323 * Using the shared rw (FIXME ro?) ctrl page we keep the userspace
324 * monitoring program updated on what happened during our execution.
 * NOTE(review): the 'curr' parameter is unused; the body reads the
 * global 'current' macro instead — presumably equivalent at this
 * point in finish_task_switch(), but confirm against the caller.
325 */
326static void trace_preempt_migrat_ovd(struct task_struct *prev,
327				     struct task_struct *curr,
328				     struct rq *rq)
329{
330	struct control_page *ctrl;
331
332	/* Update bookkeeping variables that are shared with userspace
333	 * to conduct appropriate benchmarking (preempt, migration).
334	 */
335	if (is_realtime(prev) && tsk_rt(prev)->ctrl_page) {
336		rq->litmus.last_rt_task = prev->pid;
337		tsk_rt(prev)->ctrl_page->preempt_start = get_cycles();
338	}
339
340	if (is_realtime(current) && tsk_rt(current)->ctrl_page) {
341		ctrl = tsk_rt(current)->ctrl_page;
342		ctrl->preempt_end = get_cycles();
343
344		/* If the last RT task to be scheduled was not
345		 * this task, then the preemption "counts", and
346		 * we increment sched_count. Otherwise, we do
347		 * not. On a migration this might not get
348		 * incremented, and we need to check for a cpu
349		 * change in that case (cpu is always updated).
350		 */
351		if (rq->litmus.last_rt_task != current->pid)
352			ctrl->sched_count++;
353
354		/* "our" userspace will eventually be woken up sometime
355		 * in the "near" future; we write some information for
356		 * it on the shared page.
357		 */
358		ctrl->cpu = rq->cpu;
359		ctrl->job_count = tsk_rt(current)->job_params.job_no;
360		ctrl->last_rt_task = rq->litmus.last_rt_task;
361		/* userspace has all the information to compute the length
362		 * of this preemption
363		 */
364	}
365}
366#else
/* no-op stub when preemption/migration overhead tracing is disabled */
367static inline void trace_preempt_migrat_ovd(struct task_struct *prev,
368					    struct task_struct *curr,
369					    struct rq *rq)
370{
371}
372#endif