author     Andrea Bastoni <bastoni@cs.unc.edu>    2010-03-02 12:04:55 -0500
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-04-12 14:58:40 -0400
commit     5cdc24ae24b6517d594072b27873b1d46416eae3 (patch)
tree       bab9fcb6fde1bae2b5857cd4838fe5847f927de5
parent     ad92e346f66397c431b8856fb1eb15be29415b04 (diff)
Add preemption-and-migration overhead tracing support (wip-pm-ovd)
Add per-task accounting (based on the shared control page) of the
information needed to trace preemption and migration overheads from
userspace.
 -rw-r--r--  include/litmus/rt_param.h | 19
 -rw-r--r--  kernel/sched.c            |  6
 -rw-r--r--  litmus/Kconfig            | 10
 -rw-r--r--  litmus/sched_litmus.c     | 54
 4 files changed, 88 insertions(+), 1 deletion(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 5b94d1a8eea7..31630d729dbb 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -50,13 +50,26 @@ struct rt_task {
  * determining preemption/migration overheads).
  */
 struct control_page {
+        /* preemption and migration overhead measurement */
+        /* execution (re)start time, i.e., when the preemption ends */
+        lt_t preempt_end;
+        /* incremented if last_rt_task != current */
+        unsigned long sched_count;
+        unsigned int cpu;
+        /* job number for this task */
+        unsigned long job_count;
+        /* last rt task scheduled before the current one */
+        unsigned long last_rt_task;
+        /* execution stop time, i.e., when the preemption starts */
+        lt_t preempt_start;
+
         /* Is the task currently in a non-preemptive section? */
         int np_flag;
         /* Should the task call into the kernel when it leaves
          * its non-preemptive section? */
         int delayed_preemption;
 
-        /* to be extended */
+        /* what do you want to share today? */
 };
 
 /* don't export internal data structures to user space (liblitmus) */
@@ -175,6 +188,10 @@ struct rt_param {
          */
         struct list_head list;
 
+        /* FIXME: for overhead tracing it would be better to place
+         * this page at the top of rt_param, and rt_param closer to
+         * the top of task_struct.
+         */
         /* Pointer to the page shared between userspace and kernel. */
         struct control_page * ctrl_page;
 };
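
The semantics of the two timestamps are worth making concrete: the kernel
stamps preempt_start when the task is switched out and preempt_end when it
is switched back in, so userspace recovers the off-CPU interval by simple
subtraction. Below is a minimal userspace sketch under that assumption; the
ctrl_ovd mirror struct and the helper are illustrative only, not part of
this patch or of the liblitmus API.

#include <stdint.h>

/* Userspace mirror of the fields added to struct control_page above;
 * lt_t is LITMUS^RT's 64-bit time type (raw cycles here). The layout
 * copies the patch, but this struct itself is hypothetical.
 */
struct ctrl_ovd {
        uint64_t preempt_end;        /* cycles at resume (preemption ends) */
        unsigned long sched_count;   /* bumped when another RT task ran */
        unsigned int cpu;            /* CPU we were last scheduled on */
        unsigned long job_count;     /* job number of this task */
        unsigned long last_rt_task;  /* pid of the previous RT task */
        uint64_t preempt_start;      /* cycles at deschedule (preemption starts) */
};

/* Length of the last off-CPU interval, in cycles. Only meaningful once
 * the task has been descheduled and resumed at least once, i.e., both
 * stamps have been written by the kernel.
 */
static inline uint64_t preemption_length(const struct ctrl_ovd *c)
{
        return c->preempt_end - c->preempt_start;
}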
diff --git a/kernel/sched.c b/kernel/sched.c
index 1701eaebb79c..69fb86c87915 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -489,6 +489,9 @@ struct rt_rq {
 struct litmus_rq {
         unsigned long nr_running;
         struct task_struct *prev;
+#ifdef CONFIG_PREEMPT_MIGRAT_OVD
+        unsigned int last_rt_task;
+#endif
 };
 
 #ifdef CONFIG_SMP
@@ -2766,8 +2769,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
          */
         prev_state = prev->state;
         finish_arch_switch(prev);
+
         litmus->finish_switch(prev);
         prev->rt_param.stack_in_use = NO_CPU;
+        trace_preempt_migrat_ovd(prev, current, rq);
+
         perf_event_task_sched_in(current, cpu_of(rq));
         finish_lock_switch(rq, prev);
 
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 874794f64af1..77f65e512136 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -67,6 +67,16 @@ config SCHED_OVERHEAD_TRACE
           Export event stream for overhead tracing.
           Say Yes for overhead tracing.
 
+config PREEMPT_MIGRAT_OVD
+        bool "Preemption and migration overhead measurements"
+        default n
+        help
+          Include support for tracing preemption and migration overheads
+          from userspace. Unless you know the mess you are getting into,
+          the safe choice is No. If you _really_ want to have some fun,
+          then say Yes and have a look at the example programs in the
+          liblitmus2010 library.
+
 config SCHED_DEBUG_TRACE
         bool "TRACE() debugging"
         default y
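
For completeness, once the patch is applied the feature is switched on like
any other kernel option; only the option name below comes from the patch,
the rest is standard Kconfig usage:

# via `make menuconfig`, or directly in .config:
CONFIG_PREEMPT_MIGRAT_OVD=y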
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index c1fc7748e590..be29cbe15fc5 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -316,3 +316,57 @@ const struct sched_class litmus_sched_class = {
         .prio_changed = prio_changed_litmus,
         .switched_to = switched_to_litmus,
 };
+
+#ifdef CONFIG_PREEMPT_MIGRAT_OVD
+/* trace_preempt_migrat_ovd() runs during finish_task_switch(),
+ * before control is (possibly) returned to userspace. Through the
+ * shared rw (FIXME: ro?) ctrl page, it keeps the userspace
+ * monitoring program informed about what happened while the task
+ * was not running.
+ */
+static void trace_preempt_migrat_ovd(struct task_struct *prev,
+                                     struct task_struct *curr,
+                                     struct rq *rq)
+{
+        struct control_page *ctrl;
+
+        /* Update the bookkeeping variables that are shared with
+         * userspace for benchmarking (preemptions, migrations).
+         */
+        if (is_realtime(prev) && tsk_rt(prev)->ctrl_page) {
+                rq->litmus.last_rt_task = prev->pid;
+                tsk_rt(prev)->ctrl_page->preempt_start = get_cycles();
+        }
+
+        if (is_realtime(curr) && tsk_rt(curr)->ctrl_page) {
+                ctrl = tsk_rt(curr)->ctrl_page;
+                ctrl->preempt_end = get_cycles();
+
+                /* If the last RT task to be scheduled was not this
+                 * task, then the preemption "counts" and we increment
+                 * sched_count; otherwise we do not. On a migration
+                 * sched_count might not get incremented, so userspace
+                 * must check for a cpu change in that case (cpu is
+                 * always updated).
+                 */
+                if (rq->litmus.last_rt_task != curr->pid)
+                        ctrl->sched_count++;
+
+                /* "Our" userspace task will eventually be woken up
+                 * sometime in the "near" future; write the
+                 * information it needs onto the shared page.
+                 */
+                ctrl->cpu = rq->cpu;
+                ctrl->job_count = tsk_rt(curr)->job_params.job_no;
+                ctrl->last_rt_task = rq->litmus.last_rt_task;
+                /* Userspace now has all the information needed to
+                 * compute the length of this preemption.
+                 */
+        }
+}
+#else
+static inline void trace_preempt_migrat_ovd(struct task_struct *prev,
+                                            struct task_struct *curr,
+                                            struct rq *rq)
+{
+}
+#endif
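
Given those write-side semantics, a monitoring program can tell preemptions
and migrations apart by comparing consecutive snapshots of the shared page.
The sketch below illustrates the classification rule spelled out in the
comments above (a cpu change implies a migration; a sched_count increment on
the same cpu implies a preemption). How the snapshots are obtained is left
out, and every name other than the field names is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Two consecutive userspace snapshots of the shared fields. */
struct ovd_snapshot {
        uint64_t preempt_start;
        uint64_t preempt_end;
        unsigned long sched_count;
        unsigned int cpu;
        unsigned long last_rt_task;
};

/* Classify the event between two snapshots, mirroring the kernel-side
 * comment: on a migration sched_count may not move, so the cpu field
 * (which is always updated) is checked first.
 */
static void classify_event(const struct ovd_snapshot *prev,
                           const struct ovd_snapshot *now)
{
        unsigned long long off_cpu =
                (unsigned long long)(now->preempt_end - now->preempt_start);

        if (now->cpu != prev->cpu)
                printf("migration %u -> %u: %llu cycles off cpu\n",
                       prev->cpu, now->cpu, off_cpu);
        else if (now->sched_count != prev->sched_count)
                printf("preemption (last rt pid %lu): %llu cycles off cpu\n",
                       now->last_rt_task, off_cpu);
        else
                printf("no intervening rt task: not counted\n");
}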