diff options
author | Bjoern Brandenburg <bbb@mpi-sws.org> | 2012-08-15 07:34:27 -0400 |
---|---|---|
committer | Bjoern Brandenburg <bbb@mpi-sws.org> | 2012-10-18 16:14:02 -0400 |
commit | 80e2d84533265a8231534f10c56d9eba01cec99a (patch) | |
tree | b980aed88b8cfa79ab6d271b93fb51093694f855 | |
parent | 5454446ed47708b26d40337e47affea5e1f298ef (diff) |
Feather-Trace: add support for locking-related syscall overheads
Support recording timestamps that allow tracing the entry and exit
costs of locking-related system calls.
-rw-r--r-- | include/litmus/litmus.h | 16 | ||||
-rw-r--r-- | include/litmus/rt_param.h | 6 | ||||
-rw-r--r-- | litmus/locking.c | 15 | ||||
-rw-r--r-- | litmus/sched_litmus.c | 5 |
4 files changed, 42 insertions, 0 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 807b7888695a..f7893ef18162 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -259,4 +259,20 @@ static inline quanta_t time2quanta(lt_t time, enum round round) | |||
259 | /* By how much is cpu staggered behind CPU 0? */ | 259 | /* By how much is cpu staggered behind CPU 0? */ |
260 | u64 cpu_stagger_offset(int cpu); | 260 | u64 cpu_stagger_offset(int cpu); |
261 | 261 | ||
262 | static inline struct control_page* get_control_page(struct task_struct *t) | ||
263 | { | ||
264 | return tsk_rt(t)->ctrl_page; | ||
265 | } | ||
266 | |||
267 | static inline int has_control_page(struct task_struct* t) | ||
268 | { | ||
269 | return tsk_rt(t)->ctrl_page != NULL; | ||
270 | } | ||
271 | |||
272 | |||
273 | #define TS_SYSCALL_IN_START \ | ||
274 | if (has_control_page(current)) { \ | ||
275 | __TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \ | ||
276 | } | ||
277 | |||
262 | #endif | 278 | #endif |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index fac939dbd33a..6456ed04fddb 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -89,8 +89,14 @@ union np_flag { | |||
89 | * determining preemption/migration overheads). | 89 | * determining preemption/migration overheads). |
90 | */ | 90 | */ |
91 | struct control_page { | 91 | struct control_page { |
92 | /* This flag is used by userspace to communicate non-preemptive | ||
93 | * sections. */ | ||
92 | volatile union np_flag sched; | 94 | volatile union np_flag sched; |
93 | 95 | ||
96 | /* Locking overhead tracing: userspace records here the time stamp | ||
97 | * prior to starting the system call. */ | ||
98 | uint64_t ts_syscall_start; /* Feather-Trace cycles */ | ||
99 | |||
94 | /* to be extended */ | 100 | /* to be extended */ |
95 | }; | 101 | }; |
96 | 102 | ||
diff --git a/litmus/locking.c b/litmus/locking.c index df7d0a939c08..43d9aece2e74 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -1,3 +1,5 @@ | |||
1 | #include <linux/sched.h> | ||
2 | #include <litmus/litmus.h> | ||
1 | #include <litmus/fdso.h> | 3 | #include <litmus/fdso.h> |
2 | 4 | ||
3 | #ifdef CONFIG_LITMUS_LOCKING | 5 | #ifdef CONFIG_LITMUS_LOCKING |
@@ -72,6 +74,10 @@ asmlinkage long sys_litmus_lock(int lock_od) | |||
72 | struct od_table_entry* entry; | 74 | struct od_table_entry* entry; |
73 | struct litmus_lock* l; | 75 | struct litmus_lock* l; |
74 | 76 | ||
77 | TS_SYSCALL_IN_START; | ||
78 | |||
79 | TS_SYSCALL_IN_END; | ||
80 | |||
75 | TS_LOCK_START; | 81 | TS_LOCK_START; |
76 | 82 | ||
77 | entry = get_entry_for_od(lock_od); | 83 | entry = get_entry_for_od(lock_od); |
@@ -85,6 +91,8 @@ asmlinkage long sys_litmus_lock(int lock_od) | |||
85 | * this into account when computing overheads. */ | 91 | * this into account when computing overheads. */ |
86 | TS_LOCK_END; | 92 | TS_LOCK_END; |
87 | 93 | ||
94 | TS_SYSCALL_OUT_START; | ||
95 | |||
88 | return err; | 96 | return err; |
89 | } | 97 | } |
90 | 98 | ||
@@ -94,6 +102,10 @@ asmlinkage long sys_litmus_unlock(int lock_od) | |||
94 | struct od_table_entry* entry; | 102 | struct od_table_entry* entry; |
95 | struct litmus_lock* l; | 103 | struct litmus_lock* l; |
96 | 104 | ||
105 | TS_SYSCALL_IN_START; | ||
106 | |||
107 | TS_SYSCALL_IN_END; | ||
108 | |||
97 | TS_UNLOCK_START; | 109 | TS_UNLOCK_START; |
98 | 110 | ||
99 | entry = get_entry_for_od(lock_od); | 111 | entry = get_entry_for_od(lock_od); |
@@ -107,6 +119,8 @@ asmlinkage long sys_litmus_unlock(int lock_od) | |||
107 | * account when computing overheads. */ | 119 | * account when computing overheads. */ |
108 | TS_UNLOCK_END; | 120 | TS_UNLOCK_END; |
109 | 121 | ||
122 | TS_SYSCALL_OUT_START; | ||
123 | |||
110 | return err; | 124 | return err; |
111 | } | 125 | } |
112 | 126 | ||
@@ -156,6 +170,7 @@ out: | |||
156 | return passed; | 170 | return passed; |
157 | } | 171 | } |
158 | 172 | ||
173 | |||
159 | #else | 174 | #else |
160 | 175 | ||
161 | struct fdso_ops generic_lock_ops = {}; | 176 | struct fdso_ops generic_lock_ops = {}; |
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 6553948407de..6b32cf09abbd 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -194,6 +194,9 @@ static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, | |||
194 | 194 | ||
195 | static void yield_task_litmus(struct rq *rq) | 195 | static void yield_task_litmus(struct rq *rq) |
196 | { | 196 | { |
197 | TS_SYSCALL_IN_START; | ||
198 | TS_SYSCALL_IN_END; | ||
199 | |||
197 | BUG_ON(rq->curr != current); | 200 | BUG_ON(rq->curr != current); |
198 | /* sched_yield() is called to trigger delayed preemptions. | 201 | /* sched_yield() is called to trigger delayed preemptions. |
199 | * Thus, mark the current task as needing to be rescheduled. | 202 | * Thus, mark the current task as needing to be rescheduled. |
@@ -202,6 +205,8 @@ static void yield_task_litmus(struct rq *rq) | |||
202 | */ | 205 | */ |
203 | clear_exit_np(current); | 206 | clear_exit_np(current); |
204 | litmus_reschedule_local(); | 207 | litmus_reschedule_local(); |
208 | |||
209 | TS_SYSCALL_OUT_START; | ||
205 | } | 210 | } |
206 | 211 | ||
207 | /* Plugins are responsible for this. | 212 | /* Plugins are responsible for this. |