author     Bjoern Brandenburg <bbb@mpi-sws.org>    2013-06-23 08:40:52 -0400
committer  Bjoern Brandenburg <bbb@mpi-sws.org>    2013-08-07 03:46:37 -0400
commit     e089fc170eb4c33c8703bf09cacdff7323cf7575 (patch)
tree       206ea5e20ea0552077d7076f007b4d8b22fdfb40 /litmus
parent     a20a7105a2206eb4b9c791aa276f4233bb453214 (diff)
Feather-Trace: add LITMUS^RT overhead tracing infrastructure
This patch adds the main infrastructure for tracing overheads in
LITMUS^RT. It does not yet introduce any tracepoints into the kernel.
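For context, the tracepoints themselves are meant to arrive in follow-up patches via Feather-Trace's static events, which invoke the feather_callback functions defined in litmus/trace.c below. A minimal sketch of such a tracepoint, assuming the ft_event0()/ft_event1() macros provided by CONFIG_FEATHER_TRACE; the event IDs (100/101) and the TS_SCHED_* wrapper names are illustrative, not part of this patch:

	/* Illustrative only: wiring two overhead tracepoints into a
	 * scheduling path. ft_event0()/ft_event1() are the Feather-Trace
	 * static-event macros; save_timestamp()/save_timestamp_task()
	 * are the callbacks added by this patch. */
	#include <litmus/feather_trace.h>
	#include <litmus/trace.h>

	#define TS_SCHED_START		ft_event0(100, save_timestamp)
	#define TS_SCHED_END(t)		ft_event1(101, save_timestamp_task, \
						  (unsigned long) (t))

	static void example_schedule_path(struct task_struct *prev)
	{
		TS_SCHED_START;			/* begin measured interval */
		/* ... scheduling decision ... */
		TS_SCHED_END(prev);		/* end measured interval */
	}

Because Feather-Trace events can be toggled individually at runtime, inactive tracepoints of this kind impose negligible overhead.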
Diffstat (limited to 'litmus')
 -rw-r--r--  litmus/Kconfig    25
 -rw-r--r--  litmus/Makefile    1
 -rw-r--r--  litmus/trace.c   284
 3 files changed, 310 insertions(+), 0 deletions(-)
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 7456eb209cf7..0c7e06b922a1 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -24,6 +24,31 @@ config FEATHER_TRACE
 	you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
 	actually enable any events.
 
+config SCHED_OVERHEAD_TRACE
+	bool "Record timestamps for overhead measurements"
+	depends on FEATHER_TRACE
+	default y
+	help
+	  Export event stream for overhead tracing.
+	  Say Yes for overhead tracing.
+
+config SCHED_OVERHEAD_TRACE_SHIFT
+	int "Buffer size for Feather-Trace overhead data"
+	depends on SCHED_OVERHEAD_TRACE
+	range 15 32
+	default 22
+	help
+
+	  Select the buffer size for the Feather-Trace overhead tracing
+	  infrastructure (/dev/litmus/ft_trace0 & ftcat) as a power of two.
+	  The larger the buffer, the lower the chance of buffer overflows
+	  if the ftcat process is starved by real-time activity. On machines
+	  with large memories, large buffer sizes are recommended.
+
+	  Examples: 16 => 2 MB
+		    24 => 512 MB
+		    26 => 2 GB
+
 endmenu
 
 endmenu
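The help-text examples follow directly from the buffer geometry defined in litmus/trace.c below: the buffer holds NO_TIMESTAMPS = 2 << SHIFT records of 16 bytes each, i.e. 2^(SHIFT+5) bytes. A minimal userspace sketch (illustrative, not part of the patch) that reproduces the table:

	#include <stdio.h>

	int main(void)
	{
		unsigned int shift;

		/* Buffer size = (2 << shift) timestamps * 16 bytes each,
		 * i.e. 2^(shift + 5) bytes. */
		for (shift = 15; shift <= 26; shift++) {
			unsigned long long bytes = (2ULL << shift) * 16;
			printf("shift %2u => %4llu MB\n", shift, bytes >> 20);
		}
		/* e.g. 16 => 2 MB, 24 => 512 MB, 26 => 2048 MB (2 GB) */
		return 0;
	}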
diff --git a/litmus/Makefile b/litmus/Makefile
index bca61e6deb71..99f90c34f141 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
+obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
diff --git a/litmus/trace.c b/litmus/trace.c
new file mode 100644
index 000000000000..7d29590252fd
--- /dev/null
+++ b/litmus/trace.c
@@ -0,0 +1,284 @@
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <litmus/ftdev.h>
+#include <litmus/trace.h>
+
+/* dummy definition of is_realtime() */
+#define is_realtime(t) (0)
+
+/******************************************************************************/
+/* Allocation */
+/******************************************************************************/
+
+static struct ftdev overhead_dev;
+
+#define trace_ts_buf overhead_dev.minor[0].buf
+
+static unsigned int ts_seq_no = 0;
+
+DEFINE_PER_CPU(atomic_t, irq_fired_count);
+
+void ft_irq_fired(void)
+{
+	/* Only called with preemptions disabled. */
+	atomic_inc(&__get_cpu_var(irq_fired_count));
+}
+
+static inline void clear_irq_fired(void)
+{
+	atomic_set(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline unsigned int get_and_clear_irq_fired(void)
+{
+	/* This is potentially not atomic since we might migrate if
+	 * preemptions are not disabled. As a tradeoff between
+	 * accuracy and tracing overheads, this seems acceptable.
+	 * If it proves to be a problem, then one could add a callback
+	 * from the migration code to invalidate irq_fired_count.
+	 */
+	return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline void save_irq_flags(struct timestamp *ts, unsigned int irq_count)
+{
+	/* Store how many interrupts occurred. */
+	ts->irq_count = irq_count;
+	/* Extra flag because ts->irq_count overflows quickly. */
+	ts->irq_flag = irq_count > 0;
+}
+
+static inline void write_timestamp(uint8_t event,
+				   uint8_t type,
+				   uint8_t cpu,
+				   uint16_t pid_fragment,
+				   unsigned int irq_count,
+				   int record_irq,
+				   int hide_irq,
+				   uint64_t timestamp,
+				   int record_timestamp)
+{
+	unsigned long flags;
+	unsigned int seq_no;
+	struct timestamp *ts;
+
+	/* Avoid preemptions while recording the timestamp. This reduces the
+	 * number of "out of order" timestamps in the stream and makes
+	 * post-processing easier. */
+
+	local_irq_save(flags);
+
+	seq_no = fetch_and_inc((int *) &ts_seq_no);
+	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
+		ts->event = event;
+		ts->seq_no = seq_no;
+
+		ts->task_type = type;
+		ts->pid = pid_fragment;
+
+		ts->cpu = cpu;
+
+		if (record_irq)
+			irq_count = get_and_clear_irq_fired();
+
+		save_irq_flags(ts, irq_count - hide_irq);
+
+		if (record_timestamp)
+			timestamp = ft_timestamp();
+
+		ts->timestamp = timestamp;
+		ft_buffer_finish_write(trace_ts_buf, ts);
+	}
+
+	local_irq_restore(flags);
+}
+
+static void __add_timestamp_user(struct timestamp *pre_recorded)
+{
+	unsigned long flags;
+	unsigned int seq_no;
+	struct timestamp *ts;
+
+	local_irq_save(flags);
+
+	seq_no = fetch_and_inc((int *) &ts_seq_no);
+	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
+		*ts = *pre_recorded;
+		ts->seq_no = seq_no;
+		ts->cpu = raw_smp_processor_id();
+		save_irq_flags(ts, get_and_clear_irq_fired());
+		ft_buffer_finish_write(trace_ts_buf, ts);
+	}
+
+	local_irq_restore(flags);
+}
+
+feather_callback void save_timestamp(unsigned long event)
+{
+	write_timestamp(event, TSK_UNKNOWN,
+			raw_smp_processor_id(),
+			current->pid,
+			0, 1, 0,
+			0, 1);
+}
+
+feather_callback void save_timestamp_def(unsigned long event,
+					 unsigned long type)
+{
+	write_timestamp(event, type,
+			raw_smp_processor_id(),
+			current->pid,
+			0, 1, 0,
+			0, 1);
+}
+
+feather_callback void save_timestamp_task(unsigned long event,
+					  unsigned long t_ptr)
+{
+	struct task_struct *t = (struct task_struct *) t_ptr;
+	int rt = is_realtime(t);
+
+	write_timestamp(event, rt ? TSK_RT : TSK_BE,
+			raw_smp_processor_id(),
+			t->pid,
+			0, 1, 0,
+			0, 1);
+}
+
+feather_callback void save_timestamp_cpu(unsigned long event,
+					 unsigned long cpu)
+{
+	write_timestamp(event, TSK_UNKNOWN, cpu, current->pid,
+			0, 1, 0,
+			0, 1);
+}
+
+/* fake timestamp to user-reported time */
+feather_callback void save_timestamp_time(unsigned long event,
+					  unsigned long ptr)
+{
+	uint64_t *time = (uint64_t *) ptr;
+
+	write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
+			raw_smp_processor_id(), current->pid,
+			0, 1, 0,
+			*time, 0);
+}
+
+/* Record user-reported IRQ count */
+feather_callback void save_timestamp_irq(unsigned long event,
+					 unsigned long irq_counter_ptr)
+{
+	uint64_t *irqs = (uint64_t *) irq_counter_ptr;
+
+	write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
+			raw_smp_processor_id(), current->pid,
+			*irqs, 0, 0,
+			0, 1);
+}
+
+/* Suppress one IRQ from the irq count. Used by TS_SEND_RESCHED_END, which is
+ * called from within an interrupt that is expected. */
+feather_callback void save_timestamp_hide_irq(unsigned long event)
+{
+	write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
+			raw_smp_processor_id(), current->pid,
+			0, 1, 1,
+			0, 1);
+}
+
+/******************************************************************************/
+/* DEVICE FILE DRIVER */
+/******************************************************************************/
+
+#define NO_TIMESTAMPS (2 << CONFIG_SCHED_OVERHEAD_TRACE_SHIFT)
+
+static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
+{
+	unsigned int count = NO_TIMESTAMPS;
+
+	/* An overhead-tracing timestamp should be exactly 16 bytes long. */
+	BUILD_BUG_ON(sizeof(struct timestamp) != 16);
+
+	while (count && !trace_ts_buf) {
+		printk("time stamp buffer: trying to allocate %u time stamps.\n",
+		       count);
+		ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
+		count /= 2;
+	}
+	return ftdev->minor[idx].buf ? 0 : -ENOMEM;
+}
+
+static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
+{
+	free_ft_buffer(ftdev->minor[idx].buf);
+	ftdev->minor[idx].buf = NULL;
+}
+
+static ssize_t write_timestamp_from_user(struct ft_buffer* buf, size_t len,
+					 const char __user *from)
+{
+	ssize_t consumed = 0;
+	struct timestamp ts;
+
+	/* don't give us partial timestamps */
+	if (len % sizeof(ts))
+		return -EINVAL;
+
+	while (len >= sizeof(ts)) {
+		if (copy_from_user(&ts, from, sizeof(ts))) {
+			consumed = -EFAULT;
+			goto out;
+		}
+		len -= sizeof(ts);
+		from += sizeof(ts);
+		consumed += sizeof(ts);
+
+		__add_timestamp_user(&ts);
+	}
+
+out:
+	return consumed;
+}
+
+static int __init init_ft_overhead_trace(void)
+{
+	int err, cpu;
+
+	printk("Initializing Feather-Trace overhead tracing device.\n");
+	err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
+	if (err)
+		goto err_out;
+
+	overhead_dev.alloc = alloc_timestamp_buffer;
+	overhead_dev.free = free_timestamp_buffer;
+	overhead_dev.write = write_timestamp_from_user;
+
+	err = register_ftdev(&overhead_dev);
+	if (err)
+		goto err_dealloc;
+
+	/* Initialize each CPU's IRQ count. Note that clear_irq_fired()
+	 * above only clears the *local* CPU's counter, so set each
+	 * per-CPU counter explicitly instead of calling it in a loop. */
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		atomic_set(&per_cpu(irq_fired_count, cpu), 0);
+
+	return 0;
+
+err_dealloc:
+	ftdev_exit(&overhead_dev);
+err_out:
+	printk(KERN_WARNING "Could not register ft_trace module.\n");
+	return err;
+}
+
+static void __exit exit_ft_overhead_trace(void)
+{
+	ftdev_exit(&overhead_dev);
+}
+
+module_init(init_ft_overhead_trace);
+module_exit(exit_ft_overhead_trace);
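Since the patch wires write_timestamp_from_user() into the ftdev write handler, userspace can inject pre-recorded timestamps by writing whole 16-byte records to the device; partial records are rejected with -EINVAL, and seq_no, cpu, and the IRQ flags are overwritten in __add_timestamp_user(). A minimal userspace sketch; the struct layout below is an illustrative stand-in for the real struct timestamp in litmus/trace.h (not shown in this patch):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <fcntl.h>

	/* Illustrative stand-in: the real definition lives in
	 * litmus/trace.h and must be exactly 16 bytes. */
	struct timestamp {
		uint64_t timestamp;
		uint32_t seq_no;	/* overwritten by the kernel */
		uint8_t  cpu;		/* overwritten by the kernel */
		uint8_t  event;
		uint8_t  task_type;
		uint8_t  irq_info;	/* overwritten by the kernel */
	};
	_Static_assert(sizeof(struct timestamp) == 16, "must be 16 bytes");

	int main(void)
	{
		struct timestamp ts;
		int fd = open("/dev/litmus/ft_trace0", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&ts, 0, sizeof(ts));
		ts.event = 42;		/* illustrative event ID */
		ts.timestamp = 1234567;	/* user-recorded time */
		/* Always write whole multiples of sizeof(struct timestamp);
		 * partial records make the kernel return -EINVAL. */
		if (write(fd, &ts, sizeof(ts)) != sizeof(ts))
			perror("write");
		close(fd);
		return 0;
	}

Reading the same device (as ftcat does) drains the buffer, and the kernel-assigned seq_no lets post-processing detect dropped or reordered records.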