diff options
author | Steven Rostedt <srostedt@redhat.com> | 2008-05-12 15:20:44 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-23 14:40:36 -0400 |
commit | 60a11774b38fef1ab90b18c5353bd1c7c4d311c8 (patch) | |
tree | e2a6fd066b0dba6dcd776d07383e2932055cf66a /kernel/trace/trace_selftest.c | |
parent | e1c08bdd9fa73e44096e5a82c0d5928b04ab02c8 (diff) |
ftrace: add self-tests
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r-- | kernel/trace/trace_selftest.c | 415 |
1 file changed, 415 insertions, 0 deletions
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c new file mode 100644 index 000000000000..ef4d3cc009f5 --- /dev/null +++ b/kernel/trace/trace_selftest.c | |||
@@ -0,0 +1,415 @@ | |||
1 | /* Include in trace.c */ | ||
2 | |||
3 | #include <linux/kthread.h> | ||
4 | |||
5 | static inline int trace_valid_entry(struct trace_entry *entry) | ||
6 | { | ||
7 | switch (entry->type) { | ||
8 | case TRACE_FN: | ||
9 | case TRACE_CTX: | ||
10 | return 1; | ||
11 | } | ||
12 | return 0; | ||
13 | } | ||
14 | |||
/*
 * Walk every recorded entry for one CPU and verify that the buffer's
 * page chain is consistent and that each written entry has a known type.
 *
 * Returns 0 when the buffer is sane, -1 on any corruption (a diagnostic
 * is printed with KERN_CONT so it appends to the selftest status line).
 */
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct page *page;
	struct trace_entry *entries;
	int idx = 0;
	int i;

	/* data->trace must alias the first page on the trace_pages list */
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (data->trace != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = data->trace;

	for (i = 0; i < tr->entries; i++) {

		/* only indices below trace_idx have actually been written */
		if (i < data->trace_idx &&
		    !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ", entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			/* end of this page: advance to the next one */
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				/* list wrapped; legal only on the last entry */
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	/* after a full walk we must have consumed exactly the whole list */
	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
71 | |||
72 | /* | ||
73 | * Test the trace buffer to see if all the elements | ||
74 | * are still sane. | ||
75 | */ | ||
76 | static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | ||
77 | { | ||
78 | unsigned long cnt = 0; | ||
79 | int cpu; | ||
80 | int ret = 0; | ||
81 | |||
82 | for_each_possible_cpu(cpu) { | ||
83 | if (!tr->data[cpu]->trace) | ||
84 | continue; | ||
85 | |||
86 | cnt += tr->data[cpu]->trace_idx; | ||
87 | printk("%d: count = %ld\n", cpu, cnt); | ||
88 | |||
89 | ret = trace_test_buffer_cpu(tr, tr->data[cpu]); | ||
90 | if (ret) | ||
91 | break; | ||
92 | } | ||
93 | |||
94 | if (count) | ||
95 | *count = cnt; | ||
96 | |||
97 | return ret; | ||
98 | } | ||
99 | |||
#ifdef CONFIG_FTRACE
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* make sure functions have been recorded */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* let the tracer collect entries for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (ret)
		return ret;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		return -1;
	}

	return 0;
}
#endif /* CONFIG_FTRACE */
139 | |||
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Verify the irqsoff tracer: create a short interrupts-disabled
 * section for it to latch onto, then sanity-check both the live
 * buffer and the max-latency snapshot.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long saved_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* produce a brief irqs-off window for the tracer to record */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = saved_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
176 | |||
#ifdef CONFIG_PREEMPT_TRACER
/*
 * Verify the preemptoff tracer: create a short preemption-disabled
 * section for it to latch onto, then sanity-check both the live
 * buffer and the max-latency snapshot.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long saved_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* produce a brief preempt-off window for the tracer to record */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = saved_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
213 | |||
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Verify the preemptirqsoff tracer.  The critical section is produced
 * twice, once nesting irq-disable inside preempt-disable and once
 * ending it in the reverse order, so both latch paths of the tracer
 * get exercised.  Both the live buffer and the max-latency snapshot
 * are checked after each round.
 *
 * Returns 0 on success, negative on failure.  tracing_max_latency is
 * always restored on exit.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	/* ret is known to be zero here, so only the count needs testing */
	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
289 | |||
290 | #ifdef CONFIG_SCHED_TRACER | ||
291 | static int trace_wakeup_test_thread(void *data) | ||
292 | { | ||
293 | struct completion *x = data; | ||
294 | |||
295 | /* Make this a RT thread, doesn't need to be too high */ | ||
296 | |||
297 | rt_mutex_setprio(current, MAX_RT_PRIO - 5); | ||
298 | |||
299 | /* Make it know we have a new prio */ | ||
300 | complete(x); | ||
301 | |||
302 | /* now go to sleep and let the test wake us up */ | ||
303 | set_current_state(TASK_INTERRUPTIBLE); | ||
304 | schedule(); | ||
305 | |||
306 | /* we are awake, now wait to disappear */ | ||
307 | while (!kthread_should_stop()) { | ||
308 | /* | ||
309 | * This is an RT task, do short sleeps to let | ||
310 | * others run. | ||
311 | */ | ||
312 | msleep(100); | ||
313 | } | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | int | ||
319 | trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | ||
320 | { | ||
321 | unsigned long save_max = tracing_max_latency; | ||
322 | struct task_struct *p; | ||
323 | struct completion isrt; | ||
324 | unsigned long count; | ||
325 | int ret; | ||
326 | |||
327 | init_completion(&isrt); | ||
328 | |||
329 | /* create a high prio thread */ | ||
330 | p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test"); | ||
331 | if (!IS_ERR(p)) { | ||
332 | printk(KERN_CONT "Failed to create ftrace wakeup test thread "); | ||
333 | return -1; | ||
334 | } | ||
335 | |||
336 | /* make sure the thread is running at an RT prio */ | ||
337 | wait_for_completion(&isrt); | ||
338 | |||
339 | /* start the tracing */ | ||
340 | tr->ctrl = 1; | ||
341 | trace->init(tr); | ||
342 | /* reset the max latency */ | ||
343 | tracing_max_latency = 0; | ||
344 | |||
345 | /* sleep to let the RT thread sleep too */ | ||
346 | msleep(100); | ||
347 | |||
348 | /* | ||
349 | * Yes this is slightly racy. It is possible that for some | ||
350 | * strange reason that the RT thread we created, did not | ||
351 | * call schedule for 100ms after doing the completion, | ||
352 | * and we do a wakeup on a task that already is awake. | ||
353 | * But that is extremely unlikely, and the worst thing that | ||
354 | * happens in such a case, is that we disable tracing. | ||
355 | * Honestly, if this race does happen something is horrible | ||
356 | * wrong with the system. | ||
357 | */ | ||
358 | |||
359 | wake_up_process(p); | ||
360 | |||
361 | /* stop the tracing. */ | ||
362 | tr->ctrl = 0; | ||
363 | trace->ctrl_update(tr); | ||
364 | /* check both trace buffers */ | ||
365 | ret = trace_test_buffer(tr, NULL); | ||
366 | if (!ret) | ||
367 | ret = trace_test_buffer(&max_tr, &count); | ||
368 | |||
369 | |||
370 | trace->reset(tr); | ||
371 | |||
372 | tracing_max_latency = save_max; | ||
373 | |||
374 | /* kill the thread */ | ||
375 | kthread_stop(p); | ||
376 | |||
377 | if (!ret && !count) { | ||
378 | printk(KERN_CONT ".. no entries found .."); | ||
379 | ret = -1; | ||
380 | } | ||
381 | |||
382 | return ret; | ||
383 | } | ||
384 | #endif /* CONFIG_SCHED_TRACER */ | ||
385 | |||
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Verify the context-switch tracer: trace for 1/10 of a second and
 * then make sure the buffer is sane and non-empty.
 */
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* let context switches accumulate for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (ret)
		return ret;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		return -1;
	}

	return 0;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
413 | |||
414 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
415 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||