author	Steven Rostedt <srostedt@redhat.com>	2008-05-12 15:20:44 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:40:36 -0400
commit	60a11774b38fef1ab90b18c5353bd1c7c4d311c8 (patch)
tree	e2a6fd066b0dba6dcd776d07383e2932055cf66a /kernel
parent	e1c08bdd9fa73e44096e5a82c0d5928b04ab02c8 (diff)
ftrace: add self-tests
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/Kconfig              |  13
-rw-r--r--  kernel/trace/trace.c              |  63
-rw-r--r--  kernel/trace/trace.h              |  31
-rw-r--r--  kernel/trace/trace_functions.c    |   3
-rw-r--r--  kernel/trace/trace_irqsoff.c      |   9
-rw-r--r--  kernel/trace/trace_sched_switch.c |   3
-rw-r--r--  kernel/trace/trace_sched_wakeup.c |   3
-rw-r--r--  kernel/trace/trace_selftest.c     | 415
8 files changed, 538 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cad9db1dee02..3f73a1710242 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -105,3 +105,16 @@ config DYNAMIC_FTRACE
 	  wakes up once a second and checks to see if any ftrace calls
 	  were made. If so, it runs stop_machine (stops all CPUS)
 	  and modifies the code to jump over the call to ftrace.
+
+config FTRACE_SELFTEST
+	bool
+
+config FTRACE_STARTUP_TEST
+	bool "Perform a startup test on ftrace"
+	depends on TRACING
+	select FTRACE_SELFTEST
+	help
+	  This option performs a series of startup tests on ftrace. On
+	  bootup, a series of tests is run to verify that the tracer is
+	  functioning properly. It will run tests on all the configured
+	  tracers of ftrace.
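For reference, TRACING must already be enabled for the new option to be selectable, and FTRACE_SELFTEST is pulled in automatically via the select. A minimal .config fragment (illustrative sketch, not part of the patch) would be:

	CONFIG_TRACING=y
	CONFIG_FTRACE_STARTUP_TEST=y
	# selected automatically by FTRACE_STARTUP_TEST:
	CONFIG_FTRACE_SELFTEST=y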
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9bad2379115a..f6d026f17dbb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -32,6 +32,8 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
+static int tracing_disabled = 1;
+
 static long notrace
 ns2usecs(cycle_t nsec)
 {
@@ -217,11 +219,48 @@ int register_tracer(struct tracer *type)
 		}
 	}
 
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	if (type->selftest) {
+		struct tracer *saved_tracer = current_trace;
+		struct trace_array_cpu *data;
+		struct trace_array *tr = &global_trace;
+		int saved_ctrl = tr->ctrl;
+		int i;
+		/*
+		 * Run a selftest on this tracer.
+		 * Here we reset the trace buffer, and set the current
+		 * tracer to be this tracer. The tracer can then run some
+		 * internal tracing to verify that everything is in order.
+		 * If we fail, we do not register this tracer.
+		 */
+		for_each_possible_cpu(i) {
+			data = tr->data[i];
+			if (!data->trace)
+				continue;
+			tracing_reset(data);
+		}
+		current_trace = type;
+		tr->ctrl = 0;
+		/* the test is responsible for initializing and enabling */
+		pr_info("Testing tracer %s: ", type->name);
+		ret = type->selftest(type, tr);
+		/* the test is responsible for resetting too */
+		current_trace = saved_tracer;
+		tr->ctrl = saved_ctrl;
+		if (ret) {
+			printk(KERN_CONT "FAILED!\n");
+			goto out;
+		}
+		printk(KERN_CONT "PASSED\n");
+	}
+#endif
+
 	type->next = trace_types;
 	trace_types = type;
 	len = strlen(type->name);
 	if (len > max_tracer_type_len)
 		max_tracer_type_len = len;
+
  out:
 	mutex_unlock(&trace_types_lock);
 
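With the selftest hook in place, each register_tracer() call prints one line during boot via the pr_info()/printk(KERN_CONT) pair above. The resulting dmesg output would look roughly like this (a sketch; the tracer names shown are assumptions, and the actual set depends on which tracers are configured in):

	Testing tracer function: PASSED
	Testing tracer irqsoff: PASSED
	Testing tracer wakeup: PASSED

A tracer whose selftest returns non-zero prints FAILED! instead and is never added to trace_types.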
@@ -985,6 +1024,11 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 {
 	struct trace_iterator *iter;
 
+	if (tracing_disabled) {
+		*ret = -ENODEV;
+		return NULL;
+	}
+
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter) {
 		*ret = -ENOMEM;
@@ -1023,6 +1067,9 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
 {
+	if (tracing_disabled)
+		return -ENODEV;
+
 	filp->private_data = inode->i_private;
 	return 0;
 }
@@ -1128,6 +1175,9 @@ static int show_traces_open(struct inode *inode, struct file *file)
 {
 	int ret;
 
+	if (tracing_disabled)
+		return -ENODEV;
+
 	ret = seq_open(file, &show_traces_seq_ops);
 	if (!ret) {
 		struct seq_file *m = file->private_data;
@@ -1452,6 +1502,11 @@ struct dentry *tracing_init_dentry(void)
 	return d_tracer;
 }
 
+#ifdef CONFIG_FTRACE_SELFTEST
+/* Let selftest have access to static functions in this file */
+#include "trace_selftest.c"
+#endif
+
 static __init void tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
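The #include of a .c file here is deliberate: building trace_selftest.c as part of trace.c puts the selftest code in the same translation unit, so it can reach functions and variables that are static to trace.c. A minimal sketch of the technique, with hypothetical file and symbol names:

	/* foo.c */
	static int state;			/* invisible outside this translation unit */
	static void reset_state(void) { state = 0; }

	#ifdef CONFIG_FOO_SELFTEST
	/* textually merged into foo.c, so it may call reset_state() directly */
	#include "foo_selftest.c"
	#endif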
@@ -1585,6 +1640,7 @@ __init static int tracer_alloc_buffers(void)
 	void *array;
 	struct page *page;
 	int pages = 0;
+	int ret = -ENOMEM;
 	int i;
 
 	/* Allocate the first page for all buffers */
@@ -1650,6 +1706,9 @@ __init static int tracer_alloc_buffers(void)
 	register_tracer(&no_tracer);
 	current_trace = &no_tracer;
 
+	/* All seems OK, enable tracing */
+	tracing_disabled = 0;
+
 	return 0;
 
  free_buffers:
@@ -1678,7 +1737,7 @@ __init static int tracer_alloc_buffers(void)
 		}
 #endif
 	}
-	return -ENOMEM;
+	return ret;
 }
 
-device_initcall(tracer_alloc_buffers);
+fs_initcall(tracer_alloc_buffers);
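Changing device_initcall() to fs_initcall() is what makes the startup tests safe: fs_initcall runs at initcall level 5, one level before device_initcall at level 6, so the trace buffers are allocated and tracing_disabled is cleared before any tracer's own initcall can invoke register_tracer() and trigger a selftest. A sketch of the resulting ordering (init_function_trace as an assumed example of a tracer-registration initcall):

	fs_initcall(tracer_alloc_buffers);	/* level 5: buffers ready, tracing enabled */
	device_initcall(init_function_trace);	/* level 6: register_tracer() runs the selftest */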
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 83e257e38084..88edbf1f6788 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -99,6 +99,10 @@ struct tracer {
 	void (*start)(struct trace_iterator *iter);
 	void (*stop)(struct trace_iterator *iter);
 	void (*ctrl_update)(struct trace_array *tr);
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	int (*selftest)(struct tracer *trace,
+			struct trace_array *tr);
+#endif
 	struct tracer *next;
 	int print_max;
 };
@@ -185,4 +189,31 @@ extern int unregister_tracer_switch(struct tracer_switch_ops *ops);
 extern unsigned long ftrace_update_tot_cnt;
 #endif
 
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+#ifdef CONFIG_FTRACE
+extern int trace_selftest_startup_function(struct tracer *trace,
+					   struct trace_array *tr);
+#endif
+#ifdef CONFIG_IRQSOFF_TRACER
+extern int trace_selftest_startup_irqsoff(struct tracer *trace,
+					  struct trace_array *tr);
+#endif
+#ifdef CONFIG_PREEMPT_TRACER
+extern int trace_selftest_startup_preemptoff(struct tracer *trace,
+					     struct trace_array *tr);
+#endif
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
+extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
+						 struct trace_array *tr);
+#endif
+#ifdef CONFIG_SCHED_TRACER
+extern int trace_selftest_startup_wakeup(struct tracer *trace,
+					 struct trace_array *tr);
+#endif
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+extern int trace_selftest_startup_sched_switch(struct tracer *trace,
+					       struct trace_array *tr);
+#endif
+#endif /* CONFIG_FTRACE_STARTUP_TEST */
+
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 82988c5336e0..5d8ad7a09605 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -63,6 +63,9 @@ static struct tracer function_trace __read_mostly =
 	.init = function_trace_init,
 	.reset = function_trace_reset,
 	.ctrl_update = function_trace_ctrl_update,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest = trace_selftest_startup_function,
+#endif
 };
 
 static __init int init_function_trace(void)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 74165f611f36..14183b8f79c5 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -432,6 +432,9 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.close = irqsoff_tracer_close,
 	.ctrl_update = irqsoff_tracer_ctrl_update,
 	.print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest = trace_selftest_startup_irqsoff,
+#endif
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
@@ -455,6 +458,9 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.close = irqsoff_tracer_close,
 	.ctrl_update = irqsoff_tracer_ctrl_update,
 	.print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest = trace_selftest_startup_preemptoff,
+#endif
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
@@ -480,6 +486,9 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.close = irqsoff_tracer_close,
 	.ctrl_update = irqsoff_tracer_ctrl_update,
 	.print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest = trace_selftest_startup_preemptirqsoff,
+#endif
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 2715267be469..6c9284103a62 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -107,6 +107,9 @@ static struct tracer sched_switch_trace __read_mostly =
 	.init = sched_switch_trace_init,
 	.reset = sched_switch_trace_reset,
 	.ctrl_update = sched_switch_trace_ctrl_update,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest = trace_selftest_startup_sched_switch,
+#endif
 };
 
 __init static int init_sched_switch_trace(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 7c3ccefcf4c3..3d10ff01f805 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -295,6 +295,9 @@ static struct tracer wakeup_tracer __read_mostly =
 	.close = wakeup_tracer_close,
 	.ctrl_update = wakeup_tracer_ctrl_update,
 	.print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest = trace_selftest_startup_wakeup,
+#endif
 };
 
 __init static int init_wakeup_tracer(void)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
new file mode 100644
index 000000000000..ef4d3cc009f5
--- /dev/null
+++ b/kernel/trace/trace_selftest.c
@@ -0,0 +1,415 @@
+/* Include in trace.c */
+
+#include <linux/kthread.h>
+
+static inline int trace_valid_entry(struct trace_entry *entry)
+{
+	switch (entry->type) {
+	case TRACE_FN:
+	case TRACE_CTX:
+		return 1;
+	}
+	return 0;
+}
+
+static int
+trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
+{
+	struct page *page;
+	struct trace_entry *entries;
+	int idx = 0;
+	int i;
+
+	page = list_entry(data->trace_pages.next, struct page, lru);
+	entries = page_address(page);
+
+	if (data->trace != entries)
+		goto failed;
+
+	/*
+	 * The starting trace buffer always has valid elements,
+	 * if any element exists.
+	 */
+	entries = data->trace;
+
+	for (i = 0; i < tr->entries; i++) {
+
+		if (i < data->trace_idx &&
+		    !trace_valid_entry(&entries[idx])) {
+			printk(KERN_CONT ".. invalid entry %d ", entries[idx].type);
+			goto failed;
+		}
+
+		idx++;
+		if (idx >= ENTRIES_PER_PAGE) {
+			page = virt_to_page(entries);
+			if (page->lru.next == &data->trace_pages) {
+				if (i != tr->entries - 1) {
+					printk(KERN_CONT ".. entries buffer mismatch");
+					goto failed;
+				}
+			} else {
+				page = list_entry(page->lru.next, struct page, lru);
+				entries = page_address(page);
+			}
+			idx = 0;
+		}
+	}
+
+	page = virt_to_page(entries);
+	if (page->lru.next != &data->trace_pages) {
+		printk(KERN_CONT ".. too many entries");
+		goto failed;
+	}
+
+	return 0;
+
+ failed:
+	printk(KERN_CONT ".. corrupted trace buffer .. ");
+	return -1;
+}
+
+/*
+ * Test the trace buffer to see if all the elements
+ * are still sane.
+ */
+static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
+{
+	unsigned long cnt = 0;
+	int cpu;
+	int ret = 0;
+
+	for_each_possible_cpu(cpu) {
+		if (!tr->data[cpu]->trace)
+			continue;
+
+		cnt += tr->data[cpu]->trace_idx;
+		printk("%d: count = %ld\n", cpu, cnt);
+
+		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
+		if (ret)
+			break;
+	}
+
+	if (count)
+		*count = cnt;
+
+	return ret;
+}
+
+#ifdef CONFIG_FTRACE
+/*
+ * Simple verification test of ftrace function tracer.
+ * Enable ftrace, sleep 1/10 second, and then read the trace
+ * buffer to see if all is in order.
+ */
+int
+trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long count;
+	int ret;
+
+	/* make sure functions have been recorded */
+	ret = ftrace_force_update();
+	if (ret) {
+		printk(KERN_CONT ".. ftraced failed .. ");
+		return ret;
+	}
+
+	/* start the tracing */
+	tr->ctrl = 1;
+	trace->init(tr);
+	/* sleep for 1/10 of a second */
+	msleep(100);
+	/* stop the tracing. */
+	tr->ctrl = 0;
+	trace->ctrl_update(tr);
+	/* check the trace buffer */
+	ret = trace_test_buffer(tr, &count);
+	trace->reset(tr);
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
+	return ret;
+}
+#endif /* CONFIG_FTRACE */
+
+#ifdef CONFIG_IRQSOFF_TRACER
+int
+trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long save_max = tracing_max_latency;
+	unsigned long count;
+	int ret;
+
+	/* start the tracing */
+	tr->ctrl = 1;
+	trace->init(tr);
+	/* reset the max latency */
+	tracing_max_latency = 0;
+	/* disable interrupts for a bit */
+	local_irq_disable();
+	udelay(100);
+	local_irq_enable();
+	/* stop the tracing. */
+	tr->ctrl = 0;
+	trace->ctrl_update(tr);
+	/* check both trace buffers */
+	ret = trace_test_buffer(tr, NULL);
+	if (!ret)
+		ret = trace_test_buffer(&max_tr, &count);
+	trace->reset(tr);
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
+	tracing_max_latency = save_max;
+
+	return ret;
+}
+#endif /* CONFIG_IRQSOFF_TRACER */
+
+#ifdef CONFIG_PREEMPT_TRACER
+int
+trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long save_max = tracing_max_latency;
+	unsigned long count;
+	int ret;
+
+	/* start the tracing */
+	tr->ctrl = 1;
+	trace->init(tr);
+	/* reset the max latency */
+	tracing_max_latency = 0;
+	/* disable preemption for a bit */
+	preempt_disable();
+	udelay(100);
+	preempt_enable();
+	/* stop the tracing. */
+	tr->ctrl = 0;
+	trace->ctrl_update(tr);
+	/* check both trace buffers */
+	ret = trace_test_buffer(tr, NULL);
+	if (!ret)
+		ret = trace_test_buffer(&max_tr, &count);
+	trace->reset(tr);
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
+	tracing_max_latency = save_max;
+
+	return ret;
+}
+#endif /* CONFIG_PREEMPT_TRACER */
+
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
+int
+trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long save_max = tracing_max_latency;
+	unsigned long count;
+	int ret;
+
+	/* start the tracing */
+	tr->ctrl = 1;
+	trace->init(tr);
+
+	/* reset the max latency */
+	tracing_max_latency = 0;
+
+	/* disable preemption and interrupts for a bit */
+	preempt_disable();
+	local_irq_disable();
+	udelay(100);
+	preempt_enable();
+	/* reverse the order of preempt vs irqs */
+	local_irq_enable();
+
+	/* stop the tracing. */
+	tr->ctrl = 0;
+	trace->ctrl_update(tr);
+	/* check both trace buffers */
+	ret = trace_test_buffer(tr, NULL);
+	if (ret)
+		goto out;
+
+	ret = trace_test_buffer(&max_tr, &count);
+	if (ret)
+		goto out;
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+		goto out;
+	}
+
+	/* do the test by disabling interrupts first this time */
+	tracing_max_latency = 0;
+	tr->ctrl = 1;
+	trace->ctrl_update(tr);
+	preempt_disable();
+	local_irq_disable();
+	udelay(100);
+	preempt_enable();
+	/* reverse the order of preempt vs irqs */
+	local_irq_enable();
+
+	/* stop the tracing. */
+	tr->ctrl = 0;
+	trace->ctrl_update(tr);
+	/* check both trace buffers */
+	ret = trace_test_buffer(tr, NULL);
+	if (ret)
+		goto out;
+
+	ret = trace_test_buffer(&max_tr, &count);
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+		goto out;
+	}
+
+ out:
+	trace->reset(tr);
+	tracing_max_latency = save_max;
+
+	return ret;
+}
+#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
+
+#ifdef CONFIG_SCHED_TRACER
+static int trace_wakeup_test_thread(void *data)
+{
+	struct completion *x = data;
+
+	/* Make this an RT thread; it doesn't need to be too high */
+	rt_mutex_setprio(current, MAX_RT_PRIO - 5);
+
+	/* Make it known we have a new prio */
+	complete(x);
+
+	/* now go to sleep and let the test wake us up */
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule();
+
+	/* we are awake, now wait to disappear */
+	while (!kthread_should_stop()) {
+		/*
+		 * This is an RT task, do short sleeps to let
+		 * others run.
+		 */
+		msleep(100);
+	}
+
+	return 0;
+}
+
+int
+trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long save_max = tracing_max_latency;
+	struct task_struct *p;
+	struct completion isrt;
+	unsigned long count;
+	int ret;
+
+	init_completion(&isrt);
+
+	/* create a high prio thread */
+	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
+	if (IS_ERR(p)) {
+		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
+		return -1;
+	}
+
+	/* make sure the thread is running at an RT prio */
+	wait_for_completion(&isrt);
+
+	/* start the tracing */
+	tr->ctrl = 1;
+	trace->init(tr);
+	/* reset the max latency */
+	tracing_max_latency = 0;
+
+	/* sleep to let the RT thread sleep too */
+	msleep(100);
+
+	/*
+	 * Yes, this is slightly racy. It is possible that for some
+	 * strange reason the RT thread we created did not call
+	 * schedule() for 100ms after doing the completion, in which
+	 * case we would wake up a task that is already awake.
+	 * But that is extremely unlikely, and the worst thing that
+	 * happens in such a case is that we disable tracing.
+	 * Honestly, if this race does happen, something is horribly
+	 * wrong with the system.
+	 */
+	wake_up_process(p);
+
+	/* stop the tracing. */
+	tr->ctrl = 0;
+	trace->ctrl_update(tr);
+	/* check both trace buffers */
+	ret = trace_test_buffer(tr, NULL);
+	if (!ret)
+		ret = trace_test_buffer(&max_tr, &count);
+
+	trace->reset(tr);
+
+	tracing_max_latency = save_max;
+
+	/* kill the thread */
+	kthread_stop(p);
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
+	return ret;
+}
+#endif /* CONFIG_SCHED_TRACER */
+
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+int
+trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long count;
+	int ret;
+
+	/* start the tracing */
+	tr->ctrl = 1;
+	trace->init(tr);
+	/* sleep for 1/10 of a second */
+	msleep(100);
+	/* stop the tracing. */
+	tr->ctrl = 0;
+	trace->ctrl_update(tr);
+	/* check the trace buffer */
+	ret = trace_test_buffer(tr, &count);
+	trace->reset(tr);
+
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
+	return ret;
+}
+#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+#endif /* CONFIG_DYNAMIC_FTRACE */