Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--  kernel/trace/trace_selftest.c | 274
1 file changed, 164 insertions(+), 110 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 0911b7e073bf..88c8eb70f54a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -9,65 +9,30 @@ static inline int trace_valid_entry(struct trace_entry *entry)
         case TRACE_FN:
         case TRACE_CTX:
         case TRACE_WAKE:
+        case TRACE_CONT:
         case TRACE_STACK:
+        case TRACE_PRINT:
         case TRACE_SPECIAL:
+        case TRACE_BRANCH:
                 return 1;
         }
         return 0;
 }
 
-static int
-trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
+static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
-        struct trace_entry *entries;
-        struct page *page;
-        int idx = 0;
-        int i;
+        struct ring_buffer_event *event;
+        struct trace_entry *entry;
 
-        BUG_ON(list_empty(&data->trace_pages));
-        page = list_entry(data->trace_pages.next, struct page, lru);
-        entries = page_address(page);
+        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+                entry = ring_buffer_event_data(event);
 
-        check_pages(data);
-        if (head_page(data) != entries)
-                goto failed;
-
-        /*
-         * The starting trace buffer always has valid elements,
-         * if any element exists.
-         */
-        entries = head_page(data);
-
-        for (i = 0; i < tr->entries; i++) {
-
-                if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
+                if (!trace_valid_entry(entry)) {
                         printk(KERN_CONT ".. invalid entry %d ",
-                                entries[idx].type);
+                                entry->type);
                         goto failed;
                 }
-
-                idx++;
-                if (idx >= ENTRIES_PER_PAGE) {
-                        page = virt_to_page(entries);
-                        if (page->lru.next == &data->trace_pages) {
-                                if (i != tr->entries - 1) {
-                                        printk(KERN_CONT ".. entries buffer mismatch");
-                                        goto failed;
-                                }
-                        } else {
-                                page = list_entry(page->lru.next, struct page, lru);
-                                entries = page_address(page);
-                        }
-                        idx = 0;
-                }
         }
-
-        page = virt_to_page(entries);
-        if (page->lru.next != &data->trace_pages) {
-                printk(KERN_CONT ".. too many entries");
-                goto failed;
-        }
-
         return 0;
 
  failed:
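The hunk above replaces the hand-rolled page-list walk with the ring buffer's consuming-read API: ring_buffer_consume() pops one event at a time from the given CPU's buffer until it is empty, so the validator no longer needs to know about pages, indexes, or entry counts. A minimal sketch of the idiom, using only calls visible in this hunk (the helper name drain_and_check is hypothetical; trace_valid_entry() is the static checker defined at the top of this file):

#include <linux/ring_buffer.h>

static int drain_and_check(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;

        /*
         * The third argument is an optional timestamp out-parameter;
         * the selftest passes NULL because it only checks contents.
         */
        while ((event = ring_buffer_consume(buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);
                if (!trace_valid_entry(entry))
                        return -1;      /* unknown entry type */
        }
        return 0;       /* buffer drained, all entries valid */
}

Note that consuming reads empty the buffer as a side effect, which is acceptable here because each selftest resets its tracer afterwards.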
@@ -87,20 +52,18 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
         int cpu, ret = 0;
 
         /* Don't allow flipping of max traces now */
-        raw_local_irq_save(flags);
+        local_irq_save(flags);
         __raw_spin_lock(&ftrace_max_lock);
-        for_each_possible_cpu(cpu) {
-                if (!head_page(tr->data[cpu]))
-                        continue;
 
-                cnt += tr->data[cpu]->trace_idx;
+        cnt = ring_buffer_entries(tr->buffer);
 
-                ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
+        for_each_possible_cpu(cpu) {
+                ret = trace_test_buffer_cpu(tr, cpu);
                 if (ret)
                         break;
         }
         __raw_spin_unlock(&ftrace_max_lock);
-        raw_local_irq_restore(flags);
+        local_irq_restore(flags);
 
         if (count)
                 *count = cnt;
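One subtlety in the new trace_test_buffer(): the entry count must be sampled with ring_buffer_entries() before the per-CPU loop runs, because trace_test_buffer_cpu() now uses consuming reads and leaves the buffer empty. A condensed sketch of that ordering constraint (checked_entry_count is a hypothetical name for illustration):

static unsigned long checked_entry_count(struct trace_array *tr)
{
        /* snapshot the total across all CPUs first... */
        unsigned long cnt = ring_buffer_entries(tr->buffer);
        int cpu;

        /* ...then drain and validate, which destroys the contents */
        for_each_possible_cpu(cpu)
                if (trace_test_buffer_cpu(tr, cpu))
                        return 0;       /* invalid entry found */
        return cnt;
}

The irq-disable plus __raw_spin_lock(&ftrace_max_lock) bracket is kept (with the plain rather than raw_ irq primitives) so the max-latency buffer cannot be swapped in while the check runs.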
@@ -108,7 +71,12 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
         return ret;
 }
 
-#ifdef CONFIG_FTRACE
+static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
+{
+        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
+               trace->name, init_ret);
+}
+#ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
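trace->init() now returns an error code, so the patch adds one shared helper to report failures instead of open-coding a printk in every selftest. Every startup test below switches to the same call pattern, sketched here straight from the hunks that follow:

        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;     /* or goto out, where state must be restored */
        }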
@@ -120,11 +88,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
 {
-        unsigned long count;
-        int ret;
         int save_ftrace_enabled = ftrace_enabled;
         int save_tracer_enabled = tracer_enabled;
+        unsigned long count;
         char *func_name;
+        int ret;
 
         /* The ftrace test PASSED */
         printk(KERN_CONT "PASSED\n");
@@ -137,13 +105,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
         /* passed in by parameter to fool gcc from optimizing */
         func();
 
-        /* update the records */
-        ret = ftrace_force_update();
-        if (ret) {
-                printk(KERN_CONT ".. ftraced failed .. ");
-                return ret;
-        }
-
         /*
          * Some archs *cough*PowerPC*cough* add charachters to the
          * start of the function names. We simply put a '*' to
@@ -155,8 +116,12 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
         ftrace_set_filter(func_name, strlen(func_name), 1);
 
         /* enable tracing */
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                goto out;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
 
@@ -178,13 +143,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
         msleep(100);
 
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         ftrace_enabled = 0;
 
         /* check the trace buffer */
         ret = trace_test_buffer(tr, &count);
         trace->reset(tr);
+        tracing_start();
 
         /* we should only have one item */
         if (!ret && count != 1) {
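With tr->ctrl and trace->ctrl_update() gone, tracing is now paused and resumed globally. Every selftest in this patch converges on the same shutdown bracket, which keeps the buffer quiescent while it is inspected and leaves tracing running again when the selftest returns:

        /* the recurring sequence, as in the hunk above */
        tracing_stop();                         /* freeze the ring buffer   */
        ret = trace_test_buffer(tr, &count);    /* inspect it while stopped */
        trace->reset(tr);                       /* tracer-specific teardown */
        tracing_start();                        /* re-enable global tracing */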
@@ -192,6 +157,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                 ret = -1;
                 goto out;
         }
+
  out:
         ftrace_enabled = save_ftrace_enabled;
         tracer_enabled = save_tracer_enabled;
@@ -212,37 +178,34 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 int
 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 {
-        unsigned long count;
-        int ret;
         int save_ftrace_enabled = ftrace_enabled;
         int save_tracer_enabled = tracer_enabled;
+        unsigned long count;
+        int ret;
 
         /* make sure msleep has been recorded */
         msleep(1);
 
-        /* force the recorded functions to be traced */
-        ret = ftrace_force_update();
-        if (ret) {
-                printk(KERN_CONT ".. ftraced failed .. ");
-                return ret;
-        }
-
         /* start the tracing */
         ftrace_enabled = 1;
         tracer_enabled = 1;
 
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                goto out;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         ftrace_enabled = 0;
 
         /* check the trace buffer */
         ret = trace_test_buffer(tr, &count);
         trace->reset(tr);
+        tracing_start();
 
         if (!ret && !count) {
                 printk(KERN_CONT ".. no entries found ..");
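Both function-tracer selftests also drop the ftrace_force_update() call: the ftraced kernel thread that the removed ".. ftraced failed .." message referred to is gone, and (assuming the dynamic-ftrace rework this patch belongs to) call-site records are updated at boot and module load instead, so the selftest has nothing left to force. What remains is a compact skeleton, condensed here with error paths trimmed; a sketch of the code above, not a drop-in replacement:

        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = trace->init(tr);          /* replaces tr->ctrl = 1 + init */
        msleep(100);                    /* let some functions be traced */
        tracing_stop();
        ftrace_enabled = 0;

        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();                /* buffer checked, resume tracing */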
@@ -263,7 +226,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
         return ret;
 }
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSOFF_TRACER
 int
@@ -274,8 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
         int ret;
 
         /* start the tracing */
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* reset the max latency */
         tracing_max_latency = 0;
         /* disable interrupts for a bit */
@@ -283,13 +250,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
         udelay(100);
         local_irq_enable();
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         /* check both trace buffers */
         ret = trace_test_buffer(tr, NULL);
         if (!ret)
                 ret = trace_test_buffer(&max_tr, &count);
         trace->reset(tr);
+        tracing_start();
 
         if (!ret && !count) {
                 printk(KERN_CONT ".. no entries found ..");
@@ -310,9 +277,26 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
         unsigned long count;
         int ret;
 
+        /*
+         * Now that the big kernel lock is no longer preemptable,
+         * and this is called with the BKL held, it will always
+         * fail. If preemption is already disabled, simply
+         * pass the test. When the BKL is removed, or becomes
+         * preemptible again, we will once again test this,
+         * so keep it in.
+         */
+        if (preempt_count()) {
+                printk(KERN_CONT "can not test ... force ");
+                return 0;
+        }
+
         /* start the tracing */
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* reset the max latency */
         tracing_max_latency = 0;
         /* disable preemption for a bit */
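The new guard at the top of the preempt-off test deserves a note: these selftests run at tracer-registration time, which can happen with the (now non-preemptible) big kernel lock held. In that case preempt_count() is already non-zero, the tracer could never observe a preempt-enabled-to-disabled transition of its own, and the test would always fail. The guard turns that situation into a forced pass:

        if (preempt_count()) {
                /*
                 * Already non-preemptible (e.g. BKL held): we cannot
                 * create a measurable preempt-off section, so pass.
                 */
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }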
@@ -320,13 +304,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
         udelay(100);
         preempt_enable();
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         /* check both trace buffers */
         ret = trace_test_buffer(tr, NULL);
         if (!ret)
                 ret = trace_test_buffer(&max_tr, &count);
         trace->reset(tr);
+        tracing_start();
 
         if (!ret && !count) {
                 printk(KERN_CONT ".. no entries found ..");
@@ -347,9 +331,25 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
         unsigned long count;
         int ret;
 
+        /*
+         * Now that the big kernel lock is no longer preemptable,
+         * and this is called with the BKL held, it will always
+         * fail. If preemption is already disabled, simply
+         * pass the test. When the BKL is removed, or becomes
+         * preemptible again, we will once again test this,
+         * so keep it in.
+         */
+        if (preempt_count()) {
+                printk(KERN_CONT "can not test ... force ");
+                return 0;
+        }
+
         /* start the tracing */
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                goto out;
+        }
 
         /* reset the max latency */
         tracing_max_latency = 0;
@@ -363,27 +363,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
         local_irq_enable();
 
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         /* check both trace buffers */
         ret = trace_test_buffer(tr, NULL);
-        if (ret)
+        if (ret) {
+                tracing_start();
                 goto out;
+        }
 
         ret = trace_test_buffer(&max_tr, &count);
-        if (ret)
+        if (ret) {
+                tracing_start();
                 goto out;
+        }
 
         if (!ret && !count) {
                 printk(KERN_CONT ".. no entries found ..");
                 ret = -1;
+                tracing_start();
                 goto out;
         }
 
         /* do the test by disabling interrupts first this time */
         tracing_max_latency = 0;
-        tr->ctrl = 1;
-        trace->ctrl_update(tr);
+        tracing_start();
         preempt_disable();
         local_irq_disable();
         udelay(100);
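Because tracing_stop() is global, every failure path in the combined preempt/irqs-off test must restart tracing before bailing out; otherwise all subsequent selftests would run against a stopped buffer. That is why each early goto out above gains a tracing_start(), and why the second measurement pass begins with a bare tracing_start() in place of the old tr->ctrl = 1 / ctrl_update() pair:

        ret = trace_test_buffer(tr, NULL);
        if (ret) {
                tracing_start();        /* leave tracing usable on failure */
                goto out;
        }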
@@ -392,8 +395,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
         local_irq_enable();
 
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         /* check both trace buffers */
         ret = trace_test_buffer(tr, NULL);
         if (ret)
@@ -409,12 +411,22 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 
  out:
         trace->reset(tr);
+        tracing_start();
         tracing_max_latency = save_max;
 
         return ret;
 }
 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
 
+#ifdef CONFIG_NOP_TRACER
+int
+trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
+{
+        /* What could possibly go wrong? */
+        return 0;
+}
+#endif
+
 #ifdef CONFIG_SCHED_TRACER
 static int trace_wakeup_test_thread(void *data)
 {
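The nop tracer records nothing, so its selftest is a deliberate no-op that always passes; the stub exists so the tracer can still be registered when CONFIG_FTRACE_STARTUP_TEST is enabled. For context, a sketch of how a selftest is wired to a tracer, assuming the usual trace_nop.c naming and the struct tracer layout of this era (the init/reset callbacks are assumptions, not part of this patch):

static struct tracer nop_trace __read_mostly = {
        .name           = "nop",
        .init           = nop_trace_init,
        .reset          = nop_trace_reset,
#ifdef CONFIG_FTRACE_STARTUP_TEST
        .selftest       = trace_selftest_startup_nop,
#endif
};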
@@ -465,8 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
         wait_for_completion(&isrt);
 
         /* start the tracing */
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* reset the max latency */
         tracing_max_latency = 0;
 
@@ -486,9 +502,11 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 
         wake_up_process(p);
 
+        /* give a little time to let the thread wake up */
+        msleep(100);
+
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         /* check both trace buffers */
         ret = trace_test_buffer(tr, NULL);
         if (!ret)
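The added msleep(100) closes a race in the wakeup test: wake_up_process() only makes the RT thread runnable, and without the sleep the test could reach tracing_stop() before the thread was ever scheduled, leaving the max-latency buffer empty and the test failing spuriously:

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);    /* without this, tracing may stop before the
                         * woken task runs and no latency is recorded */

        tracing_stop();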
@@ -496,6 +514,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 
 
         trace->reset(tr);
+        tracing_start();
 
         tracing_max_latency = save_max;
 
@@ -519,16 +538,20 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
         int ret;
 
         /* start the tracing */
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         /* check the trace buffer */
         ret = trace_test_buffer(tr, &count);
         trace->reset(tr);
+        tracing_start();
 
         if (!ret && !count) {
                 printk(KERN_CONT ".. no entries found ..");
@@ -547,17 +570,48 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
         int ret;
 
         /* start the tracing */
-        tr->ctrl = 1;
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return 0;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
         /* stop the tracing. */
-        tr->ctrl = 0;
-        trace->ctrl_update(tr);
+        tracing_stop();
         /* check the trace buffer */
         ret = trace_test_buffer(tr, &count);
         trace->reset(tr);
+        tracing_start();
 
         return ret;
 }
 #endif /* CONFIG_SYSPROF_TRACER */
+
+#ifdef CONFIG_BRANCH_TRACER
+int
+trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
+{
+        unsigned long count;
+        int ret;
+
+        /* start the tracing */
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
+        /* Sleep for a 1/10 of a second */
+        msleep(100);
+        /* stop the tracing. */
+        tracing_stop();
+        /* check the trace buffer */
+        ret = trace_test_buffer(tr, &count);
+        trace->reset(tr);
+        tracing_start();
+
+        return ret;
+}
+#endif /* CONFIG_BRANCH_TRACER */
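The new branch-tracer selftest follows the same template as the sched_switch and sysprof ones, which by the end of this patch all reduce to the shape below (a hypothetical condensation for illustration, not code from the file):

static int selftest_simple_template(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        ret = trace->init(tr);                  /* start the tracer */
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        msleep(100);                            /* collect ~0.1s of events */
        tracing_stop();                         /* freeze the buffer       */
        ret = trace_test_buffer(tr, &count);    /* validate the entries    */
        trace->reset(tr);
        tracing_start();

        return ret;
}

One divergence worth noting: the sysprof variant returns 0 rather than ret when init fails, so a failed sysprof init warns but does not fail the startup test.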