Diffstat (limited to 'kernel/trace/trace_selftest.c')
 -rw-r--r--  kernel/trace/trace_selftest.c | 173
 1 file changed, 133 insertions(+), 40 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 90bc752a7580..88c8eb70f54a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_STACK:
 	case TRACE_PRINT:
 	case TRACE_SPECIAL:
+	case TRACE_BRANCH:
 		return 1;
 	}
 	return 0;
@@ -51,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	int cpu, ret = 0;
 
 	/* Don't allow flipping of max traces now */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);
 
 	cnt = ring_buffer_entries(tr->buffer);
@@ -62,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 		break;
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	if (count)
 		*count = cnt;
@@ -70,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	return ret;
 }
 
+static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
+{
+	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
+	       trace->name, init_ret);
+}
 #ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -110,8 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	ftrace_set_filter(func_name, strlen(func_name), 1);
 
 	/* enable tracing */
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
 
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
@@ -134,13 +143,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	msleep(100);
 
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
 	ret = trace_test_buffer(tr, &count);
 	trace->reset(tr);
+	tracing_start();
 
 	/* we should only have one item */
 	if (!ret && count != 1) {
@@ -148,6 +157,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 		ret = -1;
 		goto out;
 	}
+
  out:
 	ftrace_enabled = save_ftrace_enabled;
 	tracer_enabled = save_tracer_enabled;
@@ -180,18 +190,22 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	ftrace_enabled = 1;
 	tracer_enabled = 1;
 
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
 	ret = trace_test_buffer(tr, &count);
 	trace->reset(tr);
+	tracing_start();
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
@@ -223,8 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 	/* disable interrupts for a bit */
@@ -232,13 +250,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	udelay(100);
 	local_irq_enable();
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
 	if (!ret)
 		ret = trace_test_buffer(&max_tr, &count);
 	trace->reset(tr);
+	tracing_start();
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
@@ -259,9 +277,26 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	unsigned long count;
 	int ret;
 
+	/*
+	 * Now that the big kernel lock is no longer preemptable,
+	 * and this is called with the BKL held, it will always
+	 * fail. If preemption is already disabled, simply
+	 * pass the test. When the BKL is removed, or becomes
+	 * preemptible again, we will once again test this,
+	 * so keep it in.
+	 */
+	if (preempt_count()) {
+		printk(KERN_CONT "can not test ... force ");
+		return 0;
+	}
+
 	/* start the tracing */
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 	/* disable preemption for a bit */
@@ -269,13 +304,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	udelay(100);
 	preempt_enable();
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
 	if (!ret)
 		ret = trace_test_buffer(&max_tr, &count);
 	trace->reset(tr);
+	tracing_start();
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
@@ -296,9 +331,25 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	unsigned long count;
 	int ret;
 
+	/*
+	 * Now that the big kernel lock is no longer preemptable,
+	 * and this is called with the BKL held, it will always
+	 * fail. If preemption is already disabled, simply
+	 * pass the test. When the BKL is removed, or becomes
+	 * preemptible again, we will once again test this,
+	 * so keep it in.
+	 */
+	if (preempt_count()) {
+		printk(KERN_CONT "can not test ... force ");
+		return 0;
+	}
+
 	/* start the tracing */
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
 
 	/* reset the max latency */
 	tracing_max_latency = 0;
@@ -312,27 +363,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	local_irq_enable();
 
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
-	if (ret)
+	if (ret) {
+		tracing_start();
 		goto out;
+	}
 
 	ret = trace_test_buffer(&max_tr, &count);
-	if (ret)
+	if (ret) {
+		tracing_start();
 		goto out;
+	}
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
 		ret = -1;
+		tracing_start();
 		goto out;
 	}
 
 	/* do the test by disabling interrupts first this time */
 	tracing_max_latency = 0;
-	tr->ctrl = 1;
-	trace->ctrl_update(tr);
+	tracing_start();
 	preempt_disable();
 	local_irq_disable();
 	udelay(100);
@@ -341,8 +395,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	local_irq_enable();
 
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
 	if (ret)
@@ -358,6 +411,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 
  out:
 	trace->reset(tr);
+	tracing_start();
 	tracing_max_latency = save_max;
 
 	return ret;
@@ -423,8 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	wait_for_completion(&isrt);
 
 	/* start the tracing */
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 
@@ -448,8 +506,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	msleep(100);
 
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
 	if (!ret)
@@ -457,6 +514,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 
 
 	trace->reset(tr);
+	tracing_start();
 
 	tracing_max_latency = save_max;
 
@@ -480,16 +538,20 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
 	int ret;
 
 	/* start the tracing */
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	/* check the trace buffer */
 	ret = trace_test_buffer(tr, &count);
 	trace->reset(tr);
+	tracing_start();
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
@@ -508,17 +570,48 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	tr->ctrl = 1;
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return 0;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
-	tr->ctrl = 0;
-	trace->ctrl_update(tr);
+	tracing_stop();
 	/* check the trace buffer */
 	ret = trace_test_buffer(tr, &count);
 	trace->reset(tr);
+	tracing_start();
 
 	return ret;
 }
 #endif /* CONFIG_SYSPROF_TRACER */
+
+#ifdef CONFIG_BRANCH_TRACER
+int
+trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long count;
+	int ret;
+
+	/* start the tracing */
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
+	/* Sleep for a 1/10 of a second */
+	msleep(100);
+	/* stop the tracing. */
+	tracing_stop();
+	/* check the trace buffer */
+	ret = trace_test_buffer(tr, &count);
+	trace->reset(tr);
+	tracing_start();
+
+	return ret;
+}
+#endif /* CONFIG_BRANCH_TRACER */
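
Every selftest touched by this patch converges on the same skeleton, visible verbatim in the new trace_selftest_startup_branch() above: init the tracer and propagate failure via warn_failed_init_tracer(), let it run briefly, freeze the ring buffer with tracing_stop() (replacing the old tr->ctrl / ctrl_update() dance), validate and count the entries, reset the tracer, and restart tracing on every exit path. A condensed sketch of that skeleton follows; the name trace_selftest_startup_generic is hypothetical, and the body is not compilable standalone since it relies on the in-tree helpers shown in the hunks (trace_test_buffer(), tracing_stop()/tracing_start()).

static int trace_selftest_startup_generic(struct tracer *trace,
					  struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* ->init() now reports failure instead of returning void */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* let the tracer collect events for 1/10 of a second */
	msleep(100);

	/* freeze the ring buffer rather than poking tr->ctrl */
	tracing_stop();

	/* validate the entries and count them, then tear down */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* tracing must be restarted on every exit path */
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}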
