| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-05 14:04:19 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-05 14:04:19 -0400 |
| commit | 714f83d5d9f7c785f622259dad1f4fad12d64664 (patch) | |
| tree | 20563541ae438e11d686b4d629074eb002a481b7 /kernel/trace/trace_irqsoff.c | |
| parent | 8901e7ffc2fa78ede7ce9826dbad68a3a25dc2dc (diff) | |
| parent | 645dae969c3b8651c5bc7c54a1835ec03820f85f (diff) | |
Merge branch 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (413 commits)
  tracing, net: fix net tree and tracing tree merge interaction
  tracing, powerpc: fix powerpc tree and tracing tree interaction
  ring-buffer: do not remove reader page from list on ring buffer free
  function-graph: allow unregistering twice
  trace: make argument 'mem' of trace_seq_putmem() const
  tracing: add missing 'extern' keywords to trace_output.h
  tracing: provide trace_seq_reserve()
  blktrace: print out BLK_TN_MESSAGE properly
  blktrace: extract duplidate code
  blktrace: fix memory leak when freeing struct blk_io_trace
  blktrace: fix blk_probes_ref chaos
  blktrace: make classic output more classic
  blktrace: fix off-by-one bug
  blktrace: fix the original blktrace
  blktrace: fix a race when creating blk_tree_root in debugfs
  blktrace: fix timestamp in binary output
  tracing, Text Edit Lock: cleanup
  tracing: filter fix for TRACE_EVENT_FORMAT events
  ftrace: Using FTRACE_WARN_ON() to check "freed record" in ftrace_release()
  x86: kretprobe-booster interrupt emulation code fix
  ...
Fix up trivial conflicts in
 arch/parisc/include/asm/ftrace.h
 include/linux/memory.h
 kernel/extable.c
 kernel/module.c
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
| -rw-r--r-- | kernel/trace/trace_irqsoff.c | 54 | 
1 file changed, 16 insertions(+), 38 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 62a78d943534..b923d13e2fad 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -1,5 +1,5 @@
 /*
- * trace irqs off criticall timings
+ * trace irqs off critical timings
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
@@ -32,6 +32,8 @@ enum {
 
 static int trace_type __read_mostly;
 
+static int save_lat_flag;
+
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
 preempt_trace(void)
@@ -95,7 +97,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+		trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
@@ -153,7 +155,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 
 	latency = nsecs_to_usecs(delta);
 
@@ -177,7 +179,7 @@ out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(tr, cpu);
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -210,7 +212,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -244,7 +246,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
@@ -353,33 +355,26 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
-/*
- * save_tracer_enabled is used to save the state of the tracer_enabled
- * variable when we disable it when we open a trace output file.
- */
-static int save_tracer_enabled;
-
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
 	register_ftrace_function(&trace_ops);
-	if (tracing_is_enabled()) {
+	if (tracing_is_enabled())
 		tracer_enabled = 1;
-		save_tracer_enabled = 1;
-	} else {
+	else
 		tracer_enabled = 0;
-		save_tracer_enabled = 0;
-	}
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	save_tracer_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
+	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+	trace_flags |= TRACE_ITER_LATENCY_FMT;
+
 	tracing_max_latency = 0;
 	irqsoff_trace = tr;
 	/* make sure that the tracer is visible */
@@ -390,30 +385,19 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
 	stop_irqsoff_tracer(tr);
+
+	if (!save_lat_flag)
+		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
 {
 	tracer_enabled = 1;
-	save_tracer_enabled = 1;
 }
 
 static void irqsoff_tracer_stop(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	save_tracer_enabled = 0;
-}
-
-static void irqsoff_tracer_open(struct trace_iterator *iter)
-{
-	/* stop the trace while dumping */
-	tracer_enabled = 0;
-}
-
-static void irqsoff_tracer_close(struct trace_iterator *iter)
-{
-	/* restart tracing */
-	tracer_enabled = save_tracer_enabled;
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
@@ -431,8 +415,6 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.open		= irqsoff_tracer_open,
-	.close		= irqsoff_tracer_close,
 	.print_max	= 1,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_irqsoff,
@@ -459,8 +441,6 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.open		= irqsoff_tracer_open,
-	.close		= irqsoff_tracer_close,
 	.print_max	= 1,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_preemptoff,
@@ -489,8 +469,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.reset		= irqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
-	.open		= irqsoff_tracer_open,
-	.close		= irqsoff_tracer_close,
 	.print_max	= 1,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_preemptirqsoff,
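The one idiom worth noting in this diff is the `save_lat_flag` save/restore of the `TRACE_ITER_LATENCY_FMT` option: on tracer init the code records whether the flag was already set before forcing it on, and on reset it clears the flag only if it was not set beforehand, so a user-enabled option is never silently dropped. The following is a minimal, self-contained userspace sketch of that pattern; the function names, the flag bit value, and the global variables are illustrative stand-ins, not the kernel's own symbols.

```c
#include <stdio.h>

/* Stand-in for the real option bit; the value is arbitrary for this sketch. */
#define TRACE_ITER_LATENCY_FMT	(1 << 0)

static unsigned int trace_flags;	/* stand-in for the global option mask */
static int save_lat_flag;		/* was the bit already set before init? */

static void tracer_init(void)
{
	/* Remember the caller's setting, then force latency-format output. */
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;
}

static void tracer_reset(void)
{
	/* Only undo the change if the flag was not set before we started. */
	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

int main(void)
{
	tracer_init();
	printf("latency fmt while active: %u\n",
	       !!(trace_flags & TRACE_ITER_LATENCY_FMT));
	tracer_reset();
	printf("latency fmt after reset:  %u\n",
	       !!(trace_flags & TRACE_ITER_LATENCY_FMT));
	return 0;
}
```

The same save/restore shape is what lets the diff drop the old `save_tracer_enabled` bookkeeping and the `.open`/`.close` callbacks: state that only exists to be put back later is captured once at init and restored once at reset.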
