Diffstat (limited to 'kernel/trace')
 kernel/trace/Kconfig                 |  11
 kernel/trace/Makefile                |   1
 kernel/trace/ftrace.c                |  33
 kernel/trace/ring_buffer.c           | 179
 kernel/trace/ring_buffer_benchmark.c |   5
 kernel/trace/trace.c                 | 127
 kernel/trace/trace.h                 |  47
 kernel/trace/trace_entries.h         |  12
 kernel/trace/trace_events_filter.c   |   2
 kernel/trace/trace_functions_graph.c | 169
 kernel/trace/trace_hw_branches.c     | 312
 kernel/trace/trace_irqsoff.c         | 271
 kernel/trace/trace_kprobe.c          | 535
 kernel/trace/trace_ksym.c            |  26
 kernel/trace/trace_output.c          |   2
 kernel/trace/trace_sched_switch.c    |   5
 kernel/trace/trace_sched_wakeup.c    |   5
 kernel/trace/trace_selftest.c        |  64
 18 files changed, 1016 insertions(+), 790 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 13e13d428cd3..8b1797c4545b 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -44,9 +44,6 @@ config HAVE_FTRACE_MCOUNT_RECORD
 	help
 	  See Documentation/trace/ftrace-design.txt
 
-config HAVE_HW_BRANCH_TRACER
-	bool
-
 config HAVE_SYSCALL_TRACEPOINTS
 	bool
 	help
@@ -374,14 +371,6 @@ config STACK_TRACER
 
 	  Say N if unsure.
 
-config HW_BRANCH_TRACER
-	depends on HAVE_HW_BRANCH_TRACER
-	bool "Trace hw branches"
-	select GENERIC_TRACER
-	help
-	  This tracer records all branches on the system in a circular
-	  buffer, giving access to the last N branches for each cpu.
-
 config KMEMTRACE
 	bool "Trace SLAB allocations"
 	select GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 78edc6490038..ffb1a5b0550e 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
 obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2404b59b3097..32837e19e3bd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -264,6 +264,7 @@ struct ftrace_profile {
 	unsigned long			counter;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	unsigned long long		time;
+	unsigned long long		time_squared;
 #endif
 };
 
@@ -366,9 +367,9 @@ static int function_stat_headers(struct seq_file *m)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	seq_printf(m, "  Function                               "
-		   "Hit    Time            Avg\n"
+		   "Hit    Time            Avg             s^2\n"
 		      "  --------                               "
-		   "---    ----            ---\n");
+		   "---    ----            ---             ---\n");
 #else
 	seq_printf(m, "  Function                               Hit\n"
 		      "  --------                               ---\n");
@@ -384,6 +385,7 @@ static int function_stat_show(struct seq_file *m, void *v)
 	static DEFINE_MUTEX(mutex);
 	static struct trace_seq s;
 	unsigned long long avg;
+	unsigned long long stddev;
 #endif
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -394,11 +396,25 @@ static int function_stat_show(struct seq_file *m, void *v)
 	avg = rec->time;
 	do_div(avg, rec->counter);
 
+	/* Sample standard deviation (s^2) */
+	if (rec->counter <= 1)
+		stddev = 0;
+	else {
+		stddev = rec->time_squared - rec->counter * avg * avg;
+		/*
+		 * Divide only 1000 for ns^2 -> us^2 conversion.
+		 * trace_print_graph_duration will divide 1000 again.
+		 */
+		do_div(stddev, (rec->counter - 1) * 1000);
+	}
+
 	mutex_lock(&mutex);
 	trace_seq_init(&s);
 	trace_print_graph_duration(rec->time, &s);
 	trace_seq_puts(&s, "    ");
 	trace_print_graph_duration(avg, &s);
+	trace_seq_puts(&s, "    ");
+	trace_print_graph_duration(stddev, &s);
 	trace_print_seq(m, &s);
 	mutex_unlock(&mutex);
 #endif
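
Note: the stddev block above relies on the algebraic identity sum((x - avg)^2) = sum(x^2) - n*avg^2, which is why the profiler only needs two running totals per function (time and time_squared) rather than every sample. A minimal standalone sketch of the same bookkeeping, with hypothetical sample values (plain C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long samples[] = { 120, 90, 150, 110 }; /* hypothetical ns */
        unsigned long long sum = 0, sum_sq = 0, n = 4, avg, var;

        for (unsigned long long i = 0; i < n; i++) {
            sum += samples[i];                 /* plays the role of rec->time */
            sum_sq += samples[i] * samples[i]; /* plays the role of rec->time_squared */
        }
        avg = sum / n;
        /* sample variance: (sum_sq - n*avg^2) / (n - 1), as in function_stat_show() */
        var = (sum_sq - n * avg * avg) / (n - 1);
        printf("avg=%llu var=%llu\n", avg, var);
        return 0;
    }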
@@ -650,6 +666,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	if (!stat->hash || !ftrace_profile_enabled)
 		goto out;
 
+	/* If the calltime was zero'd ignore it */
+	if (!trace->calltime)
+		goto out;
+
 	calltime = trace->rettime - trace->calltime;
 
 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
@@ -668,8 +688,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	}
 
 	rec = ftrace_find_profiled_func(stat, trace->func);
-	if (rec)
+	if (rec) {
 		rec->time += calltime;
+		rec->time_squared += calltime * calltime;
+	}
 
  out:
 	local_irq_restore(flags);
@@ -3212,8 +3234,7 @@ free:
 }
 
 static void
-ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
-			struct task_struct *next)
+ftrace_graph_probe_sched_switch(struct task_struct *prev, struct task_struct *next)
 {
 	unsigned long long timestamp;
 	int index;
@@ -3339,11 +3360,11 @@ void unregister_ftrace_graph(void)
 		goto out;
 
 	ftrace_graph_active--;
-	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
+	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 
  out:
 	mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 41ca394feb22..7f6059c5aa94 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -319,6 +319,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST	(~TS_MASK)
 
+/* Flag when events were overwritten */
+#define RB_MISSED_EVENTS	(1 << 31)
+/* Missed count stored at end */
+#define RB_MISSED_STORED	(1 << 30)
+
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
 	local_t		 commit;	/* write committed index */
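
Note: these two flags live in the top bits of the data page's commit word, while the low bits still hold the committed data length, so a reader handed a raw page has to strip them before trusting the length. A hedged sketch of that decode (the RB_* values come from the hunk above; the helper name is made up):

    #define RB_MISSED_EVENTS	(1 << 31)	/* events were overwritten */
    #define RB_MISSED_STORED	(1 << 30)	/* missed count stored at page end */

    /* hypothetical helper: split a raw commit word into flags and data length */
    static unsigned long rb_commit_len(unsigned long commit, int *missed, int *stored)
    {
        *missed = !!(commit & RB_MISSED_EVENTS);
        *stored = !!(commit & RB_MISSED_STORED);
        return commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
    }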
@@ -338,6 +343,7 @@ struct buffer_page {
 	local_t		 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
 	local_t		 entries;	/* entries on this page */
+	unsigned long	 real_end;	/* real end of data */
 	struct buffer_data_page *page;	/* Actual data page */
 };
 
@@ -417,6 +423,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 			       (unsigned int)sizeof(field.commit),
 			       (unsigned int)is_signed_type(long));
 
+	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)offsetof(typeof(field), commit),
+			       1,
+			       (unsigned int)is_signed_type(long));
+
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
 			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
@@ -440,6 +452,8 @@ struct ring_buffer_per_cpu {
 	struct buffer_page		*tail_page;	/* write to tail */
 	struct buffer_page		*commit_page;	/* committed pages */
 	struct buffer_page		*reader_page;
+	unsigned long			lost_events;
+	unsigned long			last_overrun;
 	local_t				commit_overrun;
 	local_t				overrun;
 	local_t				entries;
@@ -1762,6 +1776,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	kmemcheck_annotate_bitfield(event, bitfield);
 
 	/*
+	 * Save the original length to the meta data.
+	 * This will be used by the reader to add lost event
+	 * counter.
+	 */
+	tail_page->real_end = tail;
+
+	/*
 	 * If this event is bigger than the minimum size, then
 	 * we need to be careful that we don't subtract the
 	 * write counter enough to allow another writer to slip
@@ -1979,17 +2000,13 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		  u64 *ts, u64 *delta)
 {
 	struct ring_buffer_event *event;
-	static int once;
 	int ret;
 
-	if (unlikely(*delta > (1ULL << 59) && !once++)) {
-		printk(KERN_WARNING "Delta way too big! %llu"
-		       " ts=%llu write stamp = %llu\n",
-		       (unsigned long long)*delta,
-		       (unsigned long long)*ts,
-		       (unsigned long long)cpu_buffer->write_stamp);
-		WARN_ON(1);
-	}
+	WARN_ONCE(*delta > (1ULL << 59),
+		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+		  (unsigned long long)*delta,
+		  (unsigned long long)*ts,
+		  (unsigned long long)cpu_buffer->write_stamp);
 
 	/*
 	 * The delta is too big, we to add a
@@ -2838,6 +2855,7 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct buffer_page *reader = NULL;
+	unsigned long overwrite;
 	unsigned long flags;
 	int nr_loops = 0;
 	int ret;
@@ -2879,6 +2897,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->reader_page->write, 0);
 	local_set(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
+	cpu_buffer->reader_page->real_end = 0;
 
  spin:
 	/*
@@ -2899,6 +2918,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
 
 	/*
+	 * We want to make sure we read the overruns after we set up our
+	 * pointers to the next object. The writer side does a
+	 * cmpxchg to cross pages which acts as the mb on the writer
+	 * side. Note, the reader will constantly fail the swap
+	 * while the writer is updating the pointers, so this
+	 * guarantees that the overwrite recorded here is the one we
+	 * want to compare with the last_overrun.
+	 */
+	smp_mb();
+	overwrite = local_read(&(cpu_buffer->overrun));
+
+	/*
 	 * Here's the tricky part.
 	 *
 	 * We need to move the pointer past the header page.
@@ -2929,6 +2960,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page = reader;
 	rb_reset_reader_page(cpu_buffer);
 
+	if (overwrite != cpu_buffer->last_overrun) {
+		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+		cpu_buffer->last_overrun = overwrite;
+	}
+
 	goto again;
 
  out:
@@ -3005,8 +3041,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 		rb_advance_iter(iter);
 }
 
+static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	return cpu_buffer->lost_events;
+}
+
 static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
+	       unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
 	struct buffer_page *reader;
@@ -3058,6 +3100,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
 							 cpu_buffer->cpu, ts);
 		}
+		if (lost_events)
+			*lost_events = rb_lost_events(cpu_buffer);
 		return event;
 
 	default:
@@ -3168,12 +3212,14 @@ static inline int rb_ok_to_lock(void)
  * @buffer: The ring buffer to read
  * @cpu: The cpu to peak at
  * @ts: The timestamp counter of this event.
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * This will return the event that will be read next, but does
  * not consume the data.
  */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
@@ -3188,7 +3234,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	local_irq_save(flags);
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
-	event = rb_buffer_peek(cpu_buffer, ts);
+	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
@@ -3230,13 +3276,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
+ * @cpu: the cpu to read the buffer from
+ * @ts: a variable to store the timestamp (may be NULL)
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * Returns the next event in the ring buffer, and that event is consumed.
  * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
@@ -3257,9 +3307,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
 
-	event = rb_buffer_peek(cpu_buffer, ts);
-	if (event)
+	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+	if (event) {
+		cpu_buffer->lost_events = 0;
 		rb_advance_reader(cpu_buffer);
+	}
 
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
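
Note: with the extra parameter in place, a consumer can learn how many events were dropped between successive reads. A hedged sketch of a kernel-side consumer loop (the drain_cpu helper is made up; error handling elided):

    #include <linux/ring_buffer.h>

    /* drain one CPU's buffer, reporting drops along the way (sketch) */
    static void drain_cpu(struct ring_buffer *buffer, int cpu)
    {
        struct ring_buffer_event *event;
        unsigned long lost;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
            if (lost)
                pr_info("cpu %d: lost %lu events\n", cpu, lost);
            /* process ring_buffer_event_data(event) here */
        }
    }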
@@ -3276,23 +3328,30 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
- * ring_buffer_read_start - start a non consuming read of the buffer
+ * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
  *
- * This starts up an iteration through the buffer. It also disables
- * the recording to the buffer until the reading is finished.
- * This prevents the reading from being corrupted. This is not
- * a consuming read, so a producer is not expected.
+ * This performs the initial preparations necessary to iterate
+ * through the buffer. Memory is allocated, buffer recording
+ * is disabled, and the iterator pointer is returned to the caller.
  *
- * Must be paired with ring_buffer_finish.
+ * Disabling buffer recordng prevents the reading from being
+ * corrupted. This is not a consuming read, so a producer is not
+ * expected.
+ *
+ * After a sequence of ring_buffer_read_prepare calls, the user is
+ * expected to make at least one call to ring_buffer_prepare_sync.
+ * Afterwards, ring_buffer_read_start is invoked to get things going
+ * for real.
+ *
+ * This overall must be paired with ring_buffer_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_iter *iter;
-	unsigned long flags;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
@@ -3306,15 +3365,52 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	iter->cpu_buffer = cpu_buffer;
 
 	atomic_inc(&cpu_buffer->record_disabled);
+
+	return iter;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
+
+/**
+ * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
+ *
+ * All previously invoked ring_buffer_read_prepare calls to prepare
+ * iterators will be synchronized.  Afterwards, read_buffer_read_start
+ * calls on those iterators are allowed.
+ */
+void
+ring_buffer_read_prepare_sync(void)
+{
 	synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
+
+/**
+ * ring_buffer_read_start - start a non consuming read of the buffer
+ * @iter: The iterator returned by ring_buffer_read_prepare
+ *
+ * This finalizes the startup of an iteration through the buffer.
+ * The iterator comes from a call to ring_buffer_read_prepare and
+ * an intervening ring_buffer_read_prepare_sync must have been
+ * performed.
+ *
+ * Must be paired with ring_buffer_finish.
+ */
+void
+ring_buffer_read_start(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+
+	if (!iter)
+		return;
+
+	cpu_buffer = iter->cpu_buffer;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-	return iter;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
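
Note: the split lets a caller pay the synchronize_sched() cost once for a whole set of CPUs instead of once per CPU. The intended call sequence, sketched here with the for_each_tracing_cpu() helper from trace.c (this mirrors the __tracing_open() change later in this patch):

    /* prepare all iterators first, sync once, then start them (sketch) */
    struct ring_buffer_iter *iters[NR_CPUS];
    int cpu;

    for_each_tracing_cpu(cpu)
        iters[cpu] = ring_buffer_read_prepare(buffer, cpu);
    ring_buffer_read_prepare_sync();	/* one synchronize_sched() in total */
    for_each_tracing_cpu(cpu)
        ring_buffer_read_start(iters[cpu]);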
@@ -3408,6 +3504,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
 
+	cpu_buffer->lost_events = 0;
+	cpu_buffer->last_overrun = 0;
+
 	rb_head_page_activate(cpu_buffer);
 }
 
@@ -3683,6 +3782,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
 	struct buffer_page *reader;
+	unsigned long missed_events;
 	unsigned long flags;
 	unsigned int commit;
 	unsigned int read;
@@ -3719,6 +3819,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	read = reader->read;
 	commit = rb_page_commit(reader);
 
+	/* Check if any events were dropped */
+	missed_events = cpu_buffer->lost_events;
+
 	/*
 	 * If this page has been partially read or
 	 * if len is not big enough to read the rest of the page or
@@ -3779,9 +3882,35 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		local_set(&reader->entries, 0);
 		reader->read = 0;
 		*data_page = bpage;
+
+		/*
+		 * Use the real_end for the data size,
+		 * This gives us a chance to store the lost events
+		 * on the page.
+		 */
+		if (reader->real_end)
+			local_set(&bpage->commit, reader->real_end);
 	}
 	ret = read;
 
+	cpu_buffer->lost_events = 0;
+	/*
+	 * Set a flag in the commit field if we lost events
+	 */
+	if (missed_events) {
+		commit = local_read(&bpage->commit);
+
+		/* If there is room at the end of the page to save the
+		 * missed events, then record it there.
+		 */
+		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+			memcpy(&bpage->data[commit], &missed_events,
+			       sizeof(missed_events));
+			local_add(RB_MISSED_STORED, &bpage->commit);
+		}
+		local_add(RB_MISSED_EVENTS, &bpage->commit);
+	}
+
  out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
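
Note: on the receiving side, a consumer of such a page would check RB_MISSED_STORED and, if set, pull the count from just past the committed data. A hedged sketch of that recovery, following the page layout established in the hunks above (the function name is made up):

    /* recover the missed-event count a writer stashed at the page end (sketch) */
    static unsigned long page_missed_events(struct buffer_data_page *bpage)
    {
        unsigned long raw = local_read(&bpage->commit);
        unsigned long commit = raw & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
        unsigned long missed = 0;

        if (raw & RB_MISSED_STORED)
            memcpy(&missed, &bpage->data[commit], sizeof(missed));
        else if (raw & RB_MISSED_EVENTS)
            missed = 1;	/* events were lost, but no count fit on the page */
        return missed;
    }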
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index df74c7982255..302f8a614635 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -81,7 +81,7 @@ static enum event_status read_event(int cpu)
 	int *entry;
 	u64 ts;
 
-	event = ring_buffer_consume(buffer, cpu, &ts);
+	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
 	if (!event)
 		return EVENT_DROPPED;
 
@@ -113,7 +113,8 @@ static enum event_status read_page(int cpu)
 	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
 	if (ret >= 0) {
 		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
-		commit = local_read(&rpage->commit);
+		commit = local_read(&rpage->commit) & 0xfffff;
 		for (i = 0; i < commit && !kill_test; i += inc) {
 
 			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
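
Note: the 0xfffff mask keeps the low 20 bits of the commit word, plenty for any offset into a data page, while discarding the RB_MISSED_EVENTS (bit 31) and RB_MISSED_STORED (bit 30) flags set by ring_buffer_read_page(). A quick illustration of the bit arithmetic:

    /* bit layout: flags sit well above the 20 bits kept by the mask */
    unsigned long raw    = (1UL << 31) | (1UL << 30) | 0x1234; /* flags + length */
    unsigned long length = raw & 0xfffff;                      /* 0x1234 */
    unsigned long flags  = raw & ~0xfffffUL;                   /* the RB_MISSED_* bits */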
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 44f916a04065..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly	tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-	ftrace_dump_on_oops = 1;
-	return 1;
+	if (*str++ != '=' || !*str) {
+		ftrace_dump_on_oops = DUMP_ALL;
+		return 1;
+	}
+
+	if (!strcmp("orig_cpu", str)) {
+		ftrace_dump_on_oops = DUMP_ORIG;
+		return 1;
+	}
+
+	return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
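
Note: with this parsing in place, the boot parameter accepts the following forms (per the code above; annotations added here for illustration):

    ftrace_dump_on_oops                # dump every CPU's buffer (DUMP_ALL)
    ftrace_dump_on_oops=orig_cpu       # dump only the oopsing CPU's buffer (DUMP_ORIG)
    ftrace_dump_on_oops=anything-else  # rejected, setup handler returns 0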
@@ -1545,7 +1557,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 }
 
 static struct trace_entry *
-peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
+		unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1556,7 +1569,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
-		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+					 lost_events);
 
 	ftrace_enable_cpu();
 
@@ -1564,10 +1578,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 }
 
 static struct trace_entry *
-__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+		  unsigned long *missing_events, u64 *ent_ts)
 {
 	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
+	unsigned long lost_events = 0, next_lost = 0;
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
@@ -1580,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (cpu_file > TRACE_PIPE_ALL_CPU) {
 		if (ring_buffer_empty_cpu(buffer, cpu_file))
 			return NULL;
-		ent = peek_next_entry(iter, cpu_file, ent_ts);
+		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 		if (ent_cpu)
 			*ent_cpu = cpu_file;
 
@@ -1592,7 +1608,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 		if (ring_buffer_empty_cpu(buffer, cpu))
 			continue;
 
-		ent = peek_next_entry(iter, cpu, &ts);
+		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 
 		/*
 		 * Pick the entry with the smallest timestamp:
@@ -1601,6 +1617,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 			next = ent;
 			next_cpu = cpu;
 			next_ts = ts;
+			next_lost = lost_events;
 		}
 	}
 
@@ -1610,6 +1627,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (ent_ts)
 		*ent_ts = next_ts;
 
+	if (missing_events)
+		*missing_events = next_lost;
+
 	return next;
 }
 
@@ -1617,13 +1637,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts)
 {
-	return __find_next_entry(iter, ent_cpu, ent_ts);
+	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
 static void *find_next_entry_inc(struct trace_iterator *iter)
 {
-	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
+	iter->ent = __find_next_entry(iter, &iter->cpu,
+				      &iter->lost_events, &iter->ts);
 
 	if (iter->ent)
 		trace_iterator_increment(iter);
@@ -1635,7 +1656,8 @@ static void trace_consume(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
-	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+			    &iter->lost_events);
 	ftrace_enable_cpu();
 }
 
@@ -1786,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
 }
 
 
-static void
+void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1995,7 +2017,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
 }
 
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
 {
 	int cpu;
 
@@ -2030,6 +2052,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
+	if (iter->lost_events)
+		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+				 iter->cpu, iter->lost_events);
+
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
 		if (ret != TRACE_TYPE_UNHANDLED)
@@ -2058,6 +2084,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 	return print_trace_fmt(iter);
 }
 
+void trace_default_header(struct seq_file *m)
+{
+	struct trace_iterator *iter = m->private;
+
+	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+		/* print nothing if the buffers are empty */
+		if (trace_empty(iter))
+			return;
+		print_trace_header(m, iter);
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_lat_help_header(m);
+	} else {
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_func_help_header(m);
+	}
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2070,17 +2113,9 @@ static int s_show(struct seq_file *m, void *v)
 		}
 		if (iter->trace && iter->trace->print_header)
 			iter->trace->print_header(m);
-		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-			/* print nothing if the buffers are empty */
-			if (trace_empty(iter))
-				return 0;
-			print_trace_header(m, iter);
-			if (!(trace_flags & TRACE_ITER_VERBOSE))
-				print_lat_help_header(m);
-		} else {
-			if (!(trace_flags & TRACE_ITER_VERBOSE))
-				print_func_help_header(m);
-		}
+		else
+			trace_default_header(m);
+
 	} else if (iter->leftover) {
 		/*
 		 * If we filled the seq_file buffer earlier, we
@@ -2166,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
-
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_start(iter->tr->buffer, cpu);
+				ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		}
+		ring_buffer_read_prepare_sync();
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_read_start(iter->buffer_iter[cpu]);
 			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_start(iter->tr->buffer, cpu);
+			ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		ring_buffer_read_prepare_sync();
+		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
 	}
 
@@ -4324,7 +4364,7 @@ static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
 	if (ftrace_dump_on_oops)
-		ftrace_dump();
+		ftrace_dump(ftrace_dump_on_oops);
 	return NOTIFY_OK;
 }
 
@@ -4341,7 +4381,7 @@ static int trace_die_handler(struct notifier_block *self,
 	switch (val) {
 	case DIE_OOPS:
 		if (ftrace_dump_on_oops)
-			ftrace_dump();
+			ftrace_dump(ftrace_dump_on_oops);
 		break;
 	default:
 		break;
@@ -4382,7 +4422,8 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
 	static arch_spinlock_t ftrace_dump_lock =
 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4415,12 +4456,25 @@ static void __ftrace_dump(bool disable_tracing)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
 	/* Simulate the iterator */
 	iter.tr = &global_trace;
 	iter.trace = current_trace;
-	iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+	switch (oops_dump_mode) {
+	case DUMP_ALL:
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+		break;
+	case DUMP_ORIG:
+		iter.cpu_file = raw_smp_processor_id();
+		break;
+	case DUMP_NONE:
+		goto out_enable;
+	default:
+		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+	}
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
@@ -4459,6 +4513,7 @@ static void __ftrace_dump(bool disable_tracing)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
 	/* Re-enable tracing if requested */
 	if (!disable_tracing) {
 		trace_flags |= old_userobj;
@@ -4475,9 +4530,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	__ftrace_dump(true);
+	__ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2825ef2c0b15..d1ce0bec1b3f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -34,7 +34,6 @@ enum trace_type {
 	TRACE_GRAPH_RET,
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
-	TRACE_HW_BRANCHES,
 	TRACE_KMEM_ALLOC,
 	TRACE_KMEM_FREE,
 	TRACE_BLK,
@@ -103,29 +102,17 @@ struct syscall_trace_exit {
 	long			ret;
 };
 
-struct kprobe_trace_entry {
+struct kprobe_trace_entry_head {
 	struct trace_entry	ent;
 	unsigned long		ip;
-	int			nargs;
-	unsigned long		args[];
 };
 
-#define SIZEOF_KPROBE_TRACE_ENTRY(n)			\
-	(offsetof(struct kprobe_trace_entry, args) +	\
-	(sizeof(unsigned long) * (n)))
-
-struct kretprobe_trace_entry {
+struct kretprobe_trace_entry_head {
 	struct trace_entry	ent;
 	unsigned long		func;
 	unsigned long		ret_ip;
-	int			nargs;
-	unsigned long		args[];
 };
 
-#define SIZEOF_KRETPROBE_TRACE_ENTRY(n)			\
-	(offsetof(struct kretprobe_trace_entry, args) +	\
-	(sizeof(unsigned long) * (n)))
-
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
@@ -229,7 +216,6 @@ extern void __ftrace_bad_type(void);
 			  TRACE_GRAPH_ENT);		\
 	IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);		\
-	IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
 	IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
 			  TRACE_KMEM_ALLOC);	\
 	IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
@@ -378,6 +364,9 @@ void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
+void trace_default_header(struct seq_file *m);
+void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
+int trace_empty(struct trace_iterator *iter);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
@@ -467,8 +456,6 @@ extern int trace_selftest_startup_sysprof(struct tracer *trace,
 					       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
-extern int trace_selftest_startup_hw_branches(struct tracer *trace,
-					      struct trace_array *tr);
 extern int trace_selftest_startup_ksym(struct tracer *trace,
 					 struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
@@ -491,9 +478,29 @@ extern int trace_clock_id;
 
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+
+/* Flag options */
+#define TRACE_GRAPH_PRINT_OVERRUN       0x1
+#define TRACE_GRAPH_PRINT_CPU           0x2
+#define TRACE_GRAPH_PRINT_OVERHEAD     0x4
+#define TRACE_GRAPH_PRINT_PROC          0x8
+#define TRACE_GRAPH_PRINT_DURATION      0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
+
+extern enum print_line_t
+print_graph_function_flags(struct trace_iterator *iter, u32 flags);
+extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
 extern enum print_line_t
 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
+extern void graph_trace_open(struct trace_iterator *iter);
+extern void graph_trace_close(struct trace_iterator *iter);
+extern int __trace_graph_entry(struct trace_array *tr,
+			       struct ftrace_graph_ent *trace,
+			       unsigned long flags, int pc);
+extern void __trace_graph_return(struct trace_array *tr,
+				 struct ftrace_graph_ret *trace,
+				 unsigned long flags, int pc);
+
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
@@ -524,7 +531,7 @@ static inline int ftrace_graph_addr(unsigned long addr)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
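
Note: moving the flag bits into trace.h lets other tracers drive the graph output format instead of relying on the graph tracer's internal tracer_flags. A hedged illustration of how a caller might combine them (the my_print_line wrapper is made up):

    /* hypothetical caller: graph output with CPU, process, and duration columns */
    static enum print_line_t my_print_line(struct trace_iterator *iter)
    {
        u32 flags = TRACE_GRAPH_PRINT_CPU |
                    TRACE_GRAPH_PRINT_PROC |
                    TRACE_GRAPH_PRINT_DURATION;

        return print_graph_function_flags(iter, flags);
    }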
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c16a08f399df..dc008c1240da 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -318,18 +318,6 @@ FTRACE_ENTRY(branch, trace_branch,
 		 __entry->func, __entry->file, __entry->correct)
 );
 
-FTRACE_ENTRY(hw_branch, hw_branch_entry,
-
-	TRACE_HW_BRANCHES,
-
-	F_STRUCT(
-		__field(	u64,	from	)
-		__field(	u64,	to	)
-	),
-
-	F_printk("from: %llx to: %llx", __entry->from, __entry->to)
-);
-
 FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
 
 	TRACE_KMEM_ALLOC,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 88c0b6dbd7fe..58092d844a1f 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1398,7 +1398,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 	}
 
 	err = -EINVAL;
-	if (!call)
+	if (&call->list == &ftrace_events)
 		goto out_unlock;
 
 	err = -EEXIST;
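
Note: this one-liner matters because `call` is a list_for_each_entry() cursor. After a full traversal it is never NULL; it points at the container of the list head itself, so the old `!call` test could never fire. Comparing the cursor's list node against the head is the correct end-of-list test. A hedged sketch of the pattern being fixed (field names assumed):

    /* search-loop pattern: detecting "not found" with a list cursor */
    struct ftrace_event_call *call;

    list_for_each_entry(call, &ftrace_events, list) {
        if (call->id == event_id)
            break;
    }
    /* after the loop, "call" is never NULL; check the list node instead: */
    if (&call->list == &ftrace_events)
        return -EINVAL;	/* walked off the end, no match found */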
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 9aed1a5cf553..dd11c830eb84 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -40,7 +40,7 @@ struct fgraph_data { | |||
| 40 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 40 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
| 41 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 41 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
| 42 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 42 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
| 43 | #define TRACE_GRAPH_PRINT_ABS_TIME 0X20 | 43 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
| 44 | 44 | ||
| 45 | static struct tracer_opt trace_opts[] = { | 45 | static struct tracer_opt trace_opts[] = { |
| 46 | /* Display overruns? (for self-debug purpose) */ | 46 | /* Display overruns? (for self-debug purpose) */ |
| @@ -179,7 +179,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer) | |||
| 179 | return ret; | 179 | return ret; |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static int __trace_graph_entry(struct trace_array *tr, | 182 | int __trace_graph_entry(struct trace_array *tr, |
| 183 | struct ftrace_graph_ent *trace, | 183 | struct ftrace_graph_ent *trace, |
| 184 | unsigned long flags, | 184 | unsigned long flags, |
| 185 | int pc) | 185 | int pc) |
| @@ -246,7 +246,7 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) | |||
| 246 | return trace_graph_entry(trace); | 246 | return trace_graph_entry(trace); |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | static void __trace_graph_return(struct trace_array *tr, | 249 | void __trace_graph_return(struct trace_array *tr, |
| 250 | struct ftrace_graph_ret *trace, | 250 | struct ftrace_graph_ret *trace, |
| 251 | unsigned long flags, | 251 | unsigned long flags, |
| 252 | int pc) | 252 | int pc) |
| @@ -490,9 +490,10 @@ get_return_for_leaf(struct trace_iterator *iter, | |||
| 490 | * We need to consume the current entry to see | 490 | * We need to consume the current entry to see |
| 491 | * the next one. | 491 | * the next one. |
| 492 | */ | 492 | */ |
| 493 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | 493 | ring_buffer_consume(iter->tr->buffer, iter->cpu, |
| 494 | NULL, NULL); | ||
| 494 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | 495 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, |
| 495 | NULL); | 496 | NULL, NULL); |
| 496 | } | 497 | } |
| 497 | 498 | ||
| 498 | if (!event) | 499 | if (!event) |
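The extra NULL argument reflects a ring-buffer interface change made elsewhere in this series: ring_buffer_consume() and ring_buffer_peek() grew a fourth parameter through which the number of events lost to buffer overwrite since the last read can be reported. The graph tracer does not need that count, so it passes NULL. A sketch of the new calling convention (signatures as used in this hunk; hedged, not checked against every kernel version):

	/* Inside an iterator callback; buffer and cpu as in the code above. */
	unsigned long lost = 0;
	struct ring_buffer_event *event;

	/* Both the timestamp and lost-events pointers are optional. */
	event = ring_buffer_consume(iter->tr->buffer, iter->cpu,
				    NULL /* ts */, &lost);
	if (lost)
		pr_debug("%lu events overwritten before this read\n", lost);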
| @@ -526,17 +527,18 @@ get_return_for_leaf(struct trace_iterator *iter, | |||
| 526 | 527 | ||
| 527 | /* Signal a overhead of time execution to the output */ | 528 | /* Signal a overhead of time execution to the output */ |
| 528 | static int | 529 | static int |
| 529 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | 530 | print_graph_overhead(unsigned long long duration, struct trace_seq *s, |
| 531 | u32 flags) | ||
| 530 | { | 532 | { |
| 531 | /* If duration disappear, we don't need anything */ | 533 | /* If duration disappear, we don't need anything */ |
| 532 | if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) | 534 | if (!(flags & TRACE_GRAPH_PRINT_DURATION)) |
| 533 | return 1; | 535 | return 1; |
| 534 | 536 | ||
| 535 | /* Non nested entry or return */ | 537 | /* Non nested entry or return */ |
| 536 | if (duration == -1) | 538 | if (duration == -1) |
| 537 | return trace_seq_printf(s, " "); | 539 | return trace_seq_printf(s, " "); |
| 538 | 540 | ||
| 539 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 541 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { |
| 540 | /* Duration exceeded 100 msecs */ | 542 | /* Duration exceeded 100 msecs */ |
| 541 | if (duration > 100000ULL) | 543 | if (duration > 100000ULL) |
| 542 | return trace_seq_printf(s, "! "); | 544 | return trace_seq_printf(s, "! "); |
| @@ -562,7 +564,7 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s) | |||
| 562 | 564 | ||
| 563 | static enum print_line_t | 565 | static enum print_line_t |
| 564 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, | 566 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
| 565 | enum trace_type type, int cpu, pid_t pid) | 567 | enum trace_type type, int cpu, pid_t pid, u32 flags) |
| 566 | { | 568 | { |
| 567 | int ret; | 569 | int ret; |
| 568 | struct trace_seq *s = &iter->seq; | 570 | struct trace_seq *s = &iter->seq; |
| @@ -572,21 +574,21 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
| 572 | return TRACE_TYPE_UNHANDLED; | 574 | return TRACE_TYPE_UNHANDLED; |
| 573 | 575 | ||
| 574 | /* Absolute time */ | 576 | /* Absolute time */ |
| 575 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | 577 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
| 576 | ret = print_graph_abs_time(iter->ts, s); | 578 | ret = print_graph_abs_time(iter->ts, s); |
| 577 | if (!ret) | 579 | if (!ret) |
| 578 | return TRACE_TYPE_PARTIAL_LINE; | 580 | return TRACE_TYPE_PARTIAL_LINE; |
| 579 | } | 581 | } |
| 580 | 582 | ||
| 581 | /* Cpu */ | 583 | /* Cpu */ |
| 582 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 584 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
| 583 | ret = print_graph_cpu(s, cpu); | 585 | ret = print_graph_cpu(s, cpu); |
| 584 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 586 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 585 | return TRACE_TYPE_PARTIAL_LINE; | 587 | return TRACE_TYPE_PARTIAL_LINE; |
| 586 | } | 588 | } |
| 587 | 589 | ||
| 588 | /* Proc */ | 590 | /* Proc */ |
| 589 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 591 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
| 590 | ret = print_graph_proc(s, pid); | 592 | ret = print_graph_proc(s, pid); |
| 591 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 593 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 592 | return TRACE_TYPE_PARTIAL_LINE; | 594 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -596,7 +598,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
| 596 | } | 598 | } |
| 597 | 599 | ||
| 598 | /* No overhead */ | 600 | /* No overhead */ |
| 599 | ret = print_graph_overhead(-1, s); | 601 | ret = print_graph_overhead(-1, s, flags); |
| 600 | if (!ret) | 602 | if (!ret) |
| 601 | return TRACE_TYPE_PARTIAL_LINE; | 603 | return TRACE_TYPE_PARTIAL_LINE; |
| 602 | 604 | ||
| @@ -609,7 +611,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
| 609 | return TRACE_TYPE_PARTIAL_LINE; | 611 | return TRACE_TYPE_PARTIAL_LINE; |
| 610 | 612 | ||
| 611 | /* Don't close the duration column if haven't one */ | 613 | /* Don't close the duration column if haven't one */ |
| 612 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 614 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
| 613 | trace_seq_printf(s, " |"); | 615 | trace_seq_printf(s, " |"); |
| 614 | ret = trace_seq_printf(s, "\n"); | 616 | ret = trace_seq_printf(s, "\n"); |
| 615 | 617 | ||
| @@ -679,7 +681,8 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
| 679 | static enum print_line_t | 681 | static enum print_line_t |
| 680 | print_graph_entry_leaf(struct trace_iterator *iter, | 682 | print_graph_entry_leaf(struct trace_iterator *iter, |
| 681 | struct ftrace_graph_ent_entry *entry, | 683 | struct ftrace_graph_ent_entry *entry, |
| 682 | struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s) | 684 | struct ftrace_graph_ret_entry *ret_entry, |
| 685 | struct trace_seq *s, u32 flags) | ||
| 683 | { | 686 | { |
| 684 | struct fgraph_data *data = iter->private; | 687 | struct fgraph_data *data = iter->private; |
| 685 | struct ftrace_graph_ret *graph_ret; | 688 | struct ftrace_graph_ret *graph_ret; |
| @@ -711,12 +714,12 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
| 711 | } | 714 | } |
| 712 | 715 | ||
| 713 | /* Overhead */ | 716 | /* Overhead */ |
| 714 | ret = print_graph_overhead(duration, s); | 717 | ret = print_graph_overhead(duration, s, flags); |
| 715 | if (!ret) | 718 | if (!ret) |
| 716 | return TRACE_TYPE_PARTIAL_LINE; | 719 | return TRACE_TYPE_PARTIAL_LINE; |
| 717 | 720 | ||
| 718 | /* Duration */ | 721 | /* Duration */ |
| 719 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 722 | if (flags & TRACE_GRAPH_PRINT_DURATION) { |
| 720 | ret = print_graph_duration(duration, s); | 723 | ret = print_graph_duration(duration, s); |
| 721 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 724 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 722 | return TRACE_TYPE_PARTIAL_LINE; | 725 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -739,7 +742,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
| 739 | static enum print_line_t | 742 | static enum print_line_t |
| 740 | print_graph_entry_nested(struct trace_iterator *iter, | 743 | print_graph_entry_nested(struct trace_iterator *iter, |
| 741 | struct ftrace_graph_ent_entry *entry, | 744 | struct ftrace_graph_ent_entry *entry, |
| 742 | struct trace_seq *s, int cpu) | 745 | struct trace_seq *s, int cpu, u32 flags) |
| 743 | { | 746 | { |
| 744 | struct ftrace_graph_ent *call = &entry->graph_ent; | 747 | struct ftrace_graph_ent *call = &entry->graph_ent; |
| 745 | struct fgraph_data *data = iter->private; | 748 | struct fgraph_data *data = iter->private; |
| @@ -759,12 +762,12 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
| 759 | } | 762 | } |
| 760 | 763 | ||
| 761 | /* No overhead */ | 764 | /* No overhead */ |
| 762 | ret = print_graph_overhead(-1, s); | 765 | ret = print_graph_overhead(-1, s, flags); |
| 763 | if (!ret) | 766 | if (!ret) |
| 764 | return TRACE_TYPE_PARTIAL_LINE; | 767 | return TRACE_TYPE_PARTIAL_LINE; |
| 765 | 768 | ||
| 766 | /* No time */ | 769 | /* No time */ |
| 767 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 770 | if (flags & TRACE_GRAPH_PRINT_DURATION) { |
| 768 | ret = trace_seq_printf(s, " | "); | 771 | ret = trace_seq_printf(s, " | "); |
| 769 | if (!ret) | 772 | if (!ret) |
| 770 | return TRACE_TYPE_PARTIAL_LINE; | 773 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -790,7 +793,7 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
| 790 | 793 | ||
| 791 | static enum print_line_t | 794 | static enum print_line_t |
| 792 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | 795 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, |
| 793 | int type, unsigned long addr) | 796 | int type, unsigned long addr, u32 flags) |
| 794 | { | 797 | { |
| 795 | struct fgraph_data *data = iter->private; | 798 | struct fgraph_data *data = iter->private; |
| 796 | struct trace_entry *ent = iter->ent; | 799 | struct trace_entry *ent = iter->ent; |
| @@ -803,27 +806,27 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
| 803 | 806 | ||
| 804 | if (type) { | 807 | if (type) { |
| 805 | /* Interrupt */ | 808 | /* Interrupt */ |
| 806 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid); | 809 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); |
| 807 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 810 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 808 | return TRACE_TYPE_PARTIAL_LINE; | 811 | return TRACE_TYPE_PARTIAL_LINE; |
| 809 | } | 812 | } |
| 810 | 813 | ||
| 811 | /* Absolute time */ | 814 | /* Absolute time */ |
| 812 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | 815 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
| 813 | ret = print_graph_abs_time(iter->ts, s); | 816 | ret = print_graph_abs_time(iter->ts, s); |
| 814 | if (!ret) | 817 | if (!ret) |
| 815 | return TRACE_TYPE_PARTIAL_LINE; | 818 | return TRACE_TYPE_PARTIAL_LINE; |
| 816 | } | 819 | } |
| 817 | 820 | ||
| 818 | /* Cpu */ | 821 | /* Cpu */ |
| 819 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 822 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
| 820 | ret = print_graph_cpu(s, cpu); | 823 | ret = print_graph_cpu(s, cpu); |
| 821 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 824 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 822 | return TRACE_TYPE_PARTIAL_LINE; | 825 | return TRACE_TYPE_PARTIAL_LINE; |
| 823 | } | 826 | } |
| 824 | 827 | ||
| 825 | /* Proc */ | 828 | /* Proc */ |
| 826 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 829 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
| 827 | ret = print_graph_proc(s, ent->pid); | 830 | ret = print_graph_proc(s, ent->pid); |
| 828 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 831 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 829 | return TRACE_TYPE_PARTIAL_LINE; | 832 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -845,7 +848,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
| 845 | 848 | ||
| 846 | static enum print_line_t | 849 | static enum print_line_t |
| 847 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 850 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
| 848 | struct trace_iterator *iter) | 851 | struct trace_iterator *iter, u32 flags) |
| 849 | { | 852 | { |
| 850 | struct fgraph_data *data = iter->private; | 853 | struct fgraph_data *data = iter->private; |
| 851 | struct ftrace_graph_ent *call = &field->graph_ent; | 854 | struct ftrace_graph_ent *call = &field->graph_ent; |
| @@ -853,14 +856,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
| 853 | static enum print_line_t ret; | 856 | static enum print_line_t ret; |
| 854 | int cpu = iter->cpu; | 857 | int cpu = iter->cpu; |
| 855 | 858 | ||
| 856 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) | 859 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) |
| 857 | return TRACE_TYPE_PARTIAL_LINE; | 860 | return TRACE_TYPE_PARTIAL_LINE; |
| 858 | 861 | ||
| 859 | leaf_ret = get_return_for_leaf(iter, field); | 862 | leaf_ret = get_return_for_leaf(iter, field); |
| 860 | if (leaf_ret) | 863 | if (leaf_ret) |
| 861 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s); | 864 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); |
| 862 | else | 865 | else |
| 863 | ret = print_graph_entry_nested(iter, field, s, cpu); | 866 | ret = print_graph_entry_nested(iter, field, s, cpu, flags); |
| 864 | 867 | ||
| 865 | if (data) { | 868 | if (data) { |
| 866 | /* | 869 | /* |
| @@ -879,7 +882,8 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
| 879 | 882 | ||
| 880 | static enum print_line_t | 883 | static enum print_line_t |
| 881 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 884 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
| 882 | struct trace_entry *ent, struct trace_iterator *iter) | 885 | struct trace_entry *ent, struct trace_iterator *iter, |
| 886 | u32 flags) | ||
| 883 | { | 887 | { |
| 884 | unsigned long long duration = trace->rettime - trace->calltime; | 888 | unsigned long long duration = trace->rettime - trace->calltime; |
| 885 | struct fgraph_data *data = iter->private; | 889 | struct fgraph_data *data = iter->private; |
| @@ -909,16 +913,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 909 | } | 913 | } |
| 910 | } | 914 | } |
| 911 | 915 | ||
| 912 | if (print_graph_prologue(iter, s, 0, 0)) | 916 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
| 913 | return TRACE_TYPE_PARTIAL_LINE; | 917 | return TRACE_TYPE_PARTIAL_LINE; |
| 914 | 918 | ||
| 915 | /* Overhead */ | 919 | /* Overhead */ |
| 916 | ret = print_graph_overhead(duration, s); | 920 | ret = print_graph_overhead(duration, s, flags); |
| 917 | if (!ret) | 921 | if (!ret) |
| 918 | return TRACE_TYPE_PARTIAL_LINE; | 922 | return TRACE_TYPE_PARTIAL_LINE; |
| 919 | 923 | ||
| 920 | /* Duration */ | 924 | /* Duration */ |
| 921 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 925 | if (flags & TRACE_GRAPH_PRINT_DURATION) { |
| 922 | ret = print_graph_duration(duration, s); | 926 | ret = print_graph_duration(duration, s); |
| 923 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 927 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 924 | return TRACE_TYPE_PARTIAL_LINE; | 928 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -948,14 +952,15 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 948 | } | 952 | } |
| 949 | 953 | ||
| 950 | /* Overrun */ | 954 | /* Overrun */ |
| 951 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { | 955 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { |
| 952 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 956 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", |
| 953 | trace->overrun); | 957 | trace->overrun); |
| 954 | if (!ret) | 958 | if (!ret) |
| 955 | return TRACE_TYPE_PARTIAL_LINE; | 959 | return TRACE_TYPE_PARTIAL_LINE; |
| 956 | } | 960 | } |
| 957 | 961 | ||
| 958 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); | 962 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, |
| 963 | cpu, pid, flags); | ||
| 959 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 964 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 960 | return TRACE_TYPE_PARTIAL_LINE; | 965 | return TRACE_TYPE_PARTIAL_LINE; |
| 961 | 966 | ||
| @@ -963,8 +968,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 963 | } | 968 | } |
| 964 | 969 | ||
| 965 | static enum print_line_t | 970 | static enum print_line_t |
| 966 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | 971 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
| 967 | struct trace_iterator *iter) | 972 | struct trace_iterator *iter, u32 flags) |
| 968 | { | 973 | { |
| 969 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 974 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
| 970 | struct fgraph_data *data = iter->private; | 975 | struct fgraph_data *data = iter->private; |
| @@ -976,16 +981,16 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
| 976 | if (data) | 981 | if (data) |
| 977 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; | 982 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
| 978 | 983 | ||
| 979 | if (print_graph_prologue(iter, s, 0, 0)) | 984 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
| 980 | return TRACE_TYPE_PARTIAL_LINE; | 985 | return TRACE_TYPE_PARTIAL_LINE; |
| 981 | 986 | ||
| 982 | /* No overhead */ | 987 | /* No overhead */ |
| 983 | ret = print_graph_overhead(-1, s); | 988 | ret = print_graph_overhead(-1, s, flags); |
| 984 | if (!ret) | 989 | if (!ret) |
| 985 | return TRACE_TYPE_PARTIAL_LINE; | 990 | return TRACE_TYPE_PARTIAL_LINE; |
| 986 | 991 | ||
| 987 | /* No time */ | 992 | /* No time */ |
| 988 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 993 | if (flags & TRACE_GRAPH_PRINT_DURATION) { |
| 989 | ret = trace_seq_printf(s, " | "); | 994 | ret = trace_seq_printf(s, " | "); |
| 990 | if (!ret) | 995 | if (!ret) |
| 991 | return TRACE_TYPE_PARTIAL_LINE; | 996 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -1040,7 +1045,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
| 1040 | 1045 | ||
| 1041 | 1046 | ||
| 1042 | enum print_line_t | 1047 | enum print_line_t |
| 1043 | print_graph_function(struct trace_iterator *iter) | 1048 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
| 1044 | { | 1049 | { |
| 1045 | struct ftrace_graph_ent_entry *field; | 1050 | struct ftrace_graph_ent_entry *field; |
| 1046 | struct fgraph_data *data = iter->private; | 1051 | struct fgraph_data *data = iter->private; |
| @@ -1061,7 +1066,7 @@ print_graph_function(struct trace_iterator *iter) | |||
| 1061 | if (data && data->failed) { | 1066 | if (data && data->failed) { |
| 1062 | field = &data->ent; | 1067 | field = &data->ent; |
| 1063 | iter->cpu = data->cpu; | 1068 | iter->cpu = data->cpu; |
| 1064 | ret = print_graph_entry(field, s, iter); | 1069 | ret = print_graph_entry(field, s, iter, flags); |
| 1065 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { | 1070 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { |
| 1066 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; | 1071 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; |
| 1067 | ret = TRACE_TYPE_NO_CONSUME; | 1072 | ret = TRACE_TYPE_NO_CONSUME; |
| @@ -1081,32 +1086,49 @@ print_graph_function(struct trace_iterator *iter) | |||
| 1081 | struct ftrace_graph_ent_entry saved; | 1086 | struct ftrace_graph_ent_entry saved; |
| 1082 | trace_assign_type(field, entry); | 1087 | trace_assign_type(field, entry); |
| 1083 | saved = *field; | 1088 | saved = *field; |
| 1084 | return print_graph_entry(&saved, s, iter); | 1089 | return print_graph_entry(&saved, s, iter, flags); |
| 1085 | } | 1090 | } |
| 1086 | case TRACE_GRAPH_RET: { | 1091 | case TRACE_GRAPH_RET: { |
| 1087 | struct ftrace_graph_ret_entry *field; | 1092 | struct ftrace_graph_ret_entry *field; |
| 1088 | trace_assign_type(field, entry); | 1093 | trace_assign_type(field, entry); |
| 1089 | return print_graph_return(&field->ret, s, entry, iter); | 1094 | return print_graph_return(&field->ret, s, entry, iter, flags); |
| 1090 | } | 1095 | } |
| 1096 | case TRACE_STACK: | ||
| 1097 | case TRACE_FN: | ||
| 1098 | /* dont trace stack and functions as comments */ | ||
| 1099 | return TRACE_TYPE_UNHANDLED; | ||
| 1100 | |||
| 1091 | default: | 1101 | default: |
| 1092 | return print_graph_comment(s, entry, iter); | 1102 | return print_graph_comment(s, entry, iter, flags); |
| 1093 | } | 1103 | } |
| 1094 | 1104 | ||
| 1095 | return TRACE_TYPE_HANDLED; | 1105 | return TRACE_TYPE_HANDLED; |
| 1096 | } | 1106 | } |
| 1097 | 1107 | ||
| 1098 | static void print_lat_header(struct seq_file *s) | 1108 | static enum print_line_t |
| 1109 | print_graph_function(struct trace_iterator *iter) | ||
| 1110 | { | ||
| 1111 | return print_graph_function_flags(iter, tracer_flags.val); | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | static enum print_line_t | ||
| 1115 | print_graph_function_event(struct trace_iterator *iter, int flags) | ||
| 1116 | { | ||
| 1117 | return print_graph_function(iter); | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | static void print_lat_header(struct seq_file *s, u32 flags) | ||
| 1099 | { | 1121 | { |
| 1100 | static const char spaces[] = " " /* 16 spaces */ | 1122 | static const char spaces[] = " " /* 16 spaces */ |
| 1101 | " " /* 4 spaces */ | 1123 | " " /* 4 spaces */ |
| 1102 | " "; /* 17 spaces */ | 1124 | " "; /* 17 spaces */ |
| 1103 | int size = 0; | 1125 | int size = 0; |
| 1104 | 1126 | ||
| 1105 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1127 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
| 1106 | size += 16; | 1128 | size += 16; |
| 1107 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1129 | if (flags & TRACE_GRAPH_PRINT_CPU) |
| 1108 | size += 4; | 1130 | size += 4; |
| 1109 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1131 | if (flags & TRACE_GRAPH_PRINT_PROC) |
| 1110 | size += 17; | 1132 | size += 17; |
| 1111 | 1133 | ||
| 1112 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); | 1134 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); |
| @@ -1117,43 +1139,48 @@ static void print_lat_header(struct seq_file *s) | |||
| 1117 | seq_printf(s, "#%.*s|||| / \n", size, spaces); | 1139 | seq_printf(s, "#%.*s|||| / \n", size, spaces); |
| 1118 | } | 1140 | } |
| 1119 | 1141 | ||
| 1120 | static void print_graph_headers(struct seq_file *s) | 1142 | void print_graph_headers_flags(struct seq_file *s, u32 flags) |
| 1121 | { | 1143 | { |
| 1122 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; | 1144 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; |
| 1123 | 1145 | ||
| 1124 | if (lat) | 1146 | if (lat) |
| 1125 | print_lat_header(s); | 1147 | print_lat_header(s, flags); |
| 1126 | 1148 | ||
| 1127 | /* 1st line */ | 1149 | /* 1st line */ |
| 1128 | seq_printf(s, "#"); | 1150 | seq_printf(s, "#"); |
| 1129 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1151 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
| 1130 | seq_printf(s, " TIME "); | 1152 | seq_printf(s, " TIME "); |
| 1131 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1153 | if (flags & TRACE_GRAPH_PRINT_CPU) |
| 1132 | seq_printf(s, " CPU"); | 1154 | seq_printf(s, " CPU"); |
| 1133 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1155 | if (flags & TRACE_GRAPH_PRINT_PROC) |
| 1134 | seq_printf(s, " TASK/PID "); | 1156 | seq_printf(s, " TASK/PID "); |
| 1135 | if (lat) | 1157 | if (lat) |
| 1136 | seq_printf(s, "|||||"); | 1158 | seq_printf(s, "|||||"); |
| 1137 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 1159 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
| 1138 | seq_printf(s, " DURATION "); | 1160 | seq_printf(s, " DURATION "); |
| 1139 | seq_printf(s, " FUNCTION CALLS\n"); | 1161 | seq_printf(s, " FUNCTION CALLS\n"); |
| 1140 | 1162 | ||
| 1141 | /* 2nd line */ | 1163 | /* 2nd line */ |
| 1142 | seq_printf(s, "#"); | 1164 | seq_printf(s, "#"); |
| 1143 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1165 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
| 1144 | seq_printf(s, " | "); | 1166 | seq_printf(s, " | "); |
| 1145 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1167 | if (flags & TRACE_GRAPH_PRINT_CPU) |
| 1146 | seq_printf(s, " | "); | 1168 | seq_printf(s, " | "); |
| 1147 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1169 | if (flags & TRACE_GRAPH_PRINT_PROC) |
| 1148 | seq_printf(s, " | | "); | 1170 | seq_printf(s, " | | "); |
| 1149 | if (lat) | 1171 | if (lat) |
| 1150 | seq_printf(s, "|||||"); | 1172 | seq_printf(s, "|||||"); |
| 1151 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 1173 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
| 1152 | seq_printf(s, " | | "); | 1174 | seq_printf(s, " | | "); |
| 1153 | seq_printf(s, " | | | |\n"); | 1175 | seq_printf(s, " | | | |\n"); |
| 1154 | } | 1176 | } |
| 1155 | 1177 | ||
| 1156 | static void graph_trace_open(struct trace_iterator *iter) | 1178 | void print_graph_headers(struct seq_file *s) |
| 1179 | { | ||
| 1180 | print_graph_headers_flags(s, tracer_flags.val); | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | void graph_trace_open(struct trace_iterator *iter) | ||
| 1157 | { | 1184 | { |
| 1158 | /* pid and depth on the last trace processed */ | 1185 | /* pid and depth on the last trace processed */ |
| 1159 | struct fgraph_data *data; | 1186 | struct fgraph_data *data; |
| @@ -1188,7 +1215,7 @@ static void graph_trace_open(struct trace_iterator *iter) | |||
| 1188 | pr_warning("function graph tracer: not enough memory\n"); | 1215 | pr_warning("function graph tracer: not enough memory\n"); |
| 1189 | } | 1216 | } |
| 1190 | 1217 | ||
| 1191 | static void graph_trace_close(struct trace_iterator *iter) | 1218 | void graph_trace_close(struct trace_iterator *iter) |
| 1192 | { | 1219 | { |
| 1193 | struct fgraph_data *data = iter->private; | 1220 | struct fgraph_data *data = iter->private; |
| 1194 | 1221 | ||
| @@ -1198,6 +1225,16 @@ static void graph_trace_close(struct trace_iterator *iter) | |||
| 1198 | } | 1225 | } |
| 1199 | } | 1226 | } |
| 1200 | 1227 | ||
| 1228 | static struct trace_event graph_trace_entry_event = { | ||
| 1229 | .type = TRACE_GRAPH_ENT, | ||
| 1230 | .trace = print_graph_function_event, | ||
| 1231 | }; | ||
| 1232 | |||
| 1233 | static struct trace_event graph_trace_ret_event = { | ||
| 1234 | .type = TRACE_GRAPH_RET, | ||
| 1235 | .trace = print_graph_function_event, | ||
| 1236 | }; | ||
| 1237 | |||
| 1201 | static struct tracer graph_trace __read_mostly = { | 1238 | static struct tracer graph_trace __read_mostly = { |
| 1202 | .name = "function_graph", | 1239 | .name = "function_graph", |
| 1203 | .open = graph_trace_open, | 1240 | .open = graph_trace_open, |
| @@ -1219,6 +1256,16 @@ static __init int init_graph_trace(void) | |||
| 1219 | { | 1256 | { |
| 1220 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); | 1257 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); |
| 1221 | 1258 | ||
| 1259 | if (!register_ftrace_event(&graph_trace_entry_event)) { | ||
| 1260 | pr_warning("Warning: could not register graph trace events\n"); | ||
| 1261 | return 1; | ||
| 1262 | } | ||
| 1263 | |||
| 1264 | if (!register_ftrace_event(&graph_trace_ret_event)) { | ||
| 1265 | pr_warning("Warning: could not register graph trace events\n"); | ||
| 1266 | return 1; | ||
| 1267 | } | ||
| 1268 | |||
| 1222 | return register_tracer(&graph_trace); | 1269 | return register_tracer(&graph_trace); |
| 1223 | } | 1270 | } |
| 1224 | 1271 | ||
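Two new trace_event handlers are registered for TRACE_GRAPH_ENT and TRACE_GRAPH_RET at init time. This is what lets tracers other than function_graph leave graph records in the buffer: when such a tracer's own print_line declines a record, the core output path looks the entry's type up among the registered events and lands in print_graph_function_event(), which ignores the generic output flags and renders with the graph tracer's current option bits. Note the error convention: register_ftrace_event() returns the assigned event type, so zero — not a negative value — signals failure, which is why the checks read "if (!register_ftrace_event(...))". A sketch of the registration contract, with a hypothetical event type:

	static enum print_line_t my_event_trace(struct trace_iterator *iter,
						int flags)
	{
		/* format iter->ent into iter->seq here ... */
		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event my_event = {
		.type	= TRACE_MY_TYPE,	/* hypothetical */
		.trace	= my_event_trace,
	};

	/* Returns the event type id, or 0 on failure. */
	if (!register_ftrace_event(&my_event))
		pr_warning("could not register my_event\n");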
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c deleted file mode 100644 index 7b97000745f5..000000000000 --- a/kernel/trace/trace_hw_branches.c +++ /dev/null | |||
| @@ -1,312 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * h/w branch tracer for x86 based on BTS | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2009 Intel Corporation. | ||
| 5 | * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009 | ||
| 6 | */ | ||
| 7 | #include <linux/kallsyms.h> | ||
| 8 | #include <linux/debugfs.h> | ||
| 9 | #include <linux/ftrace.h> | ||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/cpu.h> | ||
| 12 | #include <linux/smp.h> | ||
| 13 | #include <linux/fs.h> | ||
| 14 | |||
| 15 | #include <asm/ds.h> | ||
| 16 | |||
| 17 | #include "trace_output.h" | ||
| 18 | #include "trace.h" | ||
| 19 | |||
| 20 | |||
| 21 | #define BTS_BUFFER_SIZE (1 << 13) | ||
| 22 | |||
| 23 | static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer); | ||
| 24 | static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer); | ||
| 25 | |||
| 26 | #define this_tracer per_cpu(hwb_tracer, smp_processor_id()) | ||
| 27 | |||
| 28 | static int trace_hw_branches_enabled __read_mostly; | ||
| 29 | static int trace_hw_branches_suspended __read_mostly; | ||
| 30 | static struct trace_array *hw_branch_trace __read_mostly; | ||
| 31 | |||
| 32 | |||
| 33 | static void bts_trace_init_cpu(int cpu) | ||
| 34 | { | ||
| 35 | per_cpu(hwb_tracer, cpu) = | ||
| 36 | ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu), | ||
| 37 | BTS_BUFFER_SIZE, NULL, (size_t)-1, | ||
| 38 | BTS_KERNEL); | ||
| 39 | |||
| 40 | if (IS_ERR(per_cpu(hwb_tracer, cpu))) | ||
| 41 | per_cpu(hwb_tracer, cpu) = NULL; | ||
| 42 | } | ||
| 43 | |||
| 44 | static int bts_trace_init(struct trace_array *tr) | ||
| 45 | { | ||
| 46 | int cpu; | ||
| 47 | |||
| 48 | hw_branch_trace = tr; | ||
| 49 | trace_hw_branches_enabled = 0; | ||
| 50 | |||
| 51 | get_online_cpus(); | ||
| 52 | for_each_online_cpu(cpu) { | ||
| 53 | bts_trace_init_cpu(cpu); | ||
| 54 | |||
| 55 | if (likely(per_cpu(hwb_tracer, cpu))) | ||
| 56 | trace_hw_branches_enabled = 1; | ||
| 57 | } | ||
| 58 | trace_hw_branches_suspended = 0; | ||
| 59 | put_online_cpus(); | ||
| 60 | |||
| 61 | /* If we could not enable tracing on a single cpu, we fail. */ | ||
| 62 | return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP; | ||
| 63 | } | ||
| 64 | |||
| 65 | static void bts_trace_reset(struct trace_array *tr) | ||
| 66 | { | ||
| 67 | int cpu; | ||
| 68 | |||
| 69 | get_online_cpus(); | ||
| 70 | for_each_online_cpu(cpu) { | ||
| 71 | if (likely(per_cpu(hwb_tracer, cpu))) { | ||
| 72 | ds_release_bts(per_cpu(hwb_tracer, cpu)); | ||
| 73 | per_cpu(hwb_tracer, cpu) = NULL; | ||
| 74 | } | ||
| 75 | } | ||
| 76 | trace_hw_branches_enabled = 0; | ||
| 77 | trace_hw_branches_suspended = 0; | ||
| 78 | put_online_cpus(); | ||
| 79 | } | ||
| 80 | |||
| 81 | static void bts_trace_start(struct trace_array *tr) | ||
| 82 | { | ||
| 83 | int cpu; | ||
| 84 | |||
| 85 | get_online_cpus(); | ||
| 86 | for_each_online_cpu(cpu) | ||
| 87 | if (likely(per_cpu(hwb_tracer, cpu))) | ||
| 88 | ds_resume_bts(per_cpu(hwb_tracer, cpu)); | ||
| 89 | trace_hw_branches_suspended = 0; | ||
| 90 | put_online_cpus(); | ||
| 91 | } | ||
| 92 | |||
| 93 | static void bts_trace_stop(struct trace_array *tr) | ||
| 94 | { | ||
| 95 | int cpu; | ||
| 96 | |||
| 97 | get_online_cpus(); | ||
| 98 | for_each_online_cpu(cpu) | ||
| 99 | if (likely(per_cpu(hwb_tracer, cpu))) | ||
| 100 | ds_suspend_bts(per_cpu(hwb_tracer, cpu)); | ||
| 101 | trace_hw_branches_suspended = 1; | ||
| 102 | put_online_cpus(); | ||
| 103 | } | ||
| 104 | |||
| 105 | static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | ||
| 106 | unsigned long action, void *hcpu) | ||
| 107 | { | ||
| 108 | int cpu = (long)hcpu; | ||
| 109 | |||
| 110 | switch (action) { | ||
| 111 | case CPU_ONLINE: | ||
| 112 | case CPU_DOWN_FAILED: | ||
| 113 | /* The notification is sent with interrupts enabled. */ | ||
| 114 | if (trace_hw_branches_enabled) { | ||
| 115 | bts_trace_init_cpu(cpu); | ||
| 116 | |||
| 117 | if (trace_hw_branches_suspended && | ||
| 118 | likely(per_cpu(hwb_tracer, cpu))) | ||
| 119 | ds_suspend_bts(per_cpu(hwb_tracer, cpu)); | ||
| 120 | } | ||
| 121 | break; | ||
| 122 | |||
| 123 | case CPU_DOWN_PREPARE: | ||
| 124 | /* The notification is sent with interrupts enabled. */ | ||
| 125 | if (likely(per_cpu(hwb_tracer, cpu))) { | ||
| 126 | ds_release_bts(per_cpu(hwb_tracer, cpu)); | ||
| 127 | per_cpu(hwb_tracer, cpu) = NULL; | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 131 | return NOTIFY_DONE; | ||
| 132 | } | ||
| 133 | |||
| 134 | static struct notifier_block bts_hotcpu_notifier __cpuinitdata = { | ||
| 135 | .notifier_call = bts_hotcpu_handler | ||
| 136 | }; | ||
| 137 | |||
| 138 | static void bts_trace_print_header(struct seq_file *m) | ||
| 139 | { | ||
| 140 | seq_puts(m, "# CPU# TO <- FROM\n"); | ||
| 141 | } | ||
| 142 | |||
| 143 | static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | ||
| 144 | { | ||
| 145 | unsigned long symflags = TRACE_ITER_SYM_OFFSET; | ||
| 146 | struct trace_entry *entry = iter->ent; | ||
| 147 | struct trace_seq *seq = &iter->seq; | ||
| 148 | struct hw_branch_entry *it; | ||
| 149 | |||
| 150 | trace_assign_type(it, entry); | ||
| 151 | |||
| 152 | if (entry->type == TRACE_HW_BRANCHES) { | ||
| 153 | if (trace_seq_printf(seq, "%4d ", iter->cpu) && | ||
| 154 | seq_print_ip_sym(seq, it->to, symflags) && | ||
| 155 | trace_seq_printf(seq, "\t <- ") && | ||
| 156 | seq_print_ip_sym(seq, it->from, symflags) && | ||
| 157 | trace_seq_printf(seq, "\n")) | ||
| 158 | return TRACE_TYPE_HANDLED; | ||
| 159 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 160 | } | ||
| 161 | return TRACE_TYPE_UNHANDLED; | ||
| 162 | } | ||
| 163 | |||
| 164 | void trace_hw_branch(u64 from, u64 to) | ||
| 165 | { | ||
| 166 | struct ftrace_event_call *call = &event_hw_branch; | ||
| 167 | struct trace_array *tr = hw_branch_trace; | ||
| 168 | struct ring_buffer_event *event; | ||
| 169 | struct ring_buffer *buf; | ||
| 170 | struct hw_branch_entry *entry; | ||
| 171 | unsigned long irq1; | ||
| 172 | int cpu; | ||
| 173 | |||
| 174 | if (unlikely(!tr)) | ||
| 175 | return; | ||
| 176 | |||
| 177 | if (unlikely(!trace_hw_branches_enabled)) | ||
| 178 | return; | ||
| 179 | |||
| 180 | local_irq_save(irq1); | ||
| 181 | cpu = raw_smp_processor_id(); | ||
| 182 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | ||
| 183 | goto out; | ||
| 184 | |||
| 185 | buf = tr->buffer; | ||
| 186 | event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES, | ||
| 187 | sizeof(*entry), 0, 0); | ||
| 188 | if (!event) | ||
| 189 | goto out; | ||
| 190 | entry = ring_buffer_event_data(event); | ||
| 191 | tracing_generic_entry_update(&entry->ent, 0, from); | ||
| 192 | entry->ent.type = TRACE_HW_BRANCHES; | ||
| 193 | entry->from = from; | ||
| 194 | entry->to = to; | ||
| 195 | if (!filter_check_discard(call, entry, buf, event)) | ||
| 196 | trace_buffer_unlock_commit(buf, event, 0, 0); | ||
| 197 | |||
| 198 | out: | ||
| 199 | atomic_dec(&tr->data[cpu]->disabled); | ||
| 200 | local_irq_restore(irq1); | ||
| 201 | } | ||
| 202 | |||
| 203 | static void trace_bts_at(const struct bts_trace *trace, void *at) | ||
| 204 | { | ||
| 205 | struct bts_struct bts; | ||
| 206 | int err = 0; | ||
| 207 | |||
| 208 | WARN_ON_ONCE(!trace->read); | ||
| 209 | if (!trace->read) | ||
| 210 | return; | ||
| 211 | |||
| 212 | err = trace->read(this_tracer, at, &bts); | ||
| 213 | if (err < 0) | ||
| 214 | return; | ||
| 215 | |||
| 216 | switch (bts.qualifier) { | ||
| 217 | case BTS_BRANCH: | ||
| 218 | trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to); | ||
| 219 | break; | ||
| 220 | } | ||
| 221 | } | ||
| 222 | |||
| 223 | /* | ||
| 224 | * Collect the trace on the current cpu and write it into the ftrace buffer. | ||
| 225 | * | ||
| 226 | * pre: tracing must be suspended on the current cpu | ||
| 227 | */ | ||
| 228 | static void trace_bts_cpu(void *arg) | ||
| 229 | { | ||
| 230 | struct trace_array *tr = (struct trace_array *)arg; | ||
| 231 | const struct bts_trace *trace; | ||
| 232 | unsigned char *at; | ||
| 233 | |||
| 234 | if (unlikely(!tr)) | ||
| 235 | return; | ||
| 236 | |||
| 237 | if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled))) | ||
| 238 | return; | ||
| 239 | |||
| 240 | if (unlikely(!this_tracer)) | ||
| 241 | return; | ||
| 242 | |||
| 243 | trace = ds_read_bts(this_tracer); | ||
| 244 | if (!trace) | ||
| 245 | return; | ||
| 246 | |||
| 247 | for (at = trace->ds.top; (void *)at < trace->ds.end; | ||
| 248 | at += trace->ds.size) | ||
| 249 | trace_bts_at(trace, at); | ||
| 250 | |||
| 251 | for (at = trace->ds.begin; (void *)at < trace->ds.top; | ||
| 252 | at += trace->ds.size) | ||
| 253 | trace_bts_at(trace, at); | ||
| 254 | } | ||
| 255 | |||
| 256 | static void trace_bts_prepare(struct trace_iterator *iter) | ||
| 257 | { | ||
| 258 | int cpu; | ||
| 259 | |||
| 260 | get_online_cpus(); | ||
| 261 | for_each_online_cpu(cpu) | ||
| 262 | if (likely(per_cpu(hwb_tracer, cpu))) | ||
| 263 | ds_suspend_bts(per_cpu(hwb_tracer, cpu)); | ||
| 264 | /* | ||
| 265 | * We need to collect the trace on the respective cpu since ftrace | ||
| 266 | * implicitly adds the record for the current cpu. | ||
| 267 | * Once that is more flexible, we could collect the data from any cpu. | ||
| 268 | */ | ||
| 269 | on_each_cpu(trace_bts_cpu, iter->tr, 1); | ||
| 270 | |||
| 271 | for_each_online_cpu(cpu) | ||
| 272 | if (likely(per_cpu(hwb_tracer, cpu))) | ||
| 273 | ds_resume_bts(per_cpu(hwb_tracer, cpu)); | ||
| 274 | put_online_cpus(); | ||
| 275 | } | ||
| 276 | |||
| 277 | static void trace_bts_close(struct trace_iterator *iter) | ||
| 278 | { | ||
| 279 | tracing_reset_online_cpus(iter->tr); | ||
| 280 | } | ||
| 281 | |||
| 282 | void trace_hw_branch_oops(void) | ||
| 283 | { | ||
| 284 | if (this_tracer) { | ||
| 285 | ds_suspend_bts_noirq(this_tracer); | ||
| 286 | trace_bts_cpu(hw_branch_trace); | ||
| 287 | ds_resume_bts_noirq(this_tracer); | ||
| 288 | } | ||
| 289 | } | ||
| 290 | |||
| 291 | struct tracer bts_tracer __read_mostly = | ||
| 292 | { | ||
| 293 | .name = "hw-branch-tracer", | ||
| 294 | .init = bts_trace_init, | ||
| 295 | .reset = bts_trace_reset, | ||
| 296 | .print_header = bts_trace_print_header, | ||
| 297 | .print_line = bts_trace_print_line, | ||
| 298 | .start = bts_trace_start, | ||
| 299 | .stop = bts_trace_stop, | ||
| 300 | .open = trace_bts_prepare, | ||
| 301 | .close = trace_bts_close, | ||
| 302 | #ifdef CONFIG_FTRACE_SELFTEST | ||
| 303 | .selftest = trace_selftest_startup_hw_branches, | ||
| 304 | #endif /* CONFIG_FTRACE_SELFTEST */ | ||
| 305 | }; | ||
| 306 | |||
| 307 | __init static int init_bts_trace(void) | ||
| 308 | { | ||
| 309 | register_hotcpu_notifier(&bts_hotcpu_notifier); | ||
| 310 | return register_tracer(&bts_tracer); | ||
| 311 | } | ||
| 312 | device_initcall(init_bts_trace); | ||
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2974bc7538c7..6fd486e0cef4 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -34,6 +34,9 @@ static int trace_type __read_mostly; | |||
| 34 | 34 | ||
| 35 | static int save_lat_flag; | 35 | static int save_lat_flag; |
| 36 | 36 | ||
| 37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); | ||
| 38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); | ||
| 39 | |||
| 37 | #ifdef CONFIG_PREEMPT_TRACER | 40 | #ifdef CONFIG_PREEMPT_TRACER |
| 38 | static inline int | 41 | static inline int |
| 39 | preempt_trace(void) | 42 | preempt_trace(void) |
| @@ -55,6 +58,23 @@ irq_trace(void) | |||
| 55 | # define irq_trace() (0) | 58 | # define irq_trace() (0) |
| 56 | #endif | 59 | #endif |
| 57 | 60 | ||
| 61 | #define TRACE_DISPLAY_GRAPH 1 | ||
| 62 | |||
| 63 | static struct tracer_opt trace_opts[] = { | ||
| 64 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 65 | /* display latency trace as call graph */ | ||
| 66 | { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, | ||
| 67 | #endif | ||
| 68 | { } /* Empty entry */ | ||
| 69 | }; | ||
| 70 | |||
| 71 | static struct tracer_flags tracer_flags = { | ||
| 72 | .val = 0, | ||
| 73 | .opts = trace_opts, | ||
| 74 | }; | ||
| 75 | |||
| 76 | #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) | ||
| 77 | |||
| 58 | /* | 78 | /* |
| 59 | * Sequence count - we record it when starting a measurement and | 79 | * Sequence count - we record it when starting a measurement and |
| 60 | * skip the latency if the sequence has changed - some other section | 80 | * skip the latency if the sequence has changed - some other section |
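Once wired up through the tracer's .flags and .set_flag fields (see the tracer structs at the end of this file's diff), the new bit surfaces at runtime as an options/display-graph file under the tracing debugfs directory (assuming the usual mount at /sys/kernel/debug/tracing) while an irqsoff-family tracer is active; writing 1 or 0 there ends up in irqsoff_set_flag() below, which restarts the tracer in the requested mode.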
| @@ -108,6 +128,202 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
| 108 | }; | 128 | }; |
| 109 | #endif /* CONFIG_FUNCTION_TRACER */ | 129 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 110 | 130 | ||
| 131 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 132 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | ||
| 133 | { | ||
| 134 | int cpu; | ||
| 135 | |||
| 136 | if (!(bit & TRACE_DISPLAY_GRAPH)) | ||
| 137 | return -EINVAL; | ||
| 138 | |||
| 139 | if (!(is_graph() ^ set)) | ||
| 140 | return 0; | ||
| 141 | |||
| 142 | stop_irqsoff_tracer(irqsoff_trace, !set); | ||
| 143 | |||
| 144 | for_each_possible_cpu(cpu) | ||
| 145 | per_cpu(tracing_cpu, cpu) = 0; | ||
| 146 | |||
| 147 | tracing_max_latency = 0; | ||
| 148 | tracing_reset_online_cpus(irqsoff_trace); | ||
| 149 | |||
| 150 | return start_irqsoff_tracer(irqsoff_trace, set); | ||
| 151 | } | ||
| 152 | |||
| 153 | static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) | ||
| 154 | { | ||
| 155 | struct trace_array *tr = irqsoff_trace; | ||
| 156 | struct trace_array_cpu *data; | ||
| 157 | unsigned long flags; | ||
| 158 | long disabled; | ||
| 159 | int ret; | ||
| 160 | int cpu; | ||
| 161 | int pc; | ||
| 162 | |||
| 163 | cpu = raw_smp_processor_id(); | ||
| 164 | if (likely(!per_cpu(tracing_cpu, cpu))) | ||
| 165 | return 0; | ||
| 166 | |||
| 167 | local_save_flags(flags); | ||
| 168 | /* slight chance to get a false positive on tracing_cpu */ | ||
| 169 | if (!irqs_disabled_flags(flags)) | ||
| 170 | return 0; | ||
| 171 | |||
| 172 | data = tr->data[cpu]; | ||
| 173 | disabled = atomic_inc_return(&data->disabled); | ||
| 174 | |||
| 175 | if (likely(disabled == 1)) { | ||
| 176 | pc = preempt_count(); | ||
| 177 | ret = __trace_graph_entry(tr, trace, flags, pc); | ||
| 178 | } else | ||
| 179 | ret = 0; | ||
| 180 | |||
| 181 | atomic_dec(&data->disabled); | ||
| 182 | return ret; | ||
| 183 | } | ||
| 184 | |||
| 185 | static void irqsoff_graph_return(struct ftrace_graph_ret *trace) | ||
| 186 | { | ||
| 187 | struct trace_array *tr = irqsoff_trace; | ||
| 188 | struct trace_array_cpu *data; | ||
| 189 | unsigned long flags; | ||
| 190 | long disabled; | ||
| 191 | int cpu; | ||
| 192 | int pc; | ||
| 193 | |||
| 194 | cpu = raw_smp_processor_id(); | ||
| 195 | if (likely(!per_cpu(tracing_cpu, cpu))) | ||
| 196 | return; | ||
| 197 | |||
| 198 | local_save_flags(flags); | ||
| 199 | /* slight chance to get a false positive on tracing_cpu */ | ||
| 200 | if (!irqs_disabled_flags(flags)) | ||
| 201 | return; | ||
| 202 | |||
| 203 | data = tr->data[cpu]; | ||
| 204 | disabled = atomic_inc_return(&data->disabled); | ||
| 205 | |||
| 206 | if (likely(disabled == 1)) { | ||
| 207 | pc = preempt_count(); | ||
| 208 | __trace_graph_return(tr, trace, flags, pc); | ||
| 209 | } | ||
| 210 | |||
| 211 | atomic_dec(&data->disabled); | ||
| 212 | } | ||
| 213 | |||
| 214 | static void irqsoff_trace_open(struct trace_iterator *iter) | ||
| 215 | { | ||
| 216 | if (is_graph()) | ||
| 217 | graph_trace_open(iter); | ||
| 218 | |||
| 219 | } | ||
| 220 | |||
| 221 | static void irqsoff_trace_close(struct trace_iterator *iter) | ||
| 222 | { | ||
| 223 | if (iter->private) | ||
| 224 | graph_trace_close(iter); | ||
| 225 | } | ||
| 226 | |||
| 227 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ | ||
| 228 | TRACE_GRAPH_PRINT_PROC) | ||
| 229 | |||
| 230 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | ||
| 231 | { | ||
| 232 | u32 flags = GRAPH_TRACER_FLAGS; | ||
| 233 | |||
| 234 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
| 235 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
| 236 | else | ||
| 237 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
| 238 | |||
| 239 | /* | ||
| 240 | * In graph mode call the graph tracer output function, | ||
| 241 | * otherwise go with the TRACE_FN event handler | ||
| 242 | */ | ||
| 243 | if (is_graph()) | ||
| 244 | return print_graph_function_flags(iter, flags); | ||
| 245 | |||
| 246 | return TRACE_TYPE_UNHANDLED; | ||
| 247 | } | ||
| 248 | |||
| 249 | static void irqsoff_print_header(struct seq_file *s) | ||
| 250 | { | ||
| 251 | if (is_graph()) { | ||
| 252 | struct trace_iterator *iter = s->private; | ||
| 253 | u32 flags = GRAPH_TRACER_FLAGS; | ||
| 254 | |||
| 255 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | ||
| 256 | /* print nothing if the buffers are empty */ | ||
| 257 | if (trace_empty(iter)) | ||
| 258 | return; | ||
| 259 | |||
| 260 | print_trace_header(s, iter); | ||
| 261 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
| 262 | } else | ||
| 263 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
| 264 | |||
| 265 | print_graph_headers_flags(s, flags); | ||
| 266 | } else | ||
| 267 | trace_default_header(s); | ||
| 268 | } | ||
| 269 | |||
| 270 | static void | ||
| 271 | trace_graph_function(struct trace_array *tr, | ||
| 272 | unsigned long ip, unsigned long flags, int pc) | ||
| 273 | { | ||
| 274 | u64 time = trace_clock_local(); | ||
| 275 | struct ftrace_graph_ent ent = { | ||
| 276 | .func = ip, | ||
| 277 | .depth = 0, | ||
| 278 | }; | ||
| 279 | struct ftrace_graph_ret ret = { | ||
| 280 | .func = ip, | ||
| 281 | .depth = 0, | ||
| 282 | .calltime = time, | ||
| 283 | .rettime = time, | ||
| 284 | }; | ||
| 285 | |||
| 286 | __trace_graph_entry(tr, &ent, flags, pc); | ||
| 287 | __trace_graph_return(tr, &ret, flags, pc); | ||
| 288 | } | ||
| 289 | |||
| 290 | static void | ||
| 291 | __trace_function(struct trace_array *tr, | ||
| 292 | unsigned long ip, unsigned long parent_ip, | ||
| 293 | unsigned long flags, int pc) | ||
| 294 | { | ||
| 295 | if (!is_graph()) | ||
| 296 | trace_function(tr, ip, parent_ip, flags, pc); | ||
| 297 | else { | ||
| 298 | trace_graph_function(tr, parent_ip, flags, pc); | ||
| 299 | trace_graph_function(tr, ip, flags, pc); | ||
| 300 | } | ||
| 301 | } | ||
| 302 | |||
| 303 | #else | ||
| 304 | #define __trace_function trace_function | ||
| 305 | |||
| 306 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | ||
| 307 | { | ||
| 308 | return -EINVAL; | ||
| 309 | } | ||
| 310 | |||
| 311 | static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) | ||
| 312 | { | ||
| 313 | return -1; | ||
| 314 | } | ||
| 315 | |||
| 316 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | ||
| 317 | { | ||
| 318 | return TRACE_TYPE_UNHANDLED; | ||
| 319 | } | ||
| 320 | |||
| 321 | static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } | ||
| 322 | static void irqsoff_print_header(struct seq_file *s) { } | ||
| 323 | static void irqsoff_trace_open(struct trace_iterator *iter) { } | ||
| 324 | static void irqsoff_trace_close(struct trace_iterator *iter) { } | ||
| 325 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
| 326 | |||
| 111 | /* | 327 | /* |
| 112 | * Should this new latency be reported/recorded? | 328 | * Should this new latency be reported/recorded? |
| 113 | */ | 329 | */ |
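irqsoff_print_line() only claims a record in graph mode; returning TRACE_TYPE_UNHANDLED otherwise hands the record back to the core, which then consults the per-type trace_event handlers — including the TRACE_GRAPH_ENT/RET handlers registered in trace_functions_graph.c above. A simplified sketch of that dispatch order in the core print path (illustrative, not the literal trace.c code):

	static enum print_line_t print_one_line(struct trace_iterator *iter)
	{
		if (iter->trace && iter->trace->print_line) {
			enum print_line_t ret = iter->trace->print_line(iter);

			/* The current tracer gets first refusal... */
			if (ret != TRACE_TYPE_UNHANDLED)
				return ret;
		}

		/* ...then the record's registered trace_event, keyed on
		 * iter->ent->type, formats it. */
		return print_default_fmt(iter);	/* hypothetical name */
	}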
| @@ -150,7 +366,7 @@ check_critical_timing(struct trace_array *tr, | |||
| 150 | if (!report_latency(delta)) | 366 | if (!report_latency(delta)) |
| 151 | goto out_unlock; | 367 | goto out_unlock; |
| 152 | 368 | ||
| 153 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 369 | __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
| 154 | /* Skip 5 functions to get to the irq/preempt enable function */ | 370 | /* Skip 5 functions to get to the irq/preempt enable function */ |
| 155 | __trace_stack(tr, flags, 5, pc); | 371 | __trace_stack(tr, flags, 5, pc); |
| 156 | 372 | ||
| @@ -172,7 +388,7 @@ out_unlock: | |||
| 172 | out: | 388 | out: |
| 173 | data->critical_sequence = max_sequence; | 389 | data->critical_sequence = max_sequence; |
| 174 | data->preempt_timestamp = ftrace_now(cpu); | 390 | data->preempt_timestamp = ftrace_now(cpu); |
| 175 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 391 | __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
| 176 | } | 392 | } |
| 177 | 393 | ||
| 178 | static inline void | 394 | static inline void |
| @@ -204,7 +420,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
| 204 | 420 | ||
| 205 | local_save_flags(flags); | 421 | local_save_flags(flags); |
| 206 | 422 | ||
| 207 | trace_function(tr, ip, parent_ip, flags, preempt_count()); | 423 | __trace_function(tr, ip, parent_ip, flags, preempt_count()); |
| 208 | 424 | ||
| 209 | per_cpu(tracing_cpu, cpu) = 1; | 425 | per_cpu(tracing_cpu, cpu) = 1; |
| 210 | 426 | ||
| @@ -238,7 +454,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
| 238 | atomic_inc(&data->disabled); | 454 | atomic_inc(&data->disabled); |
| 239 | 455 | ||
| 240 | local_save_flags(flags); | 456 | local_save_flags(flags); |
| 241 | trace_function(tr, ip, parent_ip, flags, preempt_count()); | 457 | __trace_function(tr, ip, parent_ip, flags, preempt_count()); |
| 242 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); | 458 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); |
| 243 | data->critical_start = 0; | 459 | data->critical_start = 0; |
| 244 | atomic_dec(&data->disabled); | 460 | atomic_dec(&data->disabled); |
| @@ -347,19 +563,32 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) | |||
| 347 | } | 563 | } |
| 348 | #endif /* CONFIG_PREEMPT_TRACER */ | 564 | #endif /* CONFIG_PREEMPT_TRACER */ |
| 349 | 565 | ||
| 350 | static void start_irqsoff_tracer(struct trace_array *tr) | 566 | static int start_irqsoff_tracer(struct trace_array *tr, int graph) |
| 351 | { | 567 | { |
| 352 | register_ftrace_function(&trace_ops); | 568 | int ret = 0; |
| 353 | if (tracing_is_enabled()) | 569 | |
| 570 | if (!graph) | ||
| 571 | ret = register_ftrace_function(&trace_ops); | ||
| 572 | else | ||
| 573 | ret = register_ftrace_graph(&irqsoff_graph_return, | ||
| 574 | &irqsoff_graph_entry); | ||
| 575 | |||
| 576 | if (!ret && tracing_is_enabled()) | ||
| 354 | tracer_enabled = 1; | 577 | tracer_enabled = 1; |
| 355 | else | 578 | else |
| 356 | tracer_enabled = 0; | 579 | tracer_enabled = 0; |
| 580 | |||
| 581 | return ret; | ||
| 357 | } | 582 | } |
| 358 | 583 | ||
| 359 | static void stop_irqsoff_tracer(struct trace_array *tr) | 584 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph) |
| 360 | { | 585 | { |
| 361 | tracer_enabled = 0; | 586 | tracer_enabled = 0; |
| 362 | unregister_ftrace_function(&trace_ops); | 587 | |
| 588 | if (!graph) | ||
| 589 | unregister_ftrace_function(&trace_ops); | ||
| 590 | else | ||
| 591 | unregister_ftrace_graph(); | ||
| 363 | } | 592 | } |
| 364 | 593 | ||
| 365 | static void __irqsoff_tracer_init(struct trace_array *tr) | 594 | static void __irqsoff_tracer_init(struct trace_array *tr) |
| @@ -372,12 +601,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
| 372 | /* make sure that the tracer is visible */ | 601 | /* make sure that the tracer is visible */ |
| 373 | smp_wmb(); | 602 | smp_wmb(); |
| 374 | tracing_reset_online_cpus(tr); | 603 | tracing_reset_online_cpus(tr); |
| 375 | start_irqsoff_tracer(tr); | 604 | |
| 605 | if (start_irqsoff_tracer(tr, is_graph())) | ||
| 606 | printk(KERN_ERR "failed to start irqsoff tracer\n"); | ||
| 376 | } | 607 | } |
| 377 | 608 | ||
| 378 | static void irqsoff_tracer_reset(struct trace_array *tr) | 609 | static void irqsoff_tracer_reset(struct trace_array *tr) |
| 379 | { | 610 | { |
| 380 | stop_irqsoff_tracer(tr); | 611 | stop_irqsoff_tracer(tr, is_graph()); |
| 381 | 612 | ||
| 382 | if (!save_lat_flag) | 613 | if (!save_lat_flag) |
| 383 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 614 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; |
| @@ -409,9 +640,15 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
| 409 | .start = irqsoff_tracer_start, | 640 | .start = irqsoff_tracer_start, |
| 410 | .stop = irqsoff_tracer_stop, | 641 | .stop = irqsoff_tracer_stop, |
| 411 | .print_max = 1, | 642 | .print_max = 1, |
| 643 | .print_header = irqsoff_print_header, | ||
| 644 | .print_line = irqsoff_print_line, | ||
| 645 | .flags = &tracer_flags, | ||
| 646 | .set_flag = irqsoff_set_flag, | ||
| 412 | #ifdef CONFIG_FTRACE_SELFTEST | 647 | #ifdef CONFIG_FTRACE_SELFTEST |
| 413 | .selftest = trace_selftest_startup_irqsoff, | 648 | .selftest = trace_selftest_startup_irqsoff, |
| 414 | #endif | 649 | #endif |
| 650 | .open = irqsoff_trace_open, | ||
| 651 | .close = irqsoff_trace_close, | ||
| 415 | }; | 652 | }; |
| 416 | # define register_irqsoff(trace) register_tracer(&trace) | 653 | # define register_irqsoff(trace) register_tracer(&trace) |
| 417 | #else | 654 | #else |
| @@ -435,9 +672,15 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
| 435 | .start = irqsoff_tracer_start, | 672 | .start = irqsoff_tracer_start, |
| 436 | .stop = irqsoff_tracer_stop, | 673 | .stop = irqsoff_tracer_stop, |
| 437 | .print_max = 1, | 674 | .print_max = 1, |
| 675 | .print_header = irqsoff_print_header, | ||
| 676 | .print_line = irqsoff_print_line, | ||
| 677 | .flags = &tracer_flags, | ||
| 678 | .set_flag = irqsoff_set_flag, | ||
| 438 | #ifdef CONFIG_FTRACE_SELFTEST | 679 | #ifdef CONFIG_FTRACE_SELFTEST |
| 439 | .selftest = trace_selftest_startup_preemptoff, | 680 | .selftest = trace_selftest_startup_preemptoff, |
| 440 | #endif | 681 | #endif |
| 682 | .open = irqsoff_trace_open, | ||
| 683 | .close = irqsoff_trace_close, | ||
| 441 | }; | 684 | }; |
| 442 | # define register_preemptoff(trace) register_tracer(&trace) | 685 | # define register_preemptoff(trace) register_tracer(&trace) |
| 443 | #else | 686 | #else |
| @@ -463,9 +706,15 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
| 463 | .start = irqsoff_tracer_start, | 706 | .start = irqsoff_tracer_start, |
| 464 | .stop = irqsoff_tracer_stop, | 707 | .stop = irqsoff_tracer_stop, |
| 465 | .print_max = 1, | 708 | .print_max = 1, |
| 709 | .print_header = irqsoff_print_header, | ||
| 710 | .print_line = irqsoff_print_line, | ||
| 711 | .flags = &tracer_flags, | ||
| 712 | .set_flag = irqsoff_set_flag, | ||
| 466 | #ifdef CONFIG_FTRACE_SELFTEST | 713 | #ifdef CONFIG_FTRACE_SELFTEST |
| 467 | .selftest = trace_selftest_startup_preemptirqsoff, | 714 | .selftest = trace_selftest_startup_preemptirqsoff, |
| 468 | #endif | 715 | #endif |
| 716 | .open = irqsoff_trace_open, | ||
| 717 | .close = irqsoff_trace_close, | ||
| 469 | }; | 718 | }; |
| 470 | 719 | ||
| 471 | # define register_preemptirqsoff(trace) register_tracer(&trace) | 720 | # define register_preemptirqsoff(trace) register_tracer(&trace) |
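The pivotal trick in this file is trace_graph_function(): a plain function hit has no real graph entry/return pair, so it is encoded as an entry event immediately followed by a return event carrying identical call and return timestamps. The graph printer then renders it as a zero-duration leaf, and the irqsoff trace keeps its familiar shape in graph mode. __trace_function() is the single switch point; a sketch of what one call fans out to:

	/* In graph mode, one __trace_function(tr, ip, parent_ip, ...) call
	 * emits four records: an entry/return pair for the caller, then
	 * one for the traced address itself. */
	__trace_function(tr, ip, parent_ip, flags, pc);
		/* -> trace_graph_function(tr, parent_ip, flags, pc);
		 *    trace_graph_function(tr, ip, flags, pc);
		 * each writing {entry, return} with
		 * calltime == rettime == trace_clock_local() */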
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1251e367bae9..a7514326052b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -29,6 +29,8 @@ | |||
| 29 | #include <linux/ctype.h> | 29 | #include <linux/ctype.h> |
| 30 | #include <linux/ptrace.h> | 30 | #include <linux/ptrace.h> |
| 31 | #include <linux/perf_event.h> | 31 | #include <linux/perf_event.h> |
| 32 | #include <linux/stringify.h> | ||
| 33 | #include <asm/bitsperlong.h> | ||
| 32 | 34 | ||
| 33 | #include "trace.h" | 35 | #include "trace.h" |
| 34 | #include "trace_output.h" | 36 | #include "trace_output.h" |
| @@ -40,7 +42,6 @@ | |||
| 40 | 42 | ||
| 41 | /* Reserved field names */ | 43 | /* Reserved field names */ |
| 42 | #define FIELD_STRING_IP "__probe_ip" | 44 | #define FIELD_STRING_IP "__probe_ip" |
| 43 | #define FIELD_STRING_NARGS "__probe_nargs" | ||
| 44 | #define FIELD_STRING_RETIP "__probe_ret_ip" | 45 | #define FIELD_STRING_RETIP "__probe_ret_ip" |
| 45 | #define FIELD_STRING_FUNC "__probe_func" | 46 | #define FIELD_STRING_FUNC "__probe_func" |
| 46 | 47 | ||
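The hunk that follows begins converting kprobe argument fetching from a one-size-fits-all "return an unsigned long" model to typed fetch functions that write their result through a destination pointer, so each argument is recorded at its declared width. The templates are macro-generated per type; expanding DEFINE_FETCH_reg(u8) by hand gives roughly:

	/* Old model: every fetch produced a full word. */
	typedef unsigned long (*old_fetch_t)(struct pt_regs *regs, void *data);

	/* New model: typed, writes through dest. */
	typedef void (*fetch_func_t)(struct pt_regs *regs, void *data,
				     void *dest);

	/* What FETCH_FUNC_NAME(reg, u8) expands to, approximately: */
	static __kprobes void fetch_reg_u8(struct pt_regs *regs,
					   void *offset, void *dest)
	{
		*(u8 *)dest = (u8)regs_get_register(regs,
				(unsigned int)((unsigned long)offset));
	}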
| @@ -52,56 +53,102 @@ const char *reserved_field_names[] = { | |||
| 52 | "common_tgid", | 53 | "common_tgid", |
| 53 | "common_lock_depth", | 54 | "common_lock_depth", |
| 54 | FIELD_STRING_IP, | 55 | FIELD_STRING_IP, |
| 55 | FIELD_STRING_NARGS, | ||
| 56 | FIELD_STRING_RETIP, | 56 | FIELD_STRING_RETIP, |
| 57 | FIELD_STRING_FUNC, | 57 | FIELD_STRING_FUNC, |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | struct fetch_func { | 60 | /* Printing function type */ |
| 61 | unsigned long (*func)(struct pt_regs *, void *); | 61 | typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *); |
| 62 | #define PRINT_TYPE_FUNC_NAME(type) print_type_##type | ||
| 63 | #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type | ||
| 64 | |||
| 65 | /* Print function template for basic types */ | ||
| 66 | #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ | ||
| 67 | static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ | ||
| 68 | const char *name, void *data)\ | ||
| 69 | { \ | ||
| 70 | return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ | ||
| 71 | } \ | ||
| 72 | static const char PRINT_TYPE_FMT_NAME(type)[] = fmt; | ||
| 73 | |||
| 74 | DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int) | ||
| 75 | DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int) | ||
| 76 | DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long) | ||
| 77 | DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long) | ||
| 78 | DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int) | ||
| 79 | DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int) | ||
| 80 | DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) | ||
| 81 | DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) | ||
| 82 | |||
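These macros generate one print helper and one format string per basic type; mechanically expanding DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int) from the lines above yields:

	static __kprobes int print_type_u8(struct trace_seq *s,
					   const char *name, void *data)
	{
		/* read one u8 from the packed payload, widen it, print it */
		return trace_seq_printf(s, " %s=%x", name,
					(unsigned int)*(u8 *)data);
	}
	static const char print_type_format_u8[] = "%x";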
| 83 | /* Data fetch function type */ | ||
| 84 | typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); | ||
| 85 | |||
| 86 | struct fetch_param { | ||
| 87 | fetch_func_t fn; | ||
| 62 | void *data; | 88 | void *data; |
| 63 | }; | 89 | }; |
| 64 | 90 | ||
| 65 | static __kprobes unsigned long call_fetch(struct fetch_func *f, | 91 | static __kprobes void call_fetch(struct fetch_param *fprm, |
| 66 | struct pt_regs *regs) | 92 | struct pt_regs *regs, void *dest) |
| 67 | { | 93 | { |
| 68 | return f->func(regs, f->data); | 94 | return fprm->fn(regs, fprm->data, dest); |
| 69 | } | 95 | } |
| 70 | 96 | ||
| 71 | /* fetch handlers */ | 97 | #define FETCH_FUNC_NAME(kind, type) fetch_##kind##_##type |
| 72 | static __kprobes unsigned long fetch_register(struct pt_regs *regs, | 98 | /* |
| 73 | void *offset) | 99 | * Define macro for basic types - we don't need to define s* types, because |
| 74 | { | 100 | * only the bit width matters at recording time. |
| 75 | return regs_get_register(regs, (unsigned int)((unsigned long)offset)); | 101 | */ |
| 102 | #define DEFINE_BASIC_FETCH_FUNCS(kind) \ | ||
| 103 | DEFINE_FETCH_##kind(u8) \ | ||
| 104 | DEFINE_FETCH_##kind(u16) \ | ||
| 105 | DEFINE_FETCH_##kind(u32) \ | ||
| 106 | DEFINE_FETCH_##kind(u64) | ||
| 107 | |||
| 108 | #define CHECK_BASIC_FETCH_FUNCS(kind, fn) \ | ||
| 109 | ((FETCH_FUNC_NAME(kind, u8) == fn) || \ | ||
| 110 | (FETCH_FUNC_NAME(kind, u16) == fn) || \ | ||
| 111 | (FETCH_FUNC_NAME(kind, u32) == fn) || \ | ||
| 112 | (FETCH_FUNC_NAME(kind, u64) == fn)) | ||
| 113 | |||
| 114 | /* Data fetch function templates */ | ||
| 115 | #define DEFINE_FETCH_reg(type) \ | ||
| 116 | static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ | ||
| 117 | void *offset, void *dest) \ | ||
| 118 | { \ | ||
| 119 | *(type *)dest = (type)regs_get_register(regs, \ | ||
| 120 | (unsigned int)((unsigned long)offset)); \ | ||
| 76 | } | 121 | } |
| 77 | 122 | DEFINE_BASIC_FETCH_FUNCS(reg) | |
| 78 | static __kprobes unsigned long fetch_stack(struct pt_regs *regs, | 123 | |
| 79 | void *num) | 124 | #define DEFINE_FETCH_stack(type) \ |
| 80 | { | 125 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ |
| 81 | return regs_get_kernel_stack_nth(regs, | 126 | void *offset, void *dest) \ |
| 82 | (unsigned int)((unsigned long)num)); | 127 | { \ |
| 128 | *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ | ||
| 129 | (unsigned int)((unsigned long)offset)); \ | ||
| 83 | } | 130 | } |
| 131 | DEFINE_BASIC_FETCH_FUNCS(stack) | ||
| 84 | 132 | ||
| 85 | static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr) | 133 | #define DEFINE_FETCH_retval(type) \ |
| 86 | { | 134 | static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ |
| 87 | unsigned long retval; | 135 | void *dummy, void *dest) \ |
| 88 | 136 | { \ | |
| 89 | if (probe_kernel_address(addr, retval)) | 137 | *(type *)dest = (type)regs_return_value(regs); \ |
| 90 | return 0; | ||
| 91 | return retval; | ||
| 92 | } | 138 | } |
| 93 | 139 | DEFINE_BASIC_FETCH_FUNCS(retval) | |
| 94 | static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, | 140 | |
| 95 | void *dummy) | 141 | #define DEFINE_FETCH_memory(type) \ |
| 96 | { | 142 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ |
| 97 | return regs_return_value(regs); | 143 | void *addr, void *dest) \ |
| 98 | } | 144 | { \ |
| 99 | 145 | type retval; \ | |
| 100 | static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs, | 146 | if (probe_kernel_address(addr, retval)) \ |
| 101 | void *dummy) | 147 | *(type *)dest = 0; \ |
| 102 | { | 148 | else \ |
| 103 | return kernel_stack_pointer(regs); | 149 | *(type *)dest = retval; \ |
| 104 | } | 150 | } |
| 151 | DEFINE_BASIC_FETCH_FUNCS(memory) | ||
| 105 | 152 | ||
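The fetch templates expand the same way; for example, DEFINE_FETCH_reg(u32) from the template above generates:

	static __kprobes void fetch_reg_u32(struct pt_regs *regs,
					    void *offset, void *dest)
	{
		/* fetch the register at this pt_regs offset, truncated to u32 */
		*(u32 *)dest = (u32)regs_get_register(regs,
				(unsigned int)((unsigned long)offset));
	}

Only unsigned variants are generated because, as the comment above notes, recording cares only about bit width; signedness is applied by the print functions.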
| 106 | /* Memory fetching by symbol */ | 153 | /* Memory fetching by symbol */ |
| 107 | struct symbol_cache { | 154 | struct symbol_cache { |
| @@ -145,51 +192,126 @@ static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) | |||
| 145 | return sc; | 192 | return sc; |
| 146 | } | 193 | } |
| 147 | 194 | ||
| 148 | static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data) | 195 | #define DEFINE_FETCH_symbol(type) \ |
| 149 | { | 196 | static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\ |
| 150 | struct symbol_cache *sc = data; | 197 | void *data, void *dest) \ |
| 151 | 198 | { \ | |
| 152 | if (sc->addr) | 199 | struct symbol_cache *sc = data; \ |
| 153 | return fetch_memory(regs, (void *)sc->addr); | 200 | if (sc->addr) \ |
| 154 | else | 201 | fetch_memory_##type(regs, (void *)sc->addr, dest); \ |
| 155 | return 0; | 202 | else \ |
| 203 | *(type *)dest = 0; \ | ||
| 156 | } | 204 | } |
| 205 | DEFINE_BASIC_FETCH_FUNCS(symbol) | ||
| 157 | 206 | ||
| 158 | /* Special indirect memory access interface */ | 207 | /* Dereference memory access function */ |
| 159 | struct indirect_fetch_data { | 208 | struct deref_fetch_param { |
| 160 | struct fetch_func orig; | 209 | struct fetch_param orig; |
| 161 | long offset; | 210 | long offset; |
| 162 | }; | 211 | }; |
| 163 | 212 | ||
| 164 | static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data) | 213 | #define DEFINE_FETCH_deref(type) \ |
| 165 | { | 214 | static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\ |
| 166 | struct indirect_fetch_data *ind = data; | 215 | void *data, void *dest) \ |
| 167 | unsigned long addr; | 216 | { \ |
| 168 | 217 | struct deref_fetch_param *dprm = data; \ | |
| 169 | addr = call_fetch(&ind->orig, regs); | 218 | unsigned long addr; \ |
| 170 | if (addr) { | 219 | call_fetch(&dprm->orig, regs, &addr); \ |
| 171 | addr += ind->offset; | 220 | if (addr) { \ |
| 172 | return fetch_memory(regs, (void *)addr); | 221 | addr += dprm->offset; \ |
| 173 | } else | 222 | fetch_memory_##type(regs, (void *)addr, dest); \ |
| 174 | return 0; | 223 | } else \ |
| 224 | *(type *)dest = 0; \ | ||
| 175 | } | 225 | } |
| 226 | DEFINE_BASIC_FETCH_FUNCS(deref) | ||
| 176 | 227 | ||
| 177 | static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) | 228 | static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) |
| 178 | { | 229 | { |
| 179 | if (data->orig.func == fetch_indirect) | 230 | if (CHECK_BASIC_FETCH_FUNCS(deref, data->orig.fn)) |
| 180 | free_indirect_fetch_data(data->orig.data); | 231 | free_deref_fetch_param(data->orig.data); |
| 181 | else if (data->orig.func == fetch_symbol) | 232 | else if (CHECK_BASIC_FETCH_FUNCS(symbol, data->orig.fn)) |
| 182 | free_symbol_cache(data->orig.data); | 233 | free_symbol_cache(data->orig.data); |
| 183 | kfree(data); | 234 | kfree(data); |
| 184 | } | 235 | } |
| 185 | 236 | ||
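To make the deref plumbing concrete, here is a representation sketch for an argument written as "+4(%di)" on a 64-bit kernel, where the inner fetch falls back to the default u64 type (the register-offset value is illustrative):

	/* fetch_param f after parsing "+4(%di)" (sketch):
	 *   f.fn   = fetch_deref_u64          (t->deref for the arg's type)
	 *   f.data = dprm                     (struct deref_fetch_param)
	 *     dprm->offset    = 4
	 *     dprm->orig.fn   = fetch_reg_u64 (default type, via find_fetch_type(NULL))
	 *     dprm->orig.data = result of regs_query_register_offset("di")
	 */

free_deref_fetch_param() above recurses through dprm->orig before the final kfree(), so nested constructs such as "+8(+0(%di))" are released inner-first.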
| 237 | /* Default (unsigned long) fetch type */ | ||
| 238 | #define __DEFAULT_FETCH_TYPE(t) u##t | ||
| 239 | #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t) | ||
| 240 | #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) | ||
| 241 | #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) | ||
| 242 | |||
| 243 | #define ASSIGN_FETCH_FUNC(kind, type) \ | ||
| 244 | .kind = FETCH_FUNC_NAME(kind, type) | ||
| 245 | |||
| 246 | #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ | ||
| 247 | {.name = #ptype, \ | ||
| 248 | .size = sizeof(ftype), \ | ||
| 249 | .is_signed = sign, \ | ||
| 250 | .print = PRINT_TYPE_FUNC_NAME(ptype), \ | ||
| 251 | .fmt = PRINT_TYPE_FMT_NAME(ptype), \ | ||
| 252 | ASSIGN_FETCH_FUNC(reg, ftype), \ | ||
| 253 | ASSIGN_FETCH_FUNC(stack, ftype), \ | ||
| 254 | ASSIGN_FETCH_FUNC(retval, ftype), \ | ||
| 255 | ASSIGN_FETCH_FUNC(memory, ftype), \ | ||
| 256 | ASSIGN_FETCH_FUNC(symbol, ftype), \ | ||
| 257 | ASSIGN_FETCH_FUNC(deref, ftype), \ | ||
| 258 | } | ||
| 259 | |||
| 260 | /* Fetch type information table */ | ||
| 261 | static const struct fetch_type { | ||
| 262 | const char *name; /* Name of type */ | ||
| 263 | size_t size; /* Byte size of type */ | ||
| 264 | int is_signed; /* Signed flag */ | ||
| 265 | print_type_func_t print; /* Print function */ | ||
| 266 | const char *fmt; /* Format string */ | ||
| 267 | /* Fetch functions */ | ||
| 268 | fetch_func_t reg; | ||
| 269 | fetch_func_t stack; | ||
| 270 | fetch_func_t retval; | ||
| 271 | fetch_func_t memory; | ||
| 272 | fetch_func_t symbol; | ||
| 273 | fetch_func_t deref; | ||
| 274 | } fetch_type_table[] = { | ||
| 275 | ASSIGN_FETCH_TYPE(u8, u8, 0), | ||
| 276 | ASSIGN_FETCH_TYPE(u16, u16, 0), | ||
| 277 | ASSIGN_FETCH_TYPE(u32, u32, 0), | ||
| 278 | ASSIGN_FETCH_TYPE(u64, u64, 0), | ||
| 279 | ASSIGN_FETCH_TYPE(s8, u8, 1), | ||
| 280 | ASSIGN_FETCH_TYPE(s16, u16, 1), | ||
| 281 | ASSIGN_FETCH_TYPE(s32, u32, 1), | ||
| 282 | ASSIGN_FETCH_TYPE(s64, u64, 1), | ||
| 283 | }; | ||
| 284 | |||
| 285 | static const struct fetch_type *find_fetch_type(const char *type) | ||
| 286 | { | ||
| 287 | int i; | ||
| 288 | |||
| 289 | if (!type) | ||
| 290 | type = DEFAULT_FETCH_TYPE_STR; | ||
| 291 | |||
| 292 | for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++) | ||
| 293 | if (strcmp(type, fetch_type_table[i].name) == 0) | ||
| 294 | return &fetch_type_table[i]; | ||
| 295 | return NULL; | ||
| 296 | } | ||
| 297 | |||
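Using the table is a single lookup; a short usage sketch (the "s32" string corresponds to the new FETCHARG:TYPE suffix parsed later in this patch):

	const struct fetch_type *t;

	t = find_fetch_type("s32");  /* explicit suffix, e.g. "+0(%si):s32" */
	t = find_fetch_type(NULL);   /* no suffix: u64 on 64-bit, u32 on 32-bit */

The NULL case works because DEFAULT_FETCH_TYPE_STR stringifies u##BITS_PER_LONG, so the default type always matches the machine word size that the old unsigned-long-only code recorded.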
| 298 | /* Special function: only accepts unsigned long */ | ||
| 299 | static __kprobes void fetch_stack_address(struct pt_regs *regs, | ||
| 300 | void *dummy, void *dest) | ||
| 301 | { | ||
| 302 | *(unsigned long *)dest = kernel_stack_pointer(regs); | ||
| 303 | } | ||
| 304 | |||
| 186 | /** | 305 | /** |
| 187 | * Kprobe event core functions | 306 | * Kprobe event core functions |
| 188 | */ | 307 | */ |
| 189 | 308 | ||
| 190 | struct probe_arg { | 309 | struct probe_arg { |
| 191 | struct fetch_func fetch; | 310 | struct fetch_param fetch; |
| 192 | const char *name; | 311 | unsigned int offset; /* Offset from argument entry */ |
| 312 | const char *name; /* Name of this argument */ | ||
| 313 | const char *comm; /* Command of this argument */ | ||
| 314 | const struct fetch_type *type; /* Type of this argument */ | ||
| 193 | }; | 315 | }; |
| 194 | 316 | ||
| 195 | /* Flags for trace_probe */ | 317 | /* Flags for trace_probe */ |
| @@ -204,6 +326,7 @@ struct trace_probe { | |||
| 204 | const char *symbol; /* symbol name */ | 326 | const char *symbol; /* symbol name */ |
| 205 | struct ftrace_event_call call; | 327 | struct ftrace_event_call call; |
| 206 | struct trace_event event; | 328 | struct trace_event event; |
| 329 | ssize_t size; /* trace entry size */ | ||
| 207 | unsigned int nr_args; | 330 | unsigned int nr_args; |
| 208 | struct probe_arg args[]; | 331 | struct probe_arg args[]; |
| 209 | }; | 332 | }; |
| @@ -212,6 +335,7 @@ struct trace_probe { | |||
| 212 | (offsetof(struct trace_probe, args) + \ | 335 | (offsetof(struct trace_probe, args) + \ |
| 213 | (sizeof(struct probe_arg) * (n))) | 336 | (sizeof(struct probe_arg) * (n))) |
| 214 | 337 | ||
| 338 | |||
| 215 | static __kprobes int probe_is_return(struct trace_probe *tp) | 339 | static __kprobes int probe_is_return(struct trace_probe *tp) |
| 216 | { | 340 | { |
| 217 | return tp->rp.handler != NULL; | 341 | return tp->rp.handler != NULL; |
| @@ -222,49 +346,6 @@ static __kprobes const char *probe_symbol(struct trace_probe *tp) | |||
| 222 | return tp->symbol ? tp->symbol : "unknown"; | 346 | return tp->symbol ? tp->symbol : "unknown"; |
| 223 | } | 347 | } |
| 224 | 348 | ||
| 225 | static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) | ||
| 226 | { | ||
| 227 | int ret = -EINVAL; | ||
| 228 | |||
| 229 | if (ff->func == fetch_register) { | ||
| 230 | const char *name; | ||
| 231 | name = regs_query_register_name((unsigned int)((long)ff->data)); | ||
| 232 | ret = snprintf(buf, n, "%%%s", name); | ||
| 233 | } else if (ff->func == fetch_stack) | ||
| 234 | ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data); | ||
| 235 | else if (ff->func == fetch_memory) | ||
| 236 | ret = snprintf(buf, n, "@0x%p", ff->data); | ||
| 237 | else if (ff->func == fetch_symbol) { | ||
| 238 | struct symbol_cache *sc = ff->data; | ||
| 239 | if (sc->offset) | ||
| 240 | ret = snprintf(buf, n, "@%s%+ld", sc->symbol, | ||
| 241 | sc->offset); | ||
| 242 | else | ||
| 243 | ret = snprintf(buf, n, "@%s", sc->symbol); | ||
| 244 | } else if (ff->func == fetch_retvalue) | ||
| 245 | ret = snprintf(buf, n, "$retval"); | ||
| 246 | else if (ff->func == fetch_stack_address) | ||
| 247 | ret = snprintf(buf, n, "$stack"); | ||
| 248 | else if (ff->func == fetch_indirect) { | ||
| 249 | struct indirect_fetch_data *id = ff->data; | ||
| 250 | size_t l = 0; | ||
| 251 | ret = snprintf(buf, n, "%+ld(", id->offset); | ||
| 252 | if (ret >= n) | ||
| 253 | goto end; | ||
| 254 | l += ret; | ||
| 255 | ret = probe_arg_string(buf + l, n - l, &id->orig); | ||
| 256 | if (ret < 0) | ||
| 257 | goto end; | ||
| 258 | l += ret; | ||
| 259 | ret = snprintf(buf + l, n - l, ")"); | ||
| 260 | ret += l; | ||
| 261 | } | ||
| 262 | end: | ||
| 263 | if (ret >= n) | ||
| 264 | return -ENOSPC; | ||
| 265 | return ret; | ||
| 266 | } | ||
| 267 | |||
| 268 | static int register_probe_event(struct trace_probe *tp); | 349 | static int register_probe_event(struct trace_probe *tp); |
| 269 | static void unregister_probe_event(struct trace_probe *tp); | 350 | static void unregister_probe_event(struct trace_probe *tp); |
| 270 | 351 | ||
| @@ -347,11 +428,12 @@ error: | |||
| 347 | 428 | ||
| 348 | static void free_probe_arg(struct probe_arg *arg) | 429 | static void free_probe_arg(struct probe_arg *arg) |
| 349 | { | 430 | { |
| 350 | if (arg->fetch.func == fetch_symbol) | 431 | if (CHECK_BASIC_FETCH_FUNCS(deref, arg->fetch.fn)) |
| 432 | free_deref_fetch_param(arg->fetch.data); | ||
| 433 | else if (CHECK_BASIC_FETCH_FUNCS(symbol, arg->fetch.fn)) | ||
| 351 | free_symbol_cache(arg->fetch.data); | 434 | free_symbol_cache(arg->fetch.data); |
| 352 | else if (arg->fetch.func == fetch_indirect) | ||
| 353 | free_indirect_fetch_data(arg->fetch.data); | ||
| 354 | kfree(arg->name); | 435 | kfree(arg->name); |
| 436 | kfree(arg->comm); | ||
| 355 | } | 437 | } |
| 356 | 438 | ||
| 357 | static void free_trace_probe(struct trace_probe *tp) | 439 | static void free_trace_probe(struct trace_probe *tp) |
| @@ -457,28 +539,30 @@ static int split_symbol_offset(char *symbol, unsigned long *offset) | |||
| 457 | #define PARAM_MAX_ARGS 16 | 539 | #define PARAM_MAX_ARGS 16 |
| 458 | #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) | 540 | #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) |
| 459 | 541 | ||
| 460 | static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) | 542 | static int parse_probe_vars(char *arg, const struct fetch_type *t, |
| 543 | struct fetch_param *f, int is_return) | ||
| 461 | { | 544 | { |
| 462 | int ret = 0; | 545 | int ret = 0; |
| 463 | unsigned long param; | 546 | unsigned long param; |
| 464 | 547 | ||
| 465 | if (strcmp(arg, "retval") == 0) { | 548 | if (strcmp(arg, "retval") == 0) { |
| 466 | if (is_return) { | 549 | if (is_return) |
| 467 | ff->func = fetch_retvalue; | 550 | f->fn = t->retval; |
| 468 | ff->data = NULL; | 551 | else |
| 469 | } else | ||
| 470 | ret = -EINVAL; | 552 | ret = -EINVAL; |
| 471 | } else if (strncmp(arg, "stack", 5) == 0) { | 553 | } else if (strncmp(arg, "stack", 5) == 0) { |
| 472 | if (arg[5] == '\0') { | 554 | if (arg[5] == '\0') { |
| 473 | ff->func = fetch_stack_address; | 555 | if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0) |
| 474 | ff->data = NULL; | 556 | f->fn = fetch_stack_address; |
| 557 | else | ||
| 558 | ret = -EINVAL; | ||
| 475 | } else if (isdigit(arg[5])) { | 559 | } else if (isdigit(arg[5])) { |
| 476 | ret = strict_strtoul(arg + 5, 10, ¶m); | 560 | ret = strict_strtoul(arg + 5, 10, ¶m); |
| 477 | if (ret || param > PARAM_MAX_STACK) | 561 | if (ret || param > PARAM_MAX_STACK) |
| 478 | ret = -EINVAL; | 562 | ret = -EINVAL; |
| 479 | else { | 563 | else { |
| 480 | ff->func = fetch_stack; | 564 | f->fn = t->stack; |
| 481 | ff->data = (void *)param; | 565 | f->data = (void *)param; |
| 482 | } | 566 | } |
| 483 | } else | 567 | } else |
| 484 | ret = -EINVAL; | 568 | ret = -EINVAL; |
| @@ -488,7 +572,8 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) | |||
| 488 | } | 572 | } |
| 489 | 573 | ||
| 490 | /* Recursive argument parser */ | 574 | /* Recursive argument parser */ |
| 491 | static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) | 575 | static int __parse_probe_arg(char *arg, const struct fetch_type *t, |
| 576 | struct fetch_param *f, int is_return) | ||
| 492 | { | 577 | { |
| 493 | int ret = 0; | 578 | int ret = 0; |
| 494 | unsigned long param; | 579 | unsigned long param; |
| @@ -497,13 +582,13 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) | |||
| 497 | 582 | ||
| 498 | switch (arg[0]) { | 583 | switch (arg[0]) { |
| 499 | case '$': | 584 | case '$': |
| 500 | ret = parse_probe_vars(arg + 1, ff, is_return); | 585 | ret = parse_probe_vars(arg + 1, t, f, is_return); |
| 501 | break; | 586 | break; |
| 502 | case '%': /* named register */ | 587 | case '%': /* named register */ |
| 503 | ret = regs_query_register_offset(arg + 1); | 588 | ret = regs_query_register_offset(arg + 1); |
| 504 | if (ret >= 0) { | 589 | if (ret >= 0) { |
| 505 | ff->func = fetch_register; | 590 | f->fn = t->reg; |
| 506 | ff->data = (void *)(unsigned long)ret; | 591 | f->data = (void *)(unsigned long)ret; |
| 507 | ret = 0; | 592 | ret = 0; |
| 508 | } | 593 | } |
| 509 | break; | 594 | break; |
| @@ -512,26 +597,22 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) | |||
| 512 | ret = strict_strtoul(arg + 1, 0, ¶m); | 597 | ret = strict_strtoul(arg + 1, 0, ¶m); |
| 513 | if (ret) | 598 | if (ret) |
| 514 | break; | 599 | break; |
| 515 | ff->func = fetch_memory; | 600 | f->fn = t->memory; |
| 516 | ff->data = (void *)param; | 601 | f->data = (void *)param; |
| 517 | } else { | 602 | } else { |
| 518 | ret = split_symbol_offset(arg + 1, &offset); | 603 | ret = split_symbol_offset(arg + 1, &offset); |
| 519 | if (ret) | 604 | if (ret) |
| 520 | break; | 605 | break; |
| 521 | ff->data = alloc_symbol_cache(arg + 1, offset); | 606 | f->data = alloc_symbol_cache(arg + 1, offset); |
| 522 | if (ff->data) | 607 | if (f->data) |
| 523 | ff->func = fetch_symbol; | 608 | f->fn = t->symbol; |
| 524 | else | ||
| 525 | ret = -EINVAL; | ||
| 526 | } | 609 | } |
| 527 | break; | 610 | break; |
| 528 | case '+': /* indirect memory */ | 611 | case '+': /* deref memory */ |
| 529 | case '-': | 612 | case '-': |
| 530 | tmp = strchr(arg, '('); | 613 | tmp = strchr(arg, '('); |
| 531 | if (!tmp) { | 614 | if (!tmp) |
| 532 | ret = -EINVAL; | ||
| 533 | break; | 615 | break; |
| 534 | } | ||
| 535 | *tmp = '\0'; | 616 | *tmp = '\0'; |
| 536 | ret = strict_strtol(arg + 1, 0, &offset); | 617 | ret = strict_strtol(arg + 1, 0, &offset); |
| 537 | if (ret) | 618 | if (ret) |
| @@ -541,38 +622,58 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) | |||
| 541 | arg = tmp + 1; | 622 | arg = tmp + 1; |
| 542 | tmp = strrchr(arg, ')'); | 623 | tmp = strrchr(arg, ')'); |
| 543 | if (tmp) { | 624 | if (tmp) { |
| 544 | struct indirect_fetch_data *id; | 625 | struct deref_fetch_param *dprm; |
| 626 | const struct fetch_type *t2 = find_fetch_type(NULL); | ||
| 545 | *tmp = '\0'; | 627 | *tmp = '\0'; |
| 546 | id = kzalloc(sizeof(struct indirect_fetch_data), | 628 | dprm = kzalloc(sizeof(struct deref_fetch_param), |
| 547 | GFP_KERNEL); | 629 | GFP_KERNEL); |
| 548 | if (!id) | 630 | if (!dprm) |
| 549 | return -ENOMEM; | 631 | return -ENOMEM; |
| 550 | id->offset = offset; | 632 | dprm->offset = offset; |
| 551 | ret = __parse_probe_arg(arg, &id->orig, is_return); | 633 | ret = __parse_probe_arg(arg, t2, &dprm->orig, |
| 634 | is_return); | ||
| 552 | if (ret) | 635 | if (ret) |
| 553 | kfree(id); | 636 | kfree(dprm); |
| 554 | else { | 637 | else { |
| 555 | ff->func = fetch_indirect; | 638 | f->fn = t->deref; |
| 556 | ff->data = (void *)id; | 639 | f->data = (void *)dprm; |
| 557 | } | 640 | } |
| 558 | } else | 641 | } |
| 559 | ret = -EINVAL; | ||
| 560 | break; | 642 | break; |
| 561 | default: | ||
| 562 | /* TODO: support custom handler */ | ||
| 563 | ret = -EINVAL; | ||
| 564 | } | 643 | } |
| 644 | if (!ret && !f->fn) | ||
| 645 | ret = -EINVAL; | ||
| 565 | return ret; | 646 | return ret; |
| 566 | } | 647 | } |
| 567 | 648 | ||
| 568 | /* String length checking wrapper */ | 649 | /* String length checking wrapper */ |
| 569 | static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) | 650 | static int parse_probe_arg(char *arg, struct trace_probe *tp, |
| 651 | struct probe_arg *parg, int is_return) | ||
| 570 | { | 652 | { |
| 653 | const char *t; | ||
| 654 | |||
| 571 | if (strlen(arg) > MAX_ARGSTR_LEN) { | 655 | if (strlen(arg) > MAX_ARGSTR_LEN) { |
| 572 | pr_info("Argument is too long.: %s\n", arg); | 656 | pr_info("Argument is too long.: %s\n", arg); |
| 573 | return -ENOSPC; | 657 | return -ENOSPC; |
| 574 | } | 658 | } |
| 575 | return __parse_probe_arg(arg, ff, is_return); | 659 | parg->comm = kstrdup(arg, GFP_KERNEL); |
| 660 | if (!parg->comm) { | ||
| 661 | pr_info("Failed to allocate memory for command '%s'.\n", arg); | ||
| 662 | return -ENOMEM; | ||
| 663 | } | ||
| 664 | t = strchr(parg->comm, ':'); | ||
| 665 | if (t) { | ||
| 666 | arg[t - parg->comm] = '\0'; | ||
| 667 | t++; | ||
| 668 | } | ||
| 669 | parg->type = find_fetch_type(t); | ||
| 670 | if (!parg->type) { | ||
| 671 | pr_info("Unsupported type: %s\n", t); | ||
| 672 | return -EINVAL; | ||
| 673 | } | ||
| 674 | parg->offset = tp->size; | ||
| 675 | tp->size += parg->type->size; | ||
| 676 | return __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); | ||
| 576 | } | 677 | } |
| 577 | 678 | ||
| 578 | /* Return 1 if name is reserved or already used by another argument */ | 679 | /* Return 1 if name is reserved or already used by another argument */ |
| @@ -602,15 +703,18 @@ static int create_trace_probe(int argc, char **argv) | |||
| 602 | * @ADDR : fetch memory at ADDR (ADDR should be in kernel) | 703 | * @ADDR : fetch memory at ADDR (ADDR should be in kernel) |
| 603 | * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) | 704 | * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) |
| 604 | * %REG : fetch register REG | 705 | * %REG : fetch register REG |
| 605 | * Indirect memory fetch: | 706 | * Dereferencing memory fetch: |
| 606 | * +|-offs(ARG) : fetch memory at ARG +|- offs address. | 707 | * +|-offs(ARG) : fetch memory at ARG +|- offs address. |
| 607 | * Alias name of args: | 708 | * Alias name of args: |
| 608 | * NAME=FETCHARG : set NAME as alias of FETCHARG. | 709 | * NAME=FETCHARG : set NAME as alias of FETCHARG. |
| 710 | * Type of args: | ||
| 711 | * FETCHARG:TYPE : use TYPE instead of unsigned long. | ||
| 609 | */ | 712 | */ |
| 610 | struct trace_probe *tp; | 713 | struct trace_probe *tp; |
| 611 | int i, ret = 0; | 714 | int i, ret = 0; |
| 612 | int is_return = 0, is_delete = 0; | 715 | int is_return = 0, is_delete = 0; |
| 613 | char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL; | 716 | char *symbol = NULL, *event = NULL, *group = NULL; |
| 717 | char *arg, *tmp; | ||
| 614 | unsigned long offset = 0; | 718 | unsigned long offset = 0; |
| 615 | void *addr = NULL; | 719 | void *addr = NULL; |
| 616 | char buf[MAX_EVENT_NAME_LEN]; | 720 | char buf[MAX_EVENT_NAME_LEN]; |
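With the syntax documented above, a probe definition that exercises the new typed arguments looks roughly like this (the probe name and register choices are illustrative, adapted from the example in Documentation/trace/kprobetrace.txt):

	echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx:u32 mode=+4($stack):u16' \
		> /sys/kernel/debug/tracing/kprobe_events

Arguments without a :TYPE suffix keep the old behaviour of recording a full unsigned long.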
| @@ -723,13 +827,6 @@ static int create_trace_probe(int argc, char **argv) | |||
| 723 | else | 827 | else |
| 724 | arg = argv[i]; | 828 | arg = argv[i]; |
| 725 | 829 | ||
| 726 | if (conflict_field_name(argv[i], tp->args, i)) { | ||
| 727 | pr_info("Argument%d name '%s' conflicts with " | ||
| 728 | "another field.\n", i, argv[i]); | ||
| 729 | ret = -EINVAL; | ||
| 730 | goto error; | ||
| 731 | } | ||
| 732 | |||
| 733 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | 830 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); |
| 734 | if (!tp->args[i].name) { | 831 | if (!tp->args[i].name) { |
| 735 | pr_info("Failed to allocate argument%d name '%s'.\n", | 832 | pr_info("Failed to allocate argument%d name '%s'.\n", |
| @@ -737,9 +834,19 @@ static int create_trace_probe(int argc, char **argv) | |||
| 737 | ret = -ENOMEM; | 834 | ret = -ENOMEM; |
| 738 | goto error; | 835 | goto error; |
| 739 | } | 836 | } |
| 837 | tmp = strchr(tp->args[i].name, ':'); | ||
| 838 | if (tmp) | ||
| 839 | *tmp = '_'; /* convert : to _ */ | ||
| 840 | |||
| 841 | if (conflict_field_name(tp->args[i].name, tp->args, i)) { | ||
| 842 | pr_info("Argument%d name '%s' conflicts with " | ||
| 843 | "another field.\n", i, argv[i]); | ||
| 844 | ret = -EINVAL; | ||
| 845 | goto error; | ||
| 846 | } | ||
| 740 | 847 | ||
| 741 | /* Parse fetch argument */ | 848 | /* Parse fetch argument */ |
| 742 | ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return); | 849 | ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); |
| 743 | if (ret) { | 850 | if (ret) { |
| 744 | pr_info("Parse error at argument%d. (%d)\n", i, ret); | 851 | pr_info("Parse error at argument%d. (%d)\n", i, ret); |
| 745 | kfree(tp->args[i].name); | 852 | kfree(tp->args[i].name); |
| @@ -794,8 +901,7 @@ static void probes_seq_stop(struct seq_file *m, void *v) | |||
| 794 | static int probes_seq_show(struct seq_file *m, void *v) | 901 | static int probes_seq_show(struct seq_file *m, void *v) |
| 795 | { | 902 | { |
| 796 | struct trace_probe *tp = v; | 903 | struct trace_probe *tp = v; |
| 797 | int i, ret; | 904 | int i; |
| 798 | char buf[MAX_ARGSTR_LEN + 1]; | ||
| 799 | 905 | ||
| 800 | seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); | 906 | seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); |
| 801 | seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); | 907 | seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); |
| @@ -807,15 +913,10 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 807 | else | 913 | else |
| 808 | seq_printf(m, " %s", probe_symbol(tp)); | 914 | seq_printf(m, " %s", probe_symbol(tp)); |
| 809 | 915 | ||
| 810 | for (i = 0; i < tp->nr_args; i++) { | 916 | for (i = 0; i < tp->nr_args; i++) |
| 811 | ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch); | 917 | seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm); |
| 812 | if (ret < 0) { | ||
| 813 | pr_warning("Argument%d decoding error(%d).\n", i, ret); | ||
| 814 | return ret; | ||
| 815 | } | ||
| 816 | seq_printf(m, " %s=%s", tp->args[i].name, buf); | ||
| 817 | } | ||
| 818 | seq_printf(m, "\n"); | 918 | seq_printf(m, "\n"); |
| 919 | |||
| 819 | return 0; | 920 | return 0; |
| 820 | } | 921 | } |
| 821 | 922 | ||
| @@ -945,9 +1046,10 @@ static const struct file_operations kprobe_profile_ops = { | |||
| 945 | static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | 1046 | static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) |
| 946 | { | 1047 | { |
| 947 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | 1048 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); |
| 948 | struct kprobe_trace_entry *entry; | 1049 | struct kprobe_trace_entry_head *entry; |
| 949 | struct ring_buffer_event *event; | 1050 | struct ring_buffer_event *event; |
| 950 | struct ring_buffer *buffer; | 1051 | struct ring_buffer *buffer; |
| 1052 | u8 *data; | ||
| 951 | int size, i, pc; | 1053 | int size, i, pc; |
| 952 | unsigned long irq_flags; | 1054 | unsigned long irq_flags; |
| 953 | struct ftrace_event_call *call = &tp->call; | 1055 | struct ftrace_event_call *call = &tp->call; |
| @@ -957,7 +1059,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
| 957 | local_save_flags(irq_flags); | 1059 | local_save_flags(irq_flags); |
| 958 | pc = preempt_count(); | 1060 | pc = preempt_count(); |
| 959 | 1061 | ||
| 960 | size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); | 1062 | size = sizeof(*entry) + tp->size; |
| 961 | 1063 | ||
| 962 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, | 1064 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, |
| 963 | irq_flags, pc); | 1065 | irq_flags, pc); |
| @@ -965,10 +1067,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
| 965 | return; | 1067 | return; |
| 966 | 1068 | ||
| 967 | entry = ring_buffer_event_data(event); | 1069 | entry = ring_buffer_event_data(event); |
| 968 | entry->nargs = tp->nr_args; | ||
| 969 | entry->ip = (unsigned long)kp->addr; | 1070 | entry->ip = (unsigned long)kp->addr; |
| 1071 | data = (u8 *)&entry[1]; | ||
| 970 | for (i = 0; i < tp->nr_args; i++) | 1072 | for (i = 0; i < tp->nr_args; i++) |
| 971 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | 1073 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); |
| 972 | 1074 | ||
| 973 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1075 | if (!filter_current_check_discard(buffer, call, entry, event)) |
| 974 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1076 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
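The hunk above replaces the fixed array of unsigned longs with a head plus a packed, typed payload; the resulting ring-buffer record looks like this:

	/* Record produced by kprobe_trace_func() (sketch):
	 *
	 *  +--------------------------------+
	 *  | struct kprobe_trace_entry_head |  common fields + entry->ip
	 *  +--------------------------------+
	 *  | u8 data[tp->size]              |  data = (u8 *)&entry[1]
	 *  |  arg0 at data + args[0].offset |  args[0].type->size bytes
	 *  |  arg1 at data + args[1].offset |  ...
	 *  +--------------------------------+
	 */

Because parse_probe_arg() fixed each argument's offset and size up front, the kretprobe, perf, and print paths below all walk this same layout without a per-record nargs field.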
| @@ -979,9 +1081,10 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
| 979 | struct pt_regs *regs) | 1081 | struct pt_regs *regs) |
| 980 | { | 1082 | { |
| 981 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | 1083 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); |
| 982 | struct kretprobe_trace_entry *entry; | 1084 | struct kretprobe_trace_entry_head *entry; |
| 983 | struct ring_buffer_event *event; | 1085 | struct ring_buffer_event *event; |
| 984 | struct ring_buffer *buffer; | 1086 | struct ring_buffer *buffer; |
| 1087 | u8 *data; | ||
| 985 | int size, i, pc; | 1088 | int size, i, pc; |
| 986 | unsigned long irq_flags; | 1089 | unsigned long irq_flags; |
| 987 | struct ftrace_event_call *call = &tp->call; | 1090 | struct ftrace_event_call *call = &tp->call; |
| @@ -989,7 +1092,7 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
| 989 | local_save_flags(irq_flags); | 1092 | local_save_flags(irq_flags); |
| 990 | pc = preempt_count(); | 1093 | pc = preempt_count(); |
| 991 | 1094 | ||
| 992 | size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); | 1095 | size = sizeof(*entry) + tp->size; |
| 993 | 1096 | ||
| 994 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, | 1097 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, |
| 995 | irq_flags, pc); | 1098 | irq_flags, pc); |
| @@ -997,11 +1100,11 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
| 997 | return; | 1100 | return; |
| 998 | 1101 | ||
| 999 | entry = ring_buffer_event_data(event); | 1102 | entry = ring_buffer_event_data(event); |
| 1000 | entry->nargs = tp->nr_args; | ||
| 1001 | entry->func = (unsigned long)tp->rp.kp.addr; | 1103 | entry->func = (unsigned long)tp->rp.kp.addr; |
| 1002 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1104 | entry->ret_ip = (unsigned long)ri->ret_addr; |
| 1105 | data = (u8 *)&entry[1]; | ||
| 1003 | for (i = 0; i < tp->nr_args; i++) | 1106 | for (i = 0; i < tp->nr_args; i++) |
| 1004 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | 1107 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); |
| 1005 | 1108 | ||
| 1006 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1109 | if (!filter_current_check_discard(buffer, call, entry, event)) |
| 1007 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1110 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
| @@ -1011,13 +1114,14 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
| 1011 | enum print_line_t | 1114 | enum print_line_t |
| 1012 | print_kprobe_event(struct trace_iterator *iter, int flags) | 1115 | print_kprobe_event(struct trace_iterator *iter, int flags) |
| 1013 | { | 1116 | { |
| 1014 | struct kprobe_trace_entry *field; | 1117 | struct kprobe_trace_entry_head *field; |
| 1015 | struct trace_seq *s = &iter->seq; | 1118 | struct trace_seq *s = &iter->seq; |
| 1016 | struct trace_event *event; | 1119 | struct trace_event *event; |
| 1017 | struct trace_probe *tp; | 1120 | struct trace_probe *tp; |
| 1121 | u8 *data; | ||
| 1018 | int i; | 1122 | int i; |
| 1019 | 1123 | ||
| 1020 | field = (struct kprobe_trace_entry *)iter->ent; | 1124 | field = (struct kprobe_trace_entry_head *)iter->ent; |
| 1021 | event = ftrace_find_event(field->ent.type); | 1125 | event = ftrace_find_event(field->ent.type); |
| 1022 | tp = container_of(event, struct trace_probe, event); | 1126 | tp = container_of(event, struct trace_probe, event); |
| 1023 | 1127 | ||
| @@ -1030,9 +1134,10 @@ print_kprobe_event(struct trace_iterator *iter, int flags) | |||
| 1030 | if (!trace_seq_puts(s, ")")) | 1134 | if (!trace_seq_puts(s, ")")) |
| 1031 | goto partial; | 1135 | goto partial; |
| 1032 | 1136 | ||
| 1033 | for (i = 0; i < field->nargs; i++) | 1137 | data = (u8 *)&field[1]; |
| 1034 | if (!trace_seq_printf(s, " %s=%lx", | 1138 | for (i = 0; i < tp->nr_args; i++) |
| 1035 | tp->args[i].name, field->args[i])) | 1139 | if (!tp->args[i].type->print(s, tp->args[i].name, |
| 1140 | data + tp->args[i].offset)) | ||
| 1036 | goto partial; | 1141 | goto partial; |
| 1037 | 1142 | ||
| 1038 | if (!trace_seq_puts(s, "\n")) | 1143 | if (!trace_seq_puts(s, "\n")) |
| @@ -1046,13 +1151,14 @@ partial: | |||
| 1046 | enum print_line_t | 1151 | enum print_line_t |
| 1047 | print_kretprobe_event(struct trace_iterator *iter, int flags) | 1152 | print_kretprobe_event(struct trace_iterator *iter, int flags) |
| 1048 | { | 1153 | { |
| 1049 | struct kretprobe_trace_entry *field; | 1154 | struct kretprobe_trace_entry_head *field; |
| 1050 | struct trace_seq *s = &iter->seq; | 1155 | struct trace_seq *s = &iter->seq; |
| 1051 | struct trace_event *event; | 1156 | struct trace_event *event; |
| 1052 | struct trace_probe *tp; | 1157 | struct trace_probe *tp; |
| 1158 | u8 *data; | ||
| 1053 | int i; | 1159 | int i; |
| 1054 | 1160 | ||
| 1055 | field = (struct kretprobe_trace_entry *)iter->ent; | 1161 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
| 1056 | event = ftrace_find_event(field->ent.type); | 1162 | event = ftrace_find_event(field->ent.type); |
| 1057 | tp = container_of(event, struct trace_probe, event); | 1163 | tp = container_of(event, struct trace_probe, event); |
| 1058 | 1164 | ||
| @@ -1071,9 +1177,10 @@ print_kretprobe_event(struct trace_iterator *iter, int flags) | |||
| 1071 | if (!trace_seq_puts(s, ")")) | 1177 | if (!trace_seq_puts(s, ")")) |
| 1072 | goto partial; | 1178 | goto partial; |
| 1073 | 1179 | ||
| 1074 | for (i = 0; i < field->nargs; i++) | 1180 | data = (u8 *)&field[1]; |
| 1075 | if (!trace_seq_printf(s, " %s=%lx", | 1181 | for (i = 0; i < tp->nr_args; i++) |
| 1076 | tp->args[i].name, field->args[i])) | 1182 | if (!tp->args[i].type->print(s, tp->args[i].name, |
| 1183 | data + tp->args[i].offset)) | ||
| 1077 | goto partial; | 1184 | goto partial; |
| 1078 | 1185 | ||
| 1079 | if (!trace_seq_puts(s, "\n")) | 1186 | if (!trace_seq_puts(s, "\n")) |
| @@ -1129,29 +1236,43 @@ static int probe_event_raw_init(struct ftrace_event_call *event_call) | |||
| 1129 | static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | 1236 | static int kprobe_event_define_fields(struct ftrace_event_call *event_call) |
| 1130 | { | 1237 | { |
| 1131 | int ret, i; | 1238 | int ret, i; |
| 1132 | struct kprobe_trace_entry field; | 1239 | struct kprobe_trace_entry_head field; |
| 1133 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1240 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
| 1134 | 1241 | ||
| 1135 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 1242 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
| 1136 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | ||
| 1137 | /* Set argument names as fields */ | 1243 | /* Set argument names as fields */ |
| 1138 | for (i = 0; i < tp->nr_args; i++) | 1244 | for (i = 0; i < tp->nr_args; i++) { |
| 1139 | DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); | 1245 | ret = trace_define_field(event_call, tp->args[i].type->name, |
| 1246 | tp->args[i].name, | ||
| 1247 | sizeof(field) + tp->args[i].offset, | ||
| 1248 | tp->args[i].type->size, | ||
| 1249 | tp->args[i].type->is_signed, | ||
| 1250 | FILTER_OTHER); | ||
| 1251 | if (ret) | ||
| 1252 | return ret; | ||
| 1253 | } | ||
| 1140 | return 0; | 1254 | return 0; |
| 1141 | } | 1255 | } |
| 1142 | 1256 | ||
| 1143 | static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | 1257 | static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) |
| 1144 | { | 1258 | { |
| 1145 | int ret, i; | 1259 | int ret, i; |
| 1146 | struct kretprobe_trace_entry field; | 1260 | struct kretprobe_trace_entry_head field; |
| 1147 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1261 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
| 1148 | 1262 | ||
| 1149 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); | 1263 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); |
| 1150 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | 1264 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
| 1151 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | ||
| 1152 | /* Set argument names as fields */ | 1265 | /* Set argument names as fields */ |
| 1153 | for (i = 0; i < tp->nr_args; i++) | 1266 | for (i = 0; i < tp->nr_args; i++) { |
| 1154 | DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); | 1267 | ret = trace_define_field(event_call, tp->args[i].type->name, |
| 1268 | tp->args[i].name, | ||
| 1269 | sizeof(field) + tp->args[i].offset, | ||
| 1270 | tp->args[i].type->size, | ||
| 1271 | tp->args[i].type->is_signed, | ||
| 1272 | FILTER_OTHER); | ||
| 1273 | if (ret) | ||
| 1274 | return ret; | ||
| 1275 | } | ||
| 1155 | return 0; | 1276 | return 0; |
| 1156 | } | 1277 | } |
| 1157 | 1278 | ||
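With typed fields registered through trace_define_field(), each argument now appears in the event's format file under its declared type. For an argument defined as flags=%cx:u32 on a 64-bit kernel, the generated description would be roughly (illustrative values):

	field:u32 flags;	offset:24;	size:4;	signed:0;

where the offset is sizeof(struct kprobe_trace_entry_head) plus the argument's packed offset computed at parse time.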
| @@ -1176,8 +1297,8 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | |||
| 1176 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); | 1297 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); |
| 1177 | 1298 | ||
| 1178 | for (i = 0; i < tp->nr_args; i++) { | 1299 | for (i = 0; i < tp->nr_args; i++) { |
| 1179 | pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx", | 1300 | pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", |
| 1180 | tp->args[i].name); | 1301 | tp->args[i].name, tp->args[i].type->fmt); |
| 1181 | } | 1302 | } |
| 1182 | 1303 | ||
| 1183 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); | 1304 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); |
| @@ -1219,12 +1340,13 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, | |||
| 1219 | { | 1340 | { |
| 1220 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | 1341 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); |
| 1221 | struct ftrace_event_call *call = &tp->call; | 1342 | struct ftrace_event_call *call = &tp->call; |
| 1222 | struct kprobe_trace_entry *entry; | 1343 | struct kprobe_trace_entry_head *entry; |
| 1344 | u8 *data; | ||
| 1223 | int size, __size, i; | 1345 | int size, __size, i; |
| 1224 | unsigned long irq_flags; | 1346 | unsigned long irq_flags; |
| 1225 | int rctx; | 1347 | int rctx; |
| 1226 | 1348 | ||
| 1227 | __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); | 1349 | __size = sizeof(*entry) + tp->size; |
| 1228 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1350 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
| 1229 | size -= sizeof(u32); | 1351 | size -= sizeof(u32); |
| 1230 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | 1352 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
| @@ -1235,10 +1357,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, | |||
| 1235 | if (!entry) | 1357 | if (!entry) |
| 1236 | return; | 1358 | return; |
| 1237 | 1359 | ||
| 1238 | entry->nargs = tp->nr_args; | ||
| 1239 | entry->ip = (unsigned long)kp->addr; | 1360 | entry->ip = (unsigned long)kp->addr; |
| 1361 | data = (u8 *)&entry[1]; | ||
| 1240 | for (i = 0; i < tp->nr_args; i++) | 1362 | for (i = 0; i < tp->nr_args; i++) |
| 1241 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | 1363 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); |
| 1242 | 1364 | ||
| 1243 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); | 1365 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); |
| 1244 | } | 1366 | } |
| @@ -1249,12 +1371,13 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, | |||
| 1249 | { | 1371 | { |
| 1250 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | 1372 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); |
| 1251 | struct ftrace_event_call *call = &tp->call; | 1373 | struct ftrace_event_call *call = &tp->call; |
| 1252 | struct kretprobe_trace_entry *entry; | 1374 | struct kretprobe_trace_entry_head *entry; |
| 1375 | u8 *data; | ||
| 1253 | int size, __size, i; | 1376 | int size, __size, i; |
| 1254 | unsigned long irq_flags; | 1377 | unsigned long irq_flags; |
| 1255 | int rctx; | 1378 | int rctx; |
| 1256 | 1379 | ||
| 1257 | __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); | 1380 | __size = sizeof(*entry) + tp->size; |
| 1258 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1381 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
| 1259 | size -= sizeof(u32); | 1382 | size -= sizeof(u32); |
| 1260 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | 1383 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
| @@ -1265,11 +1388,11 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, | |||
| 1265 | if (!entry) | 1388 | if (!entry) |
| 1266 | return; | 1389 | return; |
| 1267 | 1390 | ||
| 1268 | entry->nargs = tp->nr_args; | ||
| 1269 | entry->func = (unsigned long)tp->rp.kp.addr; | 1391 | entry->func = (unsigned long)tp->rp.kp.addr; |
| 1270 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1392 | entry->ret_ip = (unsigned long)ri->ret_addr; |
| 1393 | data = (u8 *)&entry[1]; | ||
| 1271 | for (i = 0; i < tp->nr_args; i++) | 1394 | for (i = 0; i < tp->nr_args; i++) |
| 1272 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | 1395 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); |
| 1273 | 1396 | ||
| 1274 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, | 1397 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, |
| 1275 | irq_flags, regs); | 1398 | irq_flags, regs); |
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index d59cd6879477..8eaf00749b65 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c | |||
| @@ -34,12 +34,6 @@ | |||
| 34 | 34 | ||
| 35 | #include <asm/atomic.h> | 35 | #include <asm/atomic.h> |
| 36 | 36 | ||
| 37 | /* | ||
| 38 | * For now, let us restrict the no. of symbols traced simultaneously to number | ||
| 39 | * of available hardware breakpoint registers. | ||
| 40 | */ | ||
| 41 | #define KSYM_TRACER_MAX HBP_NUM | ||
| 42 | |||
| 43 | #define KSYM_TRACER_OP_LEN 3 /* rw- */ | 37 | #define KSYM_TRACER_OP_LEN 3 /* rw- */ |
| 44 | 38 | ||
| 45 | struct trace_ksym { | 39 | struct trace_ksym { |
| @@ -53,7 +47,6 @@ struct trace_ksym { | |||
| 53 | 47 | ||
| 54 | static struct trace_array *ksym_trace_array; | 48 | static struct trace_array *ksym_trace_array; |
| 55 | 49 | ||
| 56 | static unsigned int ksym_filter_entry_count; | ||
| 57 | static unsigned int ksym_tracing_enabled; | 50 | static unsigned int ksym_tracing_enabled; |
| 58 | 51 | ||
| 59 | static HLIST_HEAD(ksym_filter_head); | 52 | static HLIST_HEAD(ksym_filter_head); |
| @@ -181,13 +174,6 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | |||
| 181 | struct trace_ksym *entry; | 174 | struct trace_ksym *entry; |
| 182 | int ret = -ENOMEM; | 175 | int ret = -ENOMEM; |
| 183 | 176 | ||
| 184 | if (ksym_filter_entry_count >= KSYM_TRACER_MAX) { | ||
| 185 | printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No" | ||
| 186 | " new requests for tracing can be accepted now.\n", | ||
| 187 | KSYM_TRACER_MAX); | ||
| 188 | return -ENOSPC; | ||
| 189 | } | ||
| 190 | |||
| 191 | entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); | 177 | entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); |
| 192 | if (!entry) | 178 | if (!entry) |
| 193 | return -ENOMEM; | 179 | return -ENOMEM; |
| @@ -203,13 +189,17 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | |||
| 203 | 189 | ||
| 204 | if (IS_ERR(entry->ksym_hbp)) { | 190 | if (IS_ERR(entry->ksym_hbp)) { |
| 205 | ret = PTR_ERR(entry->ksym_hbp); | 191 | ret = PTR_ERR(entry->ksym_hbp); |
| 206 | printk(KERN_INFO "ksym_tracer request failed. Try again" | 192 | if (ret == -ENOSPC) { |
| 207 | " later!!\n"); | 193 | printk(KERN_ERR "ksym_tracer: Maximum limit reached." |
| 194 | " No new requests for tracing can be accepted now.\n"); | ||
| 195 | } else { | ||
| 196 | printk(KERN_INFO "ksym_tracer request failed. Try again" | ||
| 197 | " later!!\n"); | ||
| 198 | } | ||
| 208 | goto err; | 199 | goto err; |
| 209 | } | 200 | } |
| 210 | 201 | ||
| 211 | hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); | 202 | hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); |
| 212 | ksym_filter_entry_count++; | ||
| 213 | 203 | ||
| 214 | return 0; | 204 | return 0; |
| 215 | 205 | ||
| @@ -265,7 +255,6 @@ static void __ksym_trace_reset(void) | |||
| 265 | hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, | 255 | hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, |
| 266 | ksym_hlist) { | 256 | ksym_hlist) { |
| 267 | unregister_wide_hw_breakpoint(entry->ksym_hbp); | 257 | unregister_wide_hw_breakpoint(entry->ksym_hbp); |
| 268 | ksym_filter_entry_count--; | ||
| 269 | hlist_del_rcu(&(entry->ksym_hlist)); | 258 | hlist_del_rcu(&(entry->ksym_hlist)); |
| 270 | synchronize_rcu(); | 259 | synchronize_rcu(); |
| 271 | kfree(entry); | 260 | kfree(entry); |
| @@ -338,7 +327,6 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 338 | goto out_unlock; | 327 | goto out_unlock; |
| 339 | } | 328 | } |
| 340 | /* Error or "symbol:---" case: drop it */ | 329 | /* Error or "symbol:---" case: drop it */ |
| 341 | ksym_filter_entry_count--; | ||
| 342 | hlist_del_rcu(&(entry->ksym_hlist)); | 330 | hlist_del_rcu(&(entry->ksym_hlist)); |
| 343 | synchronize_rcu(); | 331 | synchronize_rcu(); |
| 344 | kfree(entry); | 332 | kfree(entry); |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 8e46b3323cdc..2404c129a8c9 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -253,7 +253,7 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len) | |||
| 253 | void *ret; | 253 | void *ret; |
| 254 | 254 | ||
| 255 | if (s->full) | 255 | if (s->full) |
| 256 | return 0; | 256 | return NULL; |
| 257 | 257 | ||
| 258 | if (len > ((PAGE_SIZE - 1) - s->len)) { | 258 | if (len > ((PAGE_SIZE - 1) - s->len)) { |
| 259 | s->full = 1; | 259 | s->full = 1; |
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 5fca0f51fde4..a55fccfede5d 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
| @@ -50,8 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static void | 52 | static void |
| 53 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, | 53 | probe_sched_switch(struct task_struct *prev, struct task_struct *next) |
| 54 | struct task_struct *next) | ||
| 55 | { | 54 | { |
| 56 | struct trace_array_cpu *data; | 55 | struct trace_array_cpu *data; |
| 57 | unsigned long flags; | 56 | unsigned long flags; |
| @@ -109,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
| 109 | } | 108 | } |
| 110 | 109 | ||
| 111 | static void | 110 | static void |
| 112 | probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) | 111 | probe_sched_wakeup(struct task_struct *wakee, int success) |
| 113 | { | 112 | { |
| 114 | struct trace_array_cpu *data; | 113 | struct trace_array_cpu *data; |
| 115 | unsigned long flags; | 114 | unsigned long flags; |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 0271742abb8d..8052446ceeaa 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -107,8 +107,7 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu) | |||
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | static void notrace | 109 | static void notrace |
| 110 | probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | 110 | probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next) |
| 111 | struct task_struct *next) | ||
| 112 | { | 111 | { |
| 113 | struct trace_array_cpu *data; | 112 | struct trace_array_cpu *data; |
| 114 | cycle_t T0, T1, delta; | 113 | cycle_t T0, T1, delta; |
| @@ -200,7 +199,7 @@ static void wakeup_reset(struct trace_array *tr) | |||
| 200 | } | 199 | } |
| 201 | 200 | ||
| 202 | static void | 201 | static void |
| 203 | probe_wakeup(struct rq *rq, struct task_struct *p, int success) | 202 | probe_wakeup(struct task_struct *p, int success) |
| 204 | { | 203 | { |
| 205 | struct trace_array_cpu *data; | 204 | struct trace_array_cpu *data; |
| 206 | int cpu = smp_processor_id(); | 205 | int cpu = smp_processor_id(); |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 81003b4d617f..250e7f9bd2f0 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -17,7 +17,6 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
| 17 | case TRACE_BRANCH: | 17 | case TRACE_BRANCH: |
| 18 | case TRACE_GRAPH_ENT: | 18 | case TRACE_GRAPH_ENT: |
| 19 | case TRACE_GRAPH_RET: | 19 | case TRACE_GRAPH_RET: |
| 20 | case TRACE_HW_BRANCHES: | ||
| 21 | case TRACE_KSYM: | 20 | case TRACE_KSYM: |
| 22 | return 1; | 21 | return 1; |
| 23 | } | 22 | } |
| @@ -30,7 +29,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) | |||
| 30 | struct trace_entry *entry; | 29 | struct trace_entry *entry; |
| 31 | unsigned int loops = 0; | 30 | unsigned int loops = 0; |
| 32 | 31 | ||
| 33 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { | 32 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) { |
| 34 | entry = ring_buffer_event_data(event); | 33 | entry = ring_buffer_event_data(event); |
| 35 | 34 | ||
| 36 | /* | 35 | /* |
| @@ -256,7 +255,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
| 256 | /* Maximum number of functions to trace before diagnosing a hang */ | 255 | /* Maximum number of functions to trace before diagnosing a hang */ |
| 257 | #define GRAPH_MAX_FUNC_TEST 100000000 | 256 | #define GRAPH_MAX_FUNC_TEST 100000000 |
| 258 | 257 | ||
| 259 | static void __ftrace_dump(bool disable_tracing); | 258 | static void |
| 259 | __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode); | ||
| 260 | static unsigned int graph_hang_thresh; | 260 | static unsigned int graph_hang_thresh; |
| 261 | 261 | ||
| 262 | /* Wrap the real function entry probe to avoid possible hanging */ | 262 | /* Wrap the real function entry probe to avoid possible hanging */ |
| @@ -267,7 +267,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) | |||
| 267 | ftrace_graph_stop(); | 267 | ftrace_graph_stop(); |
| 268 | printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); | 268 | printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); |
| 269 | if (ftrace_dump_on_oops) | 269 | if (ftrace_dump_on_oops) |
| 270 | __ftrace_dump(false); | 270 | __ftrace_dump(false, DUMP_ALL); |
| 271 | return 0; | 271 | return 0; |
| 272 | } | 272 | } |
| 273 | 273 | ||
| @@ -755,62 +755,6 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | |||
| 755 | } | 755 | } |
| 756 | #endif /* CONFIG_BRANCH_TRACER */ | 756 | #endif /* CONFIG_BRANCH_TRACER */ |
| 757 | 757 | ||
| 758 | #ifdef CONFIG_HW_BRANCH_TRACER | ||
| 759 | int | ||
| 760 | trace_selftest_startup_hw_branches(struct tracer *trace, | ||
| 761 | struct trace_array *tr) | ||
| 762 | { | ||
| 763 | struct trace_iterator *iter; | ||
| 764 | struct tracer tracer; | ||
| 765 | unsigned long count; | ||
| 766 | int ret; | ||
| 767 | |||
| 768 | if (!trace->open) { | ||
| 769 | printk(KERN_CONT "missing open function..."); | ||
| 770 | return -1; | ||
| 771 | } | ||
| 772 | |||
| 773 | ret = tracer_init(trace, tr); | ||
| 774 | if (ret) { | ||
| 775 | warn_failed_init_tracer(trace, ret); | ||
| 776 | return ret; | ||
| 777 | } | ||
| 778 | |||
| 779 | /* | ||
| 780 | * The hw-branch tracer needs to collect the trace from the various | ||
| 781 | * cpu trace buffers - before tracing is stopped. | ||
| 782 | */ | ||
| 783 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | ||
| 784 | if (!iter) | ||
| 785 | return -ENOMEM; | ||
| 786 | |||
| 787 | memcpy(&tracer, trace, sizeof(tracer)); | ||
| 788 | |||
| 789 | iter->trace = &tracer; | ||
| 790 | iter->tr = tr; | ||
| 791 | iter->pos = -1; | ||
| 792 | mutex_init(&iter->mutex); | ||
| 793 | |||
| 794 | trace->open(iter); | ||
| 795 | |||
| 796 | mutex_destroy(&iter->mutex); | ||
| 797 | kfree(iter); | ||
| 798 | |||
| 799 | tracing_stop(); | ||
| 800 | |||
| 801 | ret = trace_test_buffer(tr, &count); | ||
| 802 | trace->reset(tr); | ||
| 803 | tracing_start(); | ||
| 804 | |||
| 805 | if (!ret && !count) { | ||
| 806 | printk(KERN_CONT "no entries found.."); | ||
| 807 | ret = -1; | ||
| 808 | } | ||
| 809 | |||
| 810 | return ret; | ||
| 811 | } | ||
| 812 | #endif /* CONFIG_HW_BRANCH_TRACER */ | ||
| 813 | |||
| 814 | #ifdef CONFIG_KSYM_TRACER | 758 | #ifdef CONFIG_KSYM_TRACER |
| 815 | static int ksym_selftest_dummy; | 759 | static int ksym_selftest_dummy; |
| 816 | 760 | ||
