Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/module.c                        |    8
 -rw-r--r--  kernel/trace/ftrace.c                  |   30
 -rw-r--r--  kernel/trace/ring_buffer.c             |  179
 -rw-r--r--  kernel/trace/ring_buffer_benchmark.c   |    5
 -rw-r--r--  kernel/trace/trace.c                   |  127
 -rw-r--r--  kernel/trace/trace.h                   |   27
 -rw-r--r--  kernel/trace/trace_functions_graph.c   |  169
 -rw-r--r--  kernel/trace/trace_irqsoff.c           |  271
 -rw-r--r--  kernel/trace/trace_output.c            |    2
 -rw-r--r--  kernel/trace/trace_selftest.c          |    7
10 files changed, 676 insertions(+), 149 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 0838246d8c94..e2564580f3f1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -59,8 +59,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/module.h>
 
-EXPORT_TRACEPOINT_SYMBOL(module_get);
-
 #if 0
 #define DEBUGP printk
 #else
@@ -515,6 +513,9 @@ MODINFO_ATTR(srcversion);
 static char last_unloaded_module[MODULE_NAME_LEN+1];
 
 #ifdef CONFIG_MODULE_UNLOAD
+
+EXPORT_TRACEPOINT_SYMBOL(module_get);
+
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
@@ -857,8 +858,7 @@ void module_put(struct module *module)
         smp_wmb(); /* see comment in module_refcount */
         __this_cpu_inc(module->refptr->decs);
 
-        trace_module_put(module, _RET_IP_,
-                         __this_cpu_read(module->refptr->decs));
+        trace_module_put(module, _RET_IP_);
         /* Maybe they're waiting for us to drop reference? */
         if (unlikely(!module_is_live(module)))
                 wake_up_process(module->waiter);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index aa3a92b511e2..32837e19e3bd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -264,6 +264,7 @@ struct ftrace_profile {
         unsigned long                   counter;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         unsigned long long              time;
+        unsigned long long              time_squared;
 #endif
 };
 
@@ -366,9 +367,9 @@ static int function_stat_headers(struct seq_file *m)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         seq_printf(m, "  Function                               "
-                   "Hit    Time            Avg\n"
+                   "Hit    Time            Avg             s^2\n"
                       "  --------                               "
-                   "---    ----            ---\n");
+                   "---    ----            ---             ---\n");
 #else
         seq_printf(m, "  Function                               Hit\n"
                       "  --------                               ---\n");
@@ -384,6 +385,7 @@ static int function_stat_show(struct seq_file *m, void *v)
         static DEFINE_MUTEX(mutex);
         static struct trace_seq s;
         unsigned long long avg;
+        unsigned long long stddev;
 #endif
 
         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -394,11 +396,25 @@ static int function_stat_show(struct seq_file *m, void *v)
         avg = rec->time;
         do_div(avg, rec->counter);
 
+        /* Sample standard deviation (s^2) */
+        if (rec->counter <= 1)
+                stddev = 0;
+        else {
+                stddev = rec->time_squared - rec->counter * avg * avg;
+                /*
+                 * Divide only 1000 for ns^2 -> us^2 conversion.
+                 * trace_print_graph_duration will divide 1000 again.
+                 */
+                do_div(stddev, (rec->counter - 1) * 1000);
+        }
+
         mutex_lock(&mutex);
         trace_seq_init(&s);
         trace_print_graph_duration(rec->time, &s);
         trace_seq_puts(&s, "    ");
         trace_print_graph_duration(avg, &s);
+        trace_seq_puts(&s, "    ");
+        trace_print_graph_duration(stddev, &s);
         trace_print_seq(m, &s);
         mutex_unlock(&mutex);
 #endif
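
The s^2 column above is computed entirely from the three running sums the profiler keeps per function: hit count, total time, and total squared time. A minimal userspace sketch of the same arithmetic, assuming nothing beyond standard C (names here are illustrative, not kernel API; the kernel uses do_div() because plain 64-bit division is unavailable on some 32-bit architectures):

    #include <stdio.h>

    /* Sample variance from running sums: s^2 = (sum_sq - n*avg^2) / (n - 1) */
    static unsigned long long
    sample_variance(unsigned long long n, unsigned long long sum,
                    unsigned long long sum_sq)
    {
        unsigned long long avg;

        if (n <= 1)
            return 0;
        avg = sum / n;
        return (sum_sq - n * avg * avg) / (n - 1);
    }

    int main(void)
    {
        /* three calls taking 100, 200 and 300 ns */
        printf("s^2 = %llu ns^2\n", sample_variance(3, 600, 140000));
        return 0; /* prints: s^2 = 10000 ns^2 */
    }

The extra factor of 1000 in the patch's do_div() is a units conversion: times are summed in nanoseconds, so the variance is in ns^2, and since trace_print_graph_duration() divides by 1000 only once, dividing by 1000 here first makes the printed value come out in us^2.
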
@@ -650,6 +666,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
         if (!stat->hash || !ftrace_profile_enabled)
                 goto out;
 
+        /* If the calltime was zero'd ignore it */
+        if (!trace->calltime)
+                goto out;
+
         calltime = trace->rettime - trace->calltime;
 
         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
@@ -668,8 +688,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
         }
 
         rec = ftrace_find_profiled_func(stat, trace->func);
-        if (rec)
+        if (rec) {
                 rec->time += calltime;
+                rec->time_squared += calltime * calltime;
+        }
 
  out:
         local_irq_restore(flags);
@@ -3338,11 +3360,11 @@ void unregister_ftrace_graph(void)
                 goto out;
 
         ftrace_graph_active--;
-        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
         ftrace_graph_entry = ftrace_graph_entry_stub;
         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
         unregister_pm_notifier(&ftrace_suspend_notifier);
+        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 
  out:
         mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 41ca394feb22..7f6059c5aa94 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -319,6 +319,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST   (~TS_MASK)
 
+/* Flag when events were overwritten */
+#define RB_MISSED_EVENTS        (1 << 31)
+/* Missed count stored at end */
+#define RB_MISSED_STORED        (1 << 30)
+
 struct buffer_data_page {
         u64              time_stamp;    /* page time stamp */
         local_t          commit;        /* write committed index */
@@ -338,6 +343,7 @@ struct buffer_page {
         local_t          write;         /* index for next write */
         unsigned         read;          /* index for next read */
         local_t          entries;       /* entries on this page */
+        unsigned long    real_end;      /* real end of data */
         struct buffer_data_page *page;  /* Actual data page */
 };
 
@@ -417,6 +423,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
                                (unsigned int)sizeof(field.commit),
                                (unsigned int)is_signed_type(long));
 
+        ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
+                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                               (unsigned int)offsetof(typeof(field), commit),
+                               1,
+                               (unsigned int)is_signed_type(long));
+
         ret = trace_seq_printf(s, "\tfield: char data;\t"
                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
                                (unsigned int)offsetof(typeof(field), data),
@@ -440,6 +452,8 @@ struct ring_buffer_per_cpu {
         struct buffer_page              *tail_page;     /* write to tail */
         struct buffer_page              *commit_page;   /* committed pages */
         struct buffer_page              *reader_page;
+        unsigned long                   lost_events;
+        unsigned long                   last_overrun;
         local_t                         commit_overrun;
         local_t                         overrun;
         local_t                         entries;
@@ -1762,6 +1776,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
         kmemcheck_annotate_bitfield(event, bitfield);
 
         /*
+         * Save the original length to the meta data.
+         * This will be used by the reader to add lost event
+         * counter.
+         */
+        tail_page->real_end = tail;
+
+        /*
          * If this event is bigger than the minimum size, then
          * we need to be careful that we don't subtract the
          * write counter enough to allow another writer to slip
@@ -1979,17 +2000,13 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
                   u64 *ts, u64 *delta)
 {
         struct ring_buffer_event *event;
-        static int once;
         int ret;
 
-        if (unlikely(*delta > (1ULL << 59) && !once++)) {
-                printk(KERN_WARNING "Delta way too big! %llu"
-                       " ts=%llu write stamp = %llu\n",
-                       (unsigned long long)*delta,
-                       (unsigned long long)*ts,
-                       (unsigned long long)cpu_buffer->write_stamp);
-                WARN_ON(1);
-        }
+        WARN_ONCE(*delta > (1ULL << 59),
+                  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+                  (unsigned long long)*delta,
+                  (unsigned long long)*ts,
+                  (unsigned long long)cpu_buffer->write_stamp);
 
         /*
          * The delta is too big, we to add a
@@ -2838,6 +2855,7 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
         struct buffer_page *reader = NULL;
+        unsigned long overwrite;
         unsigned long flags;
         int nr_loops = 0;
         int ret;
@@ -2879,6 +2897,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         local_set(&cpu_buffer->reader_page->write, 0);
         local_set(&cpu_buffer->reader_page->entries, 0);
         local_set(&cpu_buffer->reader_page->page->commit, 0);
+        cpu_buffer->reader_page->real_end = 0;
 
  spin:
         /*
@@ -2899,6 +2918,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
 
         /*
+         * We want to make sure we read the overruns after we set up our
+         * pointers to the next object. The writer side does a
+         * cmpxchg to cross pages which acts as the mb on the writer
+         * side. Note, the reader will constantly fail the swap
+         * while the writer is updating the pointers, so this
+         * guarantees that the overwrite recorded here is the one we
+         * want to compare with the last_overrun.
+         */
+        smp_mb();
+        overwrite = local_read(&(cpu_buffer->overrun));
+
+        /*
          * Here's the tricky part.
          *
          * We need to move the pointer past the header page.
@@ -2929,6 +2960,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         cpu_buffer->reader_page = reader;
         rb_reset_reader_page(cpu_buffer);
 
+        if (overwrite != cpu_buffer->last_overrun) {
+                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+                cpu_buffer->last_overrun = overwrite;
+        }
+
         goto again;
 
  out:
@@ -3005,8 +3041,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
                 rb_advance_iter(iter);
 }
 
+static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+        return cpu_buffer->lost_events;
+}
+
 static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
+               unsigned long *lost_events)
 {
         struct ring_buffer_event *event;
         struct buffer_page *reader;
@@ -3058,6 +3100,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
                                                          cpu_buffer->cpu, ts);
                 }
+                if (lost_events)
+                        *lost_events = rb_lost_events(cpu_buffer);
                 return event;
 
         default:
@@ -3168,12 +3212,14 @@ static inline int rb_ok_to_lock(void)
  * @buffer: The ring buffer to read
  * @cpu: The cpu to peak at
  * @ts: The timestamp counter of this event.
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * This will return the event that will be read next, but does
  * not consume the data.
  */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+                 unsigned long *lost_events)
 {
         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
         struct ring_buffer_event *event;
@@ -3188,7 +3234,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         local_irq_save(flags);
         if (dolock)
                 spin_lock(&cpu_buffer->reader_lock);
-        event = rb_buffer_peek(cpu_buffer, ts);
+        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
         if (event && event->type_len == RINGBUF_TYPE_PADDING)
                 rb_advance_reader(cpu_buffer);
         if (dolock)
@@ -3230,13 +3276,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
+ * @cpu: the cpu to read the buffer from
+ * @ts: a variable to store the timestamp (may be NULL)
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * Returns the next event in the ring buffer, and that event is consumed.
  * Meaning, that sequential reads will keep returning a different event,
  * and eventually empty the ring buffer if the producer is slower.
  */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+                    unsigned long *lost_events)
 {
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_event *event = NULL;
@@ -3257,9 +3307,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
         if (dolock)
                 spin_lock(&cpu_buffer->reader_lock);
 
-        event = rb_buffer_peek(cpu_buffer, ts);
-        if (event)
+        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+        if (event) {
+                cpu_buffer->lost_events = 0;
                 rb_advance_reader(cpu_buffer);
+        }
 
         if (dolock)
                 spin_unlock(&cpu_buffer->reader_lock);
@@ -3276,23 +3328,30 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
- * ring_buffer_read_start - start a non consuming read of the buffer
+ * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
  *
- * This starts up an iteration through the buffer. It also disables
- * the recording to the buffer until the reading is finished.
- * This prevents the reading from being corrupted. This is not
- * a consuming read, so a producer is not expected.
+ * This performs the initial preparations necessary to iterate
+ * through the buffer.  Memory is allocated, buffer recording
+ * is disabled, and the iterator pointer is returned to the caller.
  *
- * Must be paired with ring_buffer_finish.
+ * Disabling buffer recordng prevents the reading from being
+ * corrupted.  This is not a consuming read, so a producer is not
+ * expected.
+ *
+ * After a sequence of ring_buffer_read_prepare calls, the user is
+ * expected to make at least one call to ring_buffer_prepare_sync.
+ * Afterwards, ring_buffer_read_start is invoked to get things going
+ * for real.
+ *
+ * This overall must be paired with ring_buffer_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 {
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_iter *iter;
-        unsigned long flags;
 
         if (!cpumask_test_cpu(cpu, buffer->cpumask))
                 return NULL;
@@ -3306,15 +3365,52 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
         iter->cpu_buffer = cpu_buffer;
 
         atomic_inc(&cpu_buffer->record_disabled);
+
+        return iter;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
+
+/**
+ * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
+ *
+ * All previously invoked ring_buffer_read_prepare calls to prepare
+ * iterators will be synchronized.  Afterwards, read_buffer_read_start
+ * calls on those iterators are allowed.
+ */
+void
+ring_buffer_read_prepare_sync(void)
+{
         synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
+
+/**
+ * ring_buffer_read_start - start a non consuming read of the buffer
+ * @iter: The iterator returned by ring_buffer_read_prepare
+ *
+ * This finalizes the startup of an iteration through the buffer.
+ * The iterator comes from a call to ring_buffer_read_prepare and
+ * an intervening ring_buffer_read_prepare_sync must have been
+ * performed.
+ *
+ * Must be paired with ring_buffer_finish.
+ */
+void
+ring_buffer_read_start(struct ring_buffer_iter *iter)
+{
+        struct ring_buffer_per_cpu *cpu_buffer;
+        unsigned long flags;
+
+        if (!iter)
+                return;
+
+        cpu_buffer = iter->cpu_buffer;
 
         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
         arch_spin_lock(&cpu_buffer->lock);
         rb_iter_reset(iter);
         arch_spin_unlock(&cpu_buffer->lock);
         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-        return iter;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
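
The point of splitting the old ring_buffer_read_start() into prepare/sync/start is that the synchronize_sched() grace period can now be paid once for a whole set of iterators instead of once per CPU. A hedged sketch of the intended calling sequence, mirroring the __tracing_open() hunk in trace.c further down (error handling omitted):

    for_each_tracing_cpu(cpu)
        iter->buffer_iter[cpu] =
            ring_buffer_read_prepare(iter->tr->buffer, cpu);

    /* one grace period covers every iterator prepared above */
    ring_buffer_read_prepare_sync();

    for_each_tracing_cpu(cpu) {
        ring_buffer_read_start(iter->buffer_iter[cpu]);
        tracing_iter_reset(iter, cpu);
    }
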
@@ -3408,6 +3504,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
         cpu_buffer->write_stamp = 0;
         cpu_buffer->read_stamp = 0;
 
+        cpu_buffer->lost_events = 0;
+        cpu_buffer->last_overrun = 0;
+
         rb_head_page_activate(cpu_buffer);
 }
 
@@ -3683,6 +3782,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
         struct ring_buffer_event *event;
         struct buffer_data_page *bpage;
         struct buffer_page *reader;
+        unsigned long missed_events;
         unsigned long flags;
         unsigned int commit;
         unsigned int read;
@@ -3719,6 +3819,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
         read = reader->read;
         commit = rb_page_commit(reader);
 
+        /* Check if any events were dropped */
+        missed_events = cpu_buffer->lost_events;
+
         /*
          * If this page has been partially read or
          * if len is not big enough to read the rest of the page or
@@ -3779,9 +3882,35 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                 local_set(&reader->entries, 0);
                 reader->read = 0;
                 *data_page = bpage;
+
+                /*
+                 * Use the real_end for the data size,
+                 * This gives us a chance to store the lost events
+                 * on the page.
+                 */
+                if (reader->real_end)
+                        local_set(&bpage->commit, reader->real_end);
         }
         ret = read;
 
+        cpu_buffer->lost_events = 0;
+        /*
+         * Set a flag in the commit field if we lost events
+         */
+        if (missed_events) {
+                commit = local_read(&bpage->commit);
+
+                /* If there is room at the end of the page to save the
+                 * missed events, then record it there.
+                 */
+                if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+                        memcpy(&bpage->data[commit], &missed_events,
+                               sizeof(missed_events));
+                        local_add(RB_MISSED_STORED, &bpage->commit);
+                }
+                local_add(RB_MISSED_EVENTS, &bpage->commit);
+        }
+
  out_unlock:
         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index df74c7982255..302f8a614635 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -81,7 +81,7 @@ static enum event_status read_event(int cpu)
         int *entry;
         u64 ts;
 
-        event = ring_buffer_consume(buffer, cpu, &ts);
+        event = ring_buffer_consume(buffer, cpu, &ts, NULL);
         if (!event)
                 return EVENT_DROPPED;
 
@@ -113,7 +113,8 @@ static enum event_status read_page(int cpu)
         ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
         if (ret >= 0) {
                 rpage = bpage;
-                commit = local_read(&rpage->commit);
+                /* The commit may have missed event flags set, clear them */
+                commit = local_read(&rpage->commit) & 0xfffff;
                 for (i = 0; i < commit && !kill_test; i += inc) {
 
                         if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 44f916a04065..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-        ftrace_dump_on_oops = 1;
-        return 1;
+        if (*str++ != '=' || !*str) {
+                ftrace_dump_on_oops = DUMP_ALL;
+                return 1;
+        }
+
+        if (!strcmp("orig_cpu", str)) {
+                ftrace_dump_on_oops = DUMP_ORIG;
+                return 1;
+        }
+
+        return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
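
enum ftrace_dump_mode is not visible in this diff, which is limited to kernel/; presumably the matching include/linux/ftrace.h change in the series defines it along these lines (values matching the comment block above: 1 dumps all CPUs, 2 only the oopsing CPU):

    enum ftrace_dump_mode {
        DUMP_NONE,  /* 0: do not dump */
        DUMP_ALL,   /* 1: "ftrace_dump_on_oops" - dump every CPU buffer */
        DUMP_ORIG,  /* 2: "ftrace_dump_on_oops=orig_cpu" */
    };

So a bare ftrace_dump_on_oops on the kernel command line keeps the old behavior (dump everything), ftrace_dump_on_oops=orig_cpu restricts the dump to the CPU that triggered the oops, and anything else is rejected by the parser above.
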
@@ -1545,7 +1557,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 }
 
 static struct trace_entry *
-peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
+                unsigned long *lost_events)
 {
         struct ring_buffer_event *event;
         struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1556,7 +1569,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
         if (buf_iter)
                 event = ring_buffer_iter_peek(buf_iter, ts);
         else
-                event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+                event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+                                         lost_events);
 
         ftrace_enable_cpu();
 
@@ -1564,10 +1578,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 }
 
 static struct trace_entry *
-__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+                  unsigned long *missing_events, u64 *ent_ts)
 {
         struct ring_buffer *buffer = iter->tr->buffer;
         struct trace_entry *ent, *next = NULL;
+        unsigned long lost_events = 0, next_lost = 0;
         int cpu_file = iter->cpu_file;
         u64 next_ts = 0, ts;
         int next_cpu = -1;
@@ -1580,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
         if (cpu_file > TRACE_PIPE_ALL_CPU) {
                 if (ring_buffer_empty_cpu(buffer, cpu_file))
                         return NULL;
-                ent = peek_next_entry(iter, cpu_file, ent_ts);
+                ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
                 if (ent_cpu)
                         *ent_cpu = cpu_file;
 
@@ -1592,7 +1608,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
                 if (ring_buffer_empty_cpu(buffer, cpu))
                         continue;
 
-                ent = peek_next_entry(iter, cpu, &ts);
+                ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 
                 /*
                  * Pick the entry with the smallest timestamp:
@@ -1601,6 +1617,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
                         next = ent;
                         next_cpu = cpu;
                         next_ts = ts;
+                        next_lost = lost_events;
                 }
         }
 
@@ -1610,6 +1627,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
         if (ent_ts)
                 *ent_ts = next_ts;
 
+        if (missing_events)
+                *missing_events = next_lost;
+
         return next;
 }
 
@@ -1617,13 +1637,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                           int *ent_cpu, u64 *ent_ts)
 {
-        return __find_next_entry(iter, ent_cpu, ent_ts);
+        return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
 static void *find_next_entry_inc(struct trace_iterator *iter)
 {
-        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
+        iter->ent = __find_next_entry(iter, &iter->cpu,
+                                      &iter->lost_events, &iter->ts);
 
         if (iter->ent)
                 trace_iterator_increment(iter);
@@ -1635,7 +1656,8 @@ static void trace_consume(struct trace_iterator *iter)
 {
         /* Don't allow ftrace to trace into the ring buffers */
         ftrace_disable_cpu();
-        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+                            &iter->lost_events);
         ftrace_enable_cpu();
 }
 
@@ -1786,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
 }
 
 
-static void
+void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1995,7 +2017,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
         return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
 }
 
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
 {
         int cpu;
 
@@ -2030,6 +2052,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
         enum print_line_t ret;
 
+        if (iter->lost_events)
+                trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+                                 iter->cpu, iter->lost_events);
+
         if (iter->trace && iter->trace->print_line) {
                 ret = iter->trace->print_line(iter);
                 if (ret != TRACE_TYPE_UNHANDLED)
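
With this hunk a consuming reader announces, inline and ahead of the next event, that data was overwritten before it could be read. Given the format string above, the annotation for CPU 1 with an illustrative count would look like:

    CPU:1 [LOST 523 EVENTS]
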
@@ -2058,6 +2084,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
         return print_trace_fmt(iter);
 }
 
+void trace_default_header(struct seq_file *m)
+{
+        struct trace_iterator *iter = m->private;
+
+        if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+                /* print nothing if the buffers are empty */
+                if (trace_empty(iter))
+                        return;
+                print_trace_header(m, iter);
+                if (!(trace_flags & TRACE_ITER_VERBOSE))
+                        print_lat_help_header(m);
+        } else {
+                if (!(trace_flags & TRACE_ITER_VERBOSE))
+                        print_func_help_header(m);
+        }
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
         struct trace_iterator *iter = v;
@@ -2070,17 +2113,9 @@ static int s_show(struct seq_file *m, void *v)
                 }
                 if (iter->trace && iter->trace->print_header)
                         iter->trace->print_header(m);
-                else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-                        /* print nothing if the buffers are empty */
-                        if (trace_empty(iter))
-                                return 0;
-                        print_trace_header(m, iter);
-                        if (!(trace_flags & TRACE_ITER_VERBOSE))
-                                print_lat_help_header(m);
-                } else {
-                        if (!(trace_flags & TRACE_ITER_VERBOSE))
-                                print_func_help_header(m);
-                }
+                else
+                        trace_default_header(m);
+
         } else if (iter->leftover) {
                 /*
                  * If we filled the seq_file buffer earlier, we
@@ -2166,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
                 for_each_tracing_cpu(cpu) {
-
                         iter->buffer_iter[cpu] =
-                                ring_buffer_read_start(iter->tr->buffer, cpu);
+                                ring_buffer_read_prepare(iter->tr->buffer, cpu);
+                }
+                ring_buffer_read_prepare_sync();
+                for_each_tracing_cpu(cpu) {
+                        ring_buffer_read_start(iter->buffer_iter[cpu]);
                         tracing_iter_reset(iter, cpu);
                 }
         } else {
                 cpu = iter->cpu_file;
                 iter->buffer_iter[cpu] =
-                        ring_buffer_read_start(iter->tr->buffer, cpu);
+                        ring_buffer_read_prepare(iter->tr->buffer, cpu);
+                ring_buffer_read_prepare_sync();
+                ring_buffer_read_start(iter->buffer_iter[cpu]);
                 tracing_iter_reset(iter, cpu);
         }
 
@@ -4324,7 +4364,7 @@ static int trace_panic_handler(struct notifier_block *this,
                              unsigned long event, void *unused)
 {
         if (ftrace_dump_on_oops)
-                ftrace_dump();
+                ftrace_dump(ftrace_dump_on_oops);
         return NOTIFY_OK;
 }
 
@@ -4341,7 +4381,7 @@ static int trace_die_handler(struct notifier_block *self,
         switch (val) {
         case DIE_OOPS:
                 if (ftrace_dump_on_oops)
-                        ftrace_dump();
+                        ftrace_dump(ftrace_dump_on_oops);
                 break;
         default:
                 break;
@@ -4382,7 +4422,8 @@ trace_printk_seq(struct trace_seq *s)
         trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
         static arch_spinlock_t ftrace_dump_lock =
                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4415,12 +4456,25 @@ static void __ftrace_dump(bool disable_tracing)
         /* don't look at user memory in panic mode */
         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-        printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
         /* Simulate the iterator */
         iter.tr = &global_trace;
         iter.trace = current_trace;
-        iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+        switch (oops_dump_mode) {
+        case DUMP_ALL:
+                iter.cpu_file = TRACE_PIPE_ALL_CPU;
+                break;
+        case DUMP_ORIG:
+                iter.cpu_file = raw_smp_processor_id();
+                break;
+        case DUMP_NONE:
+                goto out_enable;
+        default:
+                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+                iter.cpu_file = TRACE_PIPE_ALL_CPU;
+        }
+
+        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
         /*
          * We need to stop all tracing on all CPUS to read the
@@ -4459,6 +4513,7 @@ static void __ftrace_dump(bool disable_tracing)
         else
                 printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
         /* Re-enable tracing if requested */
         if (!disable_tracing) {
                 trace_flags |= old_userobj;
@@ -4475,9 +4530,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-        __ftrace_dump(true);
+        __ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2825ef2c0b15..911e9864e94a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -378,6 +378,9 @@ void trace_function(struct trace_array *tr,
                     unsigned long ip,
                     unsigned long parent_ip,
                     unsigned long flags, int pc);
+void trace_default_header(struct seq_file *m);
+void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
+int trace_empty(struct trace_iterator *iter);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
@@ -491,9 +494,29 @@ extern int trace_clock_id;
 
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+
+/* Flag options */
+#define TRACE_GRAPH_PRINT_OVERRUN       0x1
+#define TRACE_GRAPH_PRINT_CPU           0x2
+#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
+#define TRACE_GRAPH_PRINT_PROC          0x8
+#define TRACE_GRAPH_PRINT_DURATION      0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
+
+extern enum print_line_t
+print_graph_function_flags(struct trace_iterator *iter, u32 flags);
+extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
 extern enum print_line_t
 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
+extern void graph_trace_open(struct trace_iterator *iter);
+extern void graph_trace_close(struct trace_iterator *iter);
+extern int __trace_graph_entry(struct trace_array *tr,
+                               struct ftrace_graph_ent *trace,
+                               unsigned long flags, int pc);
+extern void __trace_graph_return(struct trace_array *tr,
+                                 struct ftrace_graph_ret *trace,
+                                 unsigned long flags, int pc);
+
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
@@ -524,7 +547,7 @@ static inline int ftrace_graph_addr(unsigned long addr)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
         return TRACE_TYPE_UNHANDLED;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 9aed1a5cf553..dd11c830eb84 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -40,7 +40,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_OVERHEAD      0x4
 #define TRACE_GRAPH_PRINT_PROC          0x8
 #define TRACE_GRAPH_PRINT_DURATION      0x10
-#define TRACE_GRAPH_PRINT_ABS_TIME      0X20
+#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
 
 static struct tracer_opt trace_opts[] = {
         /* Display overruns? (for self-debug purpose) */
@@ -179,7 +179,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
         return ret;
 }
 
-static int __trace_graph_entry(struct trace_array *tr,
+int __trace_graph_entry(struct trace_array *tr,
                                 struct ftrace_graph_ent *trace,
                                 unsigned long flags,
                                 int pc)
@@ -246,7 +246,7 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
         return trace_graph_entry(trace);
 }
 
-static void __trace_graph_return(struct trace_array *tr,
+void __trace_graph_return(struct trace_array *tr,
                                 struct ftrace_graph_ret *trace,
                                 unsigned long flags,
                                 int pc)
@@ -490,9 +490,10 @@ get_return_for_leaf(struct trace_iterator *iter,
                  * We need to consume the current entry to see
                  * the next one.
                  */
-                ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+                ring_buffer_consume(iter->tr->buffer, iter->cpu,
+                                    NULL, NULL);
                 event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-                                         NULL);
+                                         NULL, NULL);
         }
 
         if (!event)
@@ -526,17 +527,18 @@ get_return_for_leaf(struct trace_iterator *iter,
 
 /* Signal a overhead of time execution to the output */
 static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+print_graph_overhead(unsigned long long duration, struct trace_seq *s,
+                     u32 flags)
 {
         /* If duration disappear, we don't need anything */
-        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+        if (!(flags & TRACE_GRAPH_PRINT_DURATION))
                 return 1;
 
         /* Non nested entry or return */
         if (duration == -1)
                 return trace_seq_printf(s, "  ");
 
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+        if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
                 /* Duration exceeded 100 msecs */
                 if (duration > 100000ULL)
                         return trace_seq_printf(s, "! ");
@@ -562,7 +564,7 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s)
 
 static enum print_line_t
 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
-                enum trace_type type, int cpu, pid_t pid)
+                enum trace_type type, int cpu, pid_t pid, u32 flags)
 {
         int ret;
         struct trace_seq *s = &iter->seq;
@@ -572,21 +574,21 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                 return TRACE_TYPE_UNHANDLED;
 
         /* Absolute time */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                 ret = print_graph_abs_time(iter->ts, s);
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
         /* Cpu */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+        if (flags & TRACE_GRAPH_PRINT_CPU) {
                 ret = print_graph_cpu(s, cpu);
                 if (ret == TRACE_TYPE_PARTIAL_LINE)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
         /* Proc */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+        if (flags & TRACE_GRAPH_PRINT_PROC) {
                 ret = print_graph_proc(s, pid);
                 if (ret == TRACE_TYPE_PARTIAL_LINE)
                         return TRACE_TYPE_PARTIAL_LINE;
@@ -596,7 +598,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
         }
 
         /* No overhead */
-        ret = print_graph_overhead(-1, s);
+        ret = print_graph_overhead(-1, s, flags);
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
@@ -609,7 +611,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                 return TRACE_TYPE_PARTIAL_LINE;
 
         /* Don't close the duration column if haven't one */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+        if (flags & TRACE_GRAPH_PRINT_DURATION)
                 trace_seq_printf(s, " |");
         ret = trace_seq_printf(s, "\n");
 
@@ -679,7 +681,8 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
                 struct ftrace_graph_ent_entry *entry,
-                struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
+                struct ftrace_graph_ret_entry *ret_entry,
+                struct trace_seq *s, u32 flags)
 {
         struct fgraph_data *data = iter->private;
         struct ftrace_graph_ret *graph_ret;
@@ -711,12 +714,12 @@ print_graph_entry_leaf(struct trace_iterator *iter,
         }
 
         /* Overhead */
-        ret = print_graph_overhead(duration, s);
+        ret = print_graph_overhead(duration, s, flags);
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
         /* Duration */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                 ret = print_graph_duration(duration, s);
                 if (ret == TRACE_TYPE_PARTIAL_LINE)
                         return TRACE_TYPE_PARTIAL_LINE;
@@ -739,7 +742,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 static enum print_line_t
 print_graph_entry_nested(struct trace_iterator *iter,
                          struct ftrace_graph_ent_entry *entry,
-                         struct trace_seq *s, int cpu)
+                         struct trace_seq *s, int cpu, u32 flags)
 {
         struct ftrace_graph_ent *call = &entry->graph_ent;
         struct fgraph_data *data = iter->private;
@@ -759,12 +762,12 @@ print_graph_entry_nested(struct trace_iterator *iter,
         }
 
         /* No overhead */
-        ret = print_graph_overhead(-1, s);
+        ret = print_graph_overhead(-1, s, flags);
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
         /* No time */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                 ret = trace_seq_printf(s, " | ");
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
@@ -790,7 +793,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 
 static enum print_line_t
 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
-                     int type, unsigned long addr)
+                     int type, unsigned long addr, u32 flags)
 {
         struct fgraph_data *data = iter->private;
         struct trace_entry *ent = iter->ent;
@@ -803,27 +806,27 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 
         if (type) {
                 /* Interrupt */
-                ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+                ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
                 if (ret == TRACE_TYPE_PARTIAL_LINE)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
         /* Absolute time */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                 ret = print_graph_abs_time(iter->ts, s);
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
         /* Cpu */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+        if (flags & TRACE_GRAPH_PRINT_CPU) {
                 ret = print_graph_cpu(s, cpu);
                 if (ret == TRACE_TYPE_PARTIAL_LINE)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
         /* Proc */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+        if (flags & TRACE_GRAPH_PRINT_PROC) {
827 | ret = print_graph_proc(s, ent->pid); | 830 | ret = print_graph_proc(s, ent->pid); |
828 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 831 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
829 | return TRACE_TYPE_PARTIAL_LINE; | 832 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -845,7 +848,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
845 | 848 | ||
846 | static enum print_line_t | 849 | static enum print_line_t |
847 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 850 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
848 | struct trace_iterator *iter) | 851 | struct trace_iterator *iter, u32 flags) |
849 | { | 852 | { |
850 | struct fgraph_data *data = iter->private; | 853 | struct fgraph_data *data = iter->private; |
851 | struct ftrace_graph_ent *call = &field->graph_ent; | 854 | struct ftrace_graph_ent *call = &field->graph_ent; |
@@ -853,14 +856,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
853 | static enum print_line_t ret; | 856 | static enum print_line_t ret; |
854 | int cpu = iter->cpu; | 857 | int cpu = iter->cpu; |
855 | 858 | ||
856 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) | 859 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) |
857 | return TRACE_TYPE_PARTIAL_LINE; | 860 | return TRACE_TYPE_PARTIAL_LINE; |
858 | 861 | ||
859 | leaf_ret = get_return_for_leaf(iter, field); | 862 | leaf_ret = get_return_for_leaf(iter, field); |
860 | if (leaf_ret) | 863 | if (leaf_ret) |
861 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s); | 864 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); |
862 | else | 865 | else |
863 | ret = print_graph_entry_nested(iter, field, s, cpu); | 866 | ret = print_graph_entry_nested(iter, field, s, cpu, flags); |
864 | 867 | ||
865 | if (data) { | 868 | if (data) { |
866 | /* | 869 | /* |
@@ -879,7 +882,8 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
879 | 882 | ||
880 | static enum print_line_t | 883 | static enum print_line_t |
881 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 884 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
882 | struct trace_entry *ent, struct trace_iterator *iter) | 885 | struct trace_entry *ent, struct trace_iterator *iter, |
886 | u32 flags) | ||
883 | { | 887 | { |
884 | unsigned long long duration = trace->rettime - trace->calltime; | 888 | unsigned long long duration = trace->rettime - trace->calltime; |
885 | struct fgraph_data *data = iter->private; | 889 | struct fgraph_data *data = iter->private; |
@@ -909,16 +913,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
909 | } | 913 | } |
910 | } | 914 | } |
911 | 915 | ||
912 | if (print_graph_prologue(iter, s, 0, 0)) | 916 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
913 | return TRACE_TYPE_PARTIAL_LINE; | 917 | return TRACE_TYPE_PARTIAL_LINE; |
914 | 918 | ||
915 | /* Overhead */ | 919 | /* Overhead */ |
916 | ret = print_graph_overhead(duration, s); | 920 | ret = print_graph_overhead(duration, s, flags); |
917 | if (!ret) | 921 | if (!ret) |
918 | return TRACE_TYPE_PARTIAL_LINE; | 922 | return TRACE_TYPE_PARTIAL_LINE; |
919 | 923 | ||
920 | /* Duration */ | 924 | /* Duration */ |
921 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 925 | if (flags & TRACE_GRAPH_PRINT_DURATION) { |
922 | ret = print_graph_duration(duration, s); | 926 | ret = print_graph_duration(duration, s); |
923 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 927 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
924 | return TRACE_TYPE_PARTIAL_LINE; | 928 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -948,14 +952,15 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
948 | } | 952 | } |
949 | 953 | ||
950 | /* Overrun */ | 954 | /* Overrun */ |
951 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { | 955 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { |
952 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 956 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", |
953 | trace->overrun); | 957 | trace->overrun); |
954 | if (!ret) | 958 | if (!ret) |
955 | return TRACE_TYPE_PARTIAL_LINE; | 959 | return TRACE_TYPE_PARTIAL_LINE; |
956 | } | 960 | } |
957 | 961 | ||
958 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); | 962 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, |
963 | cpu, pid, flags); | ||
959 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 964 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
960 | return TRACE_TYPE_PARTIAL_LINE; | 965 | return TRACE_TYPE_PARTIAL_LINE; |
961 | 966 | ||
@@ -963,8 +968,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
963 | } | 968 | } |
964 | 969 | ||
965 | static enum print_line_t | 970 | static enum print_line_t |
966 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | 971 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
967 | struct trace_iterator *iter) | 972 | struct trace_iterator *iter, u32 flags) |
968 | { | 973 | { |
969 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 974 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
970 | struct fgraph_data *data = iter->private; | 975 | struct fgraph_data *data = iter->private; |
@@ -976,16 +981,16 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
976 | if (data) | 981 | if (data) |
977 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; | 982 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
978 | 983 | ||
979 | if (print_graph_prologue(iter, s, 0, 0)) | 984 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
980 | return TRACE_TYPE_PARTIAL_LINE; | 985 | return TRACE_TYPE_PARTIAL_LINE; |
981 | 986 | ||
982 | /* No overhead */ | 987 | /* No overhead */ |
983 | ret = print_graph_overhead(-1, s); | 988 | ret = print_graph_overhead(-1, s, flags); |
984 | if (!ret) | 989 | if (!ret) |
985 | return TRACE_TYPE_PARTIAL_LINE; | 990 | return TRACE_TYPE_PARTIAL_LINE; |
986 | 991 | ||
987 | /* No time */ | 992 | /* No time */ |
988 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 993 | if (flags & TRACE_GRAPH_PRINT_DURATION) { |
989 | ret = trace_seq_printf(s, " | "); | 994 | ret = trace_seq_printf(s, " | "); |
990 | if (!ret) | 995 | if (!ret) |
991 | return TRACE_TYPE_PARTIAL_LINE; | 996 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -1040,7 +1045,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1040 | 1045 | ||
1041 | 1046 | ||
1042 | enum print_line_t | 1047 | enum print_line_t |
1043 | print_graph_function(struct trace_iterator *iter) | 1048 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
1044 | { | 1049 | { |
1045 | struct ftrace_graph_ent_entry *field; | 1050 | struct ftrace_graph_ent_entry *field; |
1046 | struct fgraph_data *data = iter->private; | 1051 | struct fgraph_data *data = iter->private; |
@@ -1061,7 +1066,7 @@ print_graph_function(struct trace_iterator *iter) | |||
1061 | if (data && data->failed) { | 1066 | if (data && data->failed) { |
1062 | field = &data->ent; | 1067 | field = &data->ent; |
1063 | iter->cpu = data->cpu; | 1068 | iter->cpu = data->cpu; |
1064 | ret = print_graph_entry(field, s, iter); | 1069 | ret = print_graph_entry(field, s, iter, flags); |
1065 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { | 1070 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { |
1066 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; | 1071 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; |
1067 | ret = TRACE_TYPE_NO_CONSUME; | 1072 | ret = TRACE_TYPE_NO_CONSUME; |
@@ -1081,32 +1086,49 @@ print_graph_function(struct trace_iterator *iter) | |||
1081 | struct ftrace_graph_ent_entry saved; | 1086 | struct ftrace_graph_ent_entry saved; |
1082 | trace_assign_type(field, entry); | 1087 | trace_assign_type(field, entry); |
1083 | saved = *field; | 1088 | saved = *field; |
1084 | return print_graph_entry(&saved, s, iter); | 1089 | return print_graph_entry(&saved, s, iter, flags); |
1085 | } | 1090 | } |
1086 | case TRACE_GRAPH_RET: { | 1091 | case TRACE_GRAPH_RET: { |
1087 | struct ftrace_graph_ret_entry *field; | 1092 | struct ftrace_graph_ret_entry *field; |
1088 | trace_assign_type(field, entry); | 1093 | trace_assign_type(field, entry); |
1089 | return print_graph_return(&field->ret, s, entry, iter); | 1094 | return print_graph_return(&field->ret, s, entry, iter, flags); |
1090 | } | 1095 | } |
1096 | case TRACE_STACK: | ||
1097 | case TRACE_FN: | ||
1098 | /* don't trace stack and functions as comments */ | ||
1099 | return TRACE_TYPE_UNHANDLED; | ||
1100 | |||
1091 | default: | 1101 | default: |
1092 | return print_graph_comment(s, entry, iter); | 1102 | return print_graph_comment(s, entry, iter, flags); |
1093 | } | 1103 | } |
1094 | 1104 | ||
1095 | return TRACE_TYPE_HANDLED; | 1105 | return TRACE_TYPE_HANDLED; |
1096 | } | 1106 | } |
1097 | 1107 | ||
1098 | static void print_lat_header(struct seq_file *s) | 1108 | static enum print_line_t |
1109 | print_graph_function(struct trace_iterator *iter) | ||
1110 | { | ||
1111 | return print_graph_function_flags(iter, tracer_flags.val); | ||
1112 | } | ||
1113 | |||
1114 | static enum print_line_t | ||
1115 | print_graph_function_event(struct trace_iterator *iter, int flags) | ||
1116 | { | ||
1117 | return print_graph_function(iter); | ||
1118 | } | ||
1119 | |||
1120 | static void print_lat_header(struct seq_file *s, u32 flags) | ||
1099 | { | 1121 | { |
1100 | static const char spaces[] = " " /* 16 spaces */ | 1122 | static const char spaces[] = " " /* 16 spaces */ |
1101 | " " /* 4 spaces */ | 1123 | " " /* 4 spaces */ |
1102 | " "; /* 17 spaces */ | 1124 | " "; /* 17 spaces */ |
1103 | int size = 0; | 1125 | int size = 0; |
1104 | 1126 | ||
1105 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1127 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1106 | size += 16; | 1128 | size += 16; |
1107 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1129 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1108 | size += 4; | 1130 | size += 4; |
1109 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1131 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1110 | size += 17; | 1132 | size += 17; |
1111 | 1133 | ||
1112 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); | 1134 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); |
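print_lat_header() sizes its leading padding with printf's "%.*s" precision idiom: at most size characters of a constant all-spaces string are printed, so the latency banner shifts right by exactly the combined width of whichever TIME (16), CPU (4) and PROC (17) columns are enabled. A standalone userspace illustration of the same idiom, assuming nothing from the kernel:

	#include <stdio.h>

	int main(void)
	{
		static const char spaces[] = "                     "; /* 21 spaces */
		int size = 4 + 17; /* e.g. CPU and PROC columns enabled */

		/* "%.*s" prints at most the first 'size' chars of 'spaces' */
		printf("#%.*s _-----=> irqs-off\n", size, spaces);
		return 0;
	}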
@@ -1117,43 +1139,48 @@ static void print_lat_header(struct seq_file *s) | |||
1117 | seq_printf(s, "#%.*s|||| / \n", size, spaces); | 1139 | seq_printf(s, "#%.*s|||| / \n", size, spaces); |
1118 | } | 1140 | } |
1119 | 1141 | ||
1120 | static void print_graph_headers(struct seq_file *s) | 1142 | void print_graph_headers_flags(struct seq_file *s, u32 flags) |
1121 | { | 1143 | { |
1122 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; | 1144 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; |
1123 | 1145 | ||
1124 | if (lat) | 1146 | if (lat) |
1125 | print_lat_header(s); | 1147 | print_lat_header(s, flags); |
1126 | 1148 | ||
1127 | /* 1st line */ | 1149 | /* 1st line */ |
1128 | seq_printf(s, "#"); | 1150 | seq_printf(s, "#"); |
1129 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1151 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1130 | seq_printf(s, " TIME "); | 1152 | seq_printf(s, " TIME "); |
1131 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1153 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1132 | seq_printf(s, " CPU"); | 1154 | seq_printf(s, " CPU"); |
1133 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1155 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1134 | seq_printf(s, " TASK/PID "); | 1156 | seq_printf(s, " TASK/PID "); |
1135 | if (lat) | 1157 | if (lat) |
1136 | seq_printf(s, "|||||"); | 1158 | seq_printf(s, "|||||"); |
1137 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 1159 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1138 | seq_printf(s, " DURATION "); | 1160 | seq_printf(s, " DURATION "); |
1139 | seq_printf(s, " FUNCTION CALLS\n"); | 1161 | seq_printf(s, " FUNCTION CALLS\n"); |
1140 | 1162 | ||
1141 | /* 2nd line */ | 1163 | /* 2nd line */ |
1142 | seq_printf(s, "#"); | 1164 | seq_printf(s, "#"); |
1143 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1165 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1144 | seq_printf(s, " | "); | 1166 | seq_printf(s, " | "); |
1145 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1167 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1146 | seq_printf(s, " | "); | 1168 | seq_printf(s, " | "); |
1147 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1169 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1148 | seq_printf(s, " | | "); | 1170 | seq_printf(s, " | | "); |
1149 | if (lat) | 1171 | if (lat) |
1150 | seq_printf(s, "|||||"); | 1172 | seq_printf(s, "|||||"); |
1151 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 1173 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1152 | seq_printf(s, " | | "); | 1174 | seq_printf(s, " | | "); |
1153 | seq_printf(s, " | | | |\n"); | 1175 | seq_printf(s, " | | | |\n"); |
1154 | } | 1176 | } |
1155 | 1177 | ||
1156 | static void graph_trace_open(struct trace_iterator *iter) | 1178 | void print_graph_headers(struct seq_file *s) |
1179 | { | ||
1180 | print_graph_headers_flags(s, tracer_flags.val); | ||
1181 | } | ||
1182 | |||
1183 | void graph_trace_open(struct trace_iterator *iter) | ||
1157 | { | 1184 | { |
1158 | /* pid and depth on the last trace processed */ | 1185 | /* pid and depth on the last trace processed */ |
1159 | struct fgraph_data *data; | 1186 | struct fgraph_data *data; |
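Dropping static from graph_trace_open(), graph_trace_close() and print_graph_headers(), and adding the *_flags() variants, is what lets the latency tracers reuse this machinery. The matching declarations are presumably added to kernel/trace/trace.h (that file changes in this series, but its hunk is not shown here); they would look roughly like:

	extern enum print_line_t
	print_graph_function_flags(struct trace_iterator *iter, u32 flags);
	extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
	extern void graph_trace_open(struct trace_iterator *iter);
	extern void graph_trace_close(struct trace_iterator *iter);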
@@ -1188,7 +1215,7 @@ static void graph_trace_open(struct trace_iterator *iter) | |||
1188 | pr_warning("function graph tracer: not enough memory\n"); | 1215 | pr_warning("function graph tracer: not enough memory\n"); |
1189 | } | 1216 | } |
1190 | 1217 | ||
1191 | static void graph_trace_close(struct trace_iterator *iter) | 1218 | void graph_trace_close(struct trace_iterator *iter) |
1192 | { | 1219 | { |
1193 | struct fgraph_data *data = iter->private; | 1220 | struct fgraph_data *data = iter->private; |
1194 | 1221 | ||
@@ -1198,6 +1225,16 @@ static void graph_trace_close(struct trace_iterator *iter) | |||
1198 | } | 1225 | } |
1199 | } | 1226 | } |
1200 | 1227 | ||
1228 | static struct trace_event graph_trace_entry_event = { | ||
1229 | .type = TRACE_GRAPH_ENT, | ||
1230 | .trace = print_graph_function_event, | ||
1231 | }; | ||
1232 | |||
1233 | static struct trace_event graph_trace_ret_event = { | ||
1234 | .type = TRACE_GRAPH_RET, | ||
1235 | .trace = print_graph_function_event, | ||
1236 | }; | ||
1237 | |||
1201 | static struct tracer graph_trace __read_mostly = { | 1238 | static struct tracer graph_trace __read_mostly = { |
1202 | .name = "function_graph", | 1239 | .name = "function_graph", |
1203 | .open = graph_trace_open, | 1240 | .open = graph_trace_open, |
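The two struct trace_event instances register TRACE_GRAPH_ENT and TRACE_GRAPH_RET with the generic output layer, so graph records are printable even when function_graph is not the current tracer. A rough sketch, as an assumption about the core's dispatch (this is not part of the hunk): handlers are looked up by the record's type and their ->trace method is invoked:

	struct trace_event *event = ftrace_find_event(iter->ent->type);

	if (event)
		return event->trace(iter, sym_flags);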
@@ -1219,6 +1256,16 @@ static __init int init_graph_trace(void) | |||
1219 | { | 1256 | { |
1220 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); | 1257 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); |
1221 | 1258 | ||
1259 | if (!register_ftrace_event(&graph_trace_entry_event)) { | ||
1260 | pr_warning("Warning: could not register graph trace entry event\n"); | ||
1261 | return 1; | ||
1262 | } | ||
1263 | |||
1264 | if (!register_ftrace_event(&graph_trace_ret_event)) { | ||
1265 | pr_warning("Warning: could not register graph trace return event\n"); | ||
1266 | return 1; | ||
1267 | } | ||
1268 | |||
1222 | return register_tracer(&graph_trace); | 1269 | return register_tracer(&graph_trace); |
1223 | } | 1270 | } |
1224 | 1271 | ||
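register_ftrace_event() returns the event's type number on success and 0 on failure, which is why both calls above are tested with "!". An equivalent, more explicit spelling of the first check:

	int type = register_ftrace_event(&graph_trace_entry_event);

	if (type == 0) {	/* 0 means registration failed */
		pr_warning("Warning: could not register graph trace entry event\n");
		return 1;
	}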
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2974bc7538c7..6fd486e0cef4 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -34,6 +34,9 @@ static int trace_type __read_mostly; | |||
34 | 34 | ||
35 | static int save_lat_flag; | 35 | static int save_lat_flag; |
36 | 36 | ||
37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); | ||
38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); | ||
39 | |||
37 | #ifdef CONFIG_PREEMPT_TRACER | 40 | #ifdef CONFIG_PREEMPT_TRACER |
38 | static inline int | 41 | static inline int |
39 | preempt_trace(void) | 42 | preempt_trace(void) |
@@ -55,6 +58,23 @@ irq_trace(void) | |||
55 | # define irq_trace() (0) | 58 | # define irq_trace() (0) |
56 | #endif | 59 | #endif |
57 | 60 | ||
61 | #define TRACE_DISPLAY_GRAPH 1 | ||
62 | |||
63 | static struct tracer_opt trace_opts[] = { | ||
64 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
65 | /* display latency trace as call graph */ | ||
66 | { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, | ||
67 | #endif | ||
68 | { } /* Empty entry */ | ||
69 | }; | ||
70 | |||
71 | static struct tracer_flags tracer_flags = { | ||
72 | .val = 0, | ||
73 | .opts = trace_opts, | ||
74 | }; | ||
75 | |||
76 | #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) | ||
77 | |||
58 | /* | 78 | /* |
59 | * Sequence count - we record it when starting a measurement and | 79 | * Sequence count - we record it when starting a measurement and |
60 | * skip the latency if the sequence has changed - some other section | 80 | * skip the latency if the sequence has changed - some other section |
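TRACE_DISPLAY_GRAPH occupies the low bit of the new per-tracer flag word, and is_graph() simply masks it out of tracer_flags.val. The option surfaces as an options/display-graph file; writing it makes the tracing core call back into the tracer's set_flag handler, roughly like this (an assumption about the core side, which is not part of this patch):

	/* on "echo 1 > options/display-graph", set == 1 */
	ret = current_trace->set_flag(tracer_flags.val,
				      TRACE_DISPLAY_GRAPH, set);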
@@ -108,6 +128,202 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
108 | }; | 128 | }; |
109 | #endif /* CONFIG_FUNCTION_TRACER */ | 129 | #endif /* CONFIG_FUNCTION_TRACER */ |
110 | 130 | ||
131 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
132 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | ||
133 | { | ||
134 | int cpu; | ||
135 | |||
136 | if (!(bit & TRACE_DISPLAY_GRAPH)) | ||
137 | return -EINVAL; | ||
138 | |||
139 | if (!(is_graph() ^ set)) | ||
140 | return 0; | ||
141 | |||
142 | stop_irqsoff_tracer(irqsoff_trace, !set); | ||
143 | |||
144 | for_each_possible_cpu(cpu) | ||
145 | per_cpu(tracing_cpu, cpu) = 0; | ||
146 | |||
147 | tracing_max_latency = 0; | ||
148 | tracing_reset_online_cpus(irqsoff_trace); | ||
149 | |||
150 | return start_irqsoff_tracer(irqsoff_trace, set); | ||
151 | } | ||
152 | |||
153 | static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) | ||
154 | { | ||
155 | struct trace_array *tr = irqsoff_trace; | ||
156 | struct trace_array_cpu *data; | ||
157 | unsigned long flags; | ||
158 | long disabled; | ||
159 | int ret; | ||
160 | int cpu; | ||
161 | int pc; | ||
162 | |||
163 | cpu = raw_smp_processor_id(); | ||
164 | if (likely(!per_cpu(tracing_cpu, cpu))) | ||
165 | return 0; | ||
166 | |||
167 | local_save_flags(flags); | ||
168 | /* slight chance of a false positive on tracing_cpu */ | ||
169 | if (!irqs_disabled_flags(flags)) | ||
170 | return 0; | ||
171 | |||
172 | data = tr->data[cpu]; | ||
173 | disabled = atomic_inc_return(&data->disabled); | ||
174 | |||
175 | if (likely(disabled == 1)) { | ||
176 | pc = preempt_count(); | ||
177 | ret = __trace_graph_entry(tr, trace, flags, pc); | ||
178 | } else | ||
179 | ret = 0; | ||
180 | |||
181 | atomic_dec(&data->disabled); | ||
182 | return ret; | ||
183 | } | ||
184 | |||
185 | static void irqsoff_graph_return(struct ftrace_graph_ret *trace) | ||
186 | { | ||
187 | struct trace_array *tr = irqsoff_trace; | ||
188 | struct trace_array_cpu *data; | ||
189 | unsigned long flags; | ||
190 | long disabled; | ||
191 | int cpu; | ||
192 | int pc; | ||
193 | |||
194 | cpu = raw_smp_processor_id(); | ||
195 | if (likely(!per_cpu(tracing_cpu, cpu))) | ||
196 | return; | ||
197 | |||
198 | local_save_flags(flags); | ||
199 | /* slight chance of a false positive on tracing_cpu */ | ||
200 | if (!irqs_disabled_flags(flags)) | ||
201 | return; | ||
202 | |||
203 | data = tr->data[cpu]; | ||
204 | disabled = atomic_inc_return(&data->disabled); | ||
205 | |||
206 | if (likely(disabled == 1)) { | ||
207 | pc = preempt_count(); | ||
208 | __trace_graph_return(tr, trace, flags, pc); | ||
209 | } | ||
210 | |||
211 | atomic_dec(&data->disabled); | ||
212 | } | ||
213 | |||
214 | static void irqsoff_trace_open(struct trace_iterator *iter) | ||
215 | { | ||
216 | if (is_graph()) | ||
217 | graph_trace_open(iter); | ||
218 | |||
219 | } | ||
220 | |||
221 | static void irqsoff_trace_close(struct trace_iterator *iter) | ||
222 | { | ||
223 | if (iter->private) | ||
224 | graph_trace_close(iter); | ||
225 | } | ||
226 | |||
227 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ | ||
228 | TRACE_GRAPH_PRINT_PROC) | ||
229 | |||
230 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | ||
231 | { | ||
232 | u32 flags = GRAPH_TRACER_FLAGS; | ||
233 | |||
234 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
235 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
236 | else | ||
237 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
238 | |||
239 | /* | ||
240 | * In graph mode, call the graph tracer output function; | ||
241 | * otherwise fall back to the TRACE_FN event handler. | ||
242 | */ | ||
243 | if (is_graph()) | ||
244 | return print_graph_function_flags(iter, flags); | ||
245 | |||
246 | return TRACE_TYPE_UNHANDLED; | ||
247 | } | ||
248 | |||
249 | static void irqsoff_print_header(struct seq_file *s) | ||
250 | { | ||
251 | if (is_graph()) { | ||
252 | struct trace_iterator *iter = s->private; | ||
253 | u32 flags = GRAPH_TRACER_FLAGS; | ||
254 | |||
255 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | ||
256 | /* print nothing if the buffers are empty */ | ||
257 | if (trace_empty(iter)) | ||
258 | return; | ||
259 | |||
260 | print_trace_header(s, iter); | ||
261 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
262 | } else | ||
263 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
264 | |||
265 | print_graph_headers_flags(s, flags); | ||
266 | } else | ||
267 | trace_default_header(s); | ||
268 | } | ||
269 | |||
270 | static void | ||
271 | trace_graph_function(struct trace_array *tr, | ||
272 | unsigned long ip, unsigned long flags, int pc) | ||
273 | { | ||
274 | u64 time = trace_clock_local(); | ||
275 | struct ftrace_graph_ent ent = { | ||
276 | .func = ip, | ||
277 | .depth = 0, | ||
278 | }; | ||
279 | struct ftrace_graph_ret ret = { | ||
280 | .func = ip, | ||
281 | .depth = 0, | ||
282 | .calltime = time, | ||
283 | .rettime = time, | ||
284 | }; | ||
285 | |||
286 | __trace_graph_entry(tr, &ent, flags, pc); | ||
287 | __trace_graph_return(tr, &ret, flags, pc); | ||
288 | } | ||
289 | |||
290 | static void | ||
291 | __trace_function(struct trace_array *tr, | ||
292 | unsigned long ip, unsigned long parent_ip, | ||
293 | unsigned long flags, int pc) | ||
294 | { | ||
295 | if (!is_graph()) | ||
296 | trace_function(tr, ip, parent_ip, flags, pc); | ||
297 | else { | ||
298 | trace_graph_function(tr, parent_ip, flags, pc); | ||
299 | trace_graph_function(tr, ip, flags, pc); | ||
300 | } | ||
301 | } | ||
302 | |||
303 | #else | ||
304 | #define __trace_function trace_function | ||
305 | |||
306 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | ||
307 | { | ||
308 | return -EINVAL; | ||
309 | } | ||
310 | |||
311 | static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) | ||
312 | { | ||
313 | return -1; | ||
314 | } | ||
315 | |||
316 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | ||
317 | { | ||
318 | return TRACE_TYPE_UNHANDLED; | ||
319 | } | ||
320 | |||
321 | static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } | ||
322 | static void irqsoff_print_header(struct seq_file *s) { } | ||
323 | static void irqsoff_trace_open(struct trace_iterator *iter) { } | ||
324 | static void irqsoff_trace_close(struct trace_iterator *iter) { } | ||
325 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
326 | |||
111 | /* | 327 | /* |
112 | * Should this new latency be reported/recorded? | 328 | * Should this new latency be reported/recorded? |
113 | */ | 329 | */ |
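Two details in the block above are easy to misread. First, the !(is_graph() ^ set) test in irqsoff_set_flag() is a compact no-change guard; only when the requested state differs is the tracer stopped in the old mode, the per-cpu tracing_cpu markers and the max-latency snapshot reset, and the tracer restarted in the new mode. An equivalent, more verbose form of the guard:

	if ((is_graph() && set) || (!is_graph() && !set))
		return 0;	/* already in the requested mode */

Second, trace_graph_function() records a matched entry/return pair with calltime == rettime, so a plain one-shot function sample renders in graph output as a zero-duration leaf call.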
@@ -150,7 +366,7 @@ check_critical_timing(struct trace_array *tr, | |||
150 | if (!report_latency(delta)) | 366 | if (!report_latency(delta)) |
151 | goto out_unlock; | 367 | goto out_unlock; |
152 | 368 | ||
153 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 369 | __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
154 | /* Skip 5 functions to get to the irq/preempt enable function */ | 370 | /* Skip 5 functions to get to the irq/preempt enable function */ |
155 | __trace_stack(tr, flags, 5, pc); | 371 | __trace_stack(tr, flags, 5, pc); |
156 | 372 | ||
@@ -172,7 +388,7 @@ out_unlock: | |||
172 | out: | 388 | out: |
173 | data->critical_sequence = max_sequence; | 389 | data->critical_sequence = max_sequence; |
174 | data->preempt_timestamp = ftrace_now(cpu); | 390 | data->preempt_timestamp = ftrace_now(cpu); |
175 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 391 | __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
176 | } | 392 | } |
177 | 393 | ||
178 | static inline void | 394 | static inline void |
@@ -204,7 +420,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
204 | 420 | ||
205 | local_save_flags(flags); | 421 | local_save_flags(flags); |
206 | 422 | ||
207 | trace_function(tr, ip, parent_ip, flags, preempt_count()); | 423 | __trace_function(tr, ip, parent_ip, flags, preempt_count()); |
208 | 424 | ||
209 | per_cpu(tracing_cpu, cpu) = 1; | 425 | per_cpu(tracing_cpu, cpu) = 1; |
210 | 426 | ||
@@ -238,7 +454,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
238 | atomic_inc(&data->disabled); | 454 | atomic_inc(&data->disabled); |
239 | 455 | ||
240 | local_save_flags(flags); | 456 | local_save_flags(flags); |
241 | trace_function(tr, ip, parent_ip, flags, preempt_count()); | 457 | __trace_function(tr, ip, parent_ip, flags, preempt_count()); |
242 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); | 458 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); |
243 | data->critical_start = 0; | 459 | data->critical_start = 0; |
244 | atomic_dec(&data->disabled); | 460 | atomic_dec(&data->disabled); |
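Every direct trace_function() call in the latency paths is rerouted through __trace_function(), the single point that decides between classic function records and the synthesized graph pair. With CONFIG_FUNCTION_GRAPH_TRACER=n the indirection costs nothing, since the earlier #define makes each converted call site compile to exactly what it was before:

	/* with the fallback "#define __trace_function trace_function", */
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	/* preprocesses to the pre-patch call: */
	trace_function(tr, ip, parent_ip, flags, preempt_count());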
@@ -347,19 +563,32 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) | |||
347 | } | 563 | } |
348 | #endif /* CONFIG_PREEMPT_TRACER */ | 564 | #endif /* CONFIG_PREEMPT_TRACER */ |
349 | 565 | ||
350 | static void start_irqsoff_tracer(struct trace_array *tr) | 566 | static int start_irqsoff_tracer(struct trace_array *tr, int graph) |
351 | { | 567 | { |
352 | register_ftrace_function(&trace_ops); | 568 | int ret = 0; |
353 | if (tracing_is_enabled()) | 569 | |
570 | if (!graph) | ||
571 | ret = register_ftrace_function(&trace_ops); | ||
572 | else | ||
573 | ret = register_ftrace_graph(&irqsoff_graph_return, | ||
574 | &irqsoff_graph_entry); | ||
575 | |||
576 | if (!ret && tracing_is_enabled()) | ||
354 | tracer_enabled = 1; | 577 | tracer_enabled = 1; |
355 | else | 578 | else |
356 | tracer_enabled = 0; | 579 | tracer_enabled = 0; |
580 | |||
581 | return ret; | ||
357 | } | 582 | } |
358 | 583 | ||
359 | static void stop_irqsoff_tracer(struct trace_array *tr) | 584 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph) |
360 | { | 585 | { |
361 | tracer_enabled = 0; | 586 | tracer_enabled = 0; |
362 | unregister_ftrace_function(&trace_ops); | 587 | |
588 | if (!graph) | ||
589 | unregister_ftrace_function(&trace_ops); | ||
590 | else | ||
591 | unregister_ftrace_graph(); | ||
363 | } | 592 | } |
364 | 593 | ||
365 | static void __irqsoff_tracer_init(struct trace_array *tr) | 594 | static void __irqsoff_tracer_init(struct trace_array *tr) |
@@ -372,12 +601,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
372 | /* make sure that the tracer is visible */ | 601 | /* make sure that the tracer is visible */ |
373 | smp_wmb(); | 602 | smp_wmb(); |
374 | tracing_reset_online_cpus(tr); | 603 | tracing_reset_online_cpus(tr); |
375 | start_irqsoff_tracer(tr); | 604 | |
605 | if (start_irqsoff_tracer(tr, is_graph())) | ||
606 | printk(KERN_ERR "failed to start irqsoff tracer\n"); | ||
376 | } | 607 | } |
377 | 608 | ||
378 | static void irqsoff_tracer_reset(struct trace_array *tr) | 609 | static void irqsoff_tracer_reset(struct trace_array *tr) |
379 | { | 610 | { |
380 | stop_irqsoff_tracer(tr); | 611 | stop_irqsoff_tracer(tr, is_graph()); |
381 | 612 | ||
382 | if (!save_lat_flag) | 613 | if (!save_lat_flag) |
383 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 614 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; |
@@ -409,9 +640,15 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
409 | .start = irqsoff_tracer_start, | 640 | .start = irqsoff_tracer_start, |
410 | .stop = irqsoff_tracer_stop, | 641 | .stop = irqsoff_tracer_stop, |
411 | .print_max = 1, | 642 | .print_max = 1, |
643 | .print_header = irqsoff_print_header, | ||
644 | .print_line = irqsoff_print_line, | ||
645 | .flags = &tracer_flags, | ||
646 | .set_flag = irqsoff_set_flag, | ||
412 | #ifdef CONFIG_FTRACE_SELFTEST | 647 | #ifdef CONFIG_FTRACE_SELFTEST |
413 | .selftest = trace_selftest_startup_irqsoff, | 648 | .selftest = trace_selftest_startup_irqsoff, |
414 | #endif | 649 | #endif |
650 | .open = irqsoff_trace_open, | ||
651 | .close = irqsoff_trace_close, | ||
415 | }; | 652 | }; |
416 | # define register_irqsoff(trace) register_tracer(&trace) | 653 | # define register_irqsoff(trace) register_tracer(&trace) |
417 | #else | 654 | #else |
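In non-graph mode irqsoff_print_line() returns TRACE_TYPE_UNHANDLED, which tells the core to fall back to its default per-event formatting; thanks to the graph events registered in trace_functions_graph.c, that default path can now render graph records too. A sketch of the assumed fallback logic in the core's print_trace_line() (paraphrased, not the literal trace.c code):

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}
	return print_trace_fmt(iter);	/* generic per-event output */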
@@ -435,9 +672,15 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
435 | .start = irqsoff_tracer_start, | 672 | .start = irqsoff_tracer_start, |
436 | .stop = irqsoff_tracer_stop, | 673 | .stop = irqsoff_tracer_stop, |
437 | .print_max = 1, | 674 | .print_max = 1, |
675 | .print_header = irqsoff_print_header, | ||
676 | .print_line = irqsoff_print_line, | ||
677 | .flags = &tracer_flags, | ||
678 | .set_flag = irqsoff_set_flag, | ||
438 | #ifdef CONFIG_FTRACE_SELFTEST | 679 | #ifdef CONFIG_FTRACE_SELFTEST |
439 | .selftest = trace_selftest_startup_preemptoff, | 680 | .selftest = trace_selftest_startup_preemptoff, |
440 | #endif | 681 | #endif |
682 | .open = irqsoff_trace_open, | ||
683 | .close = irqsoff_trace_close, | ||
441 | }; | 684 | }; |
442 | # define register_preemptoff(trace) register_tracer(&trace) | 685 | # define register_preemptoff(trace) register_tracer(&trace) |
443 | #else | 686 | #else |
@@ -463,9 +706,15 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
463 | .start = irqsoff_tracer_start, | 706 | .start = irqsoff_tracer_start, |
464 | .stop = irqsoff_tracer_stop, | 707 | .stop = irqsoff_tracer_stop, |
465 | .print_max = 1, | 708 | .print_max = 1, |
709 | .print_header = irqsoff_print_header, | ||
710 | .print_line = irqsoff_print_line, | ||
711 | .flags = &tracer_flags, | ||
712 | .set_flag = irqsoff_set_flag, | ||
466 | #ifdef CONFIG_FTRACE_SELFTEST | 713 | #ifdef CONFIG_FTRACE_SELFTEST |
467 | .selftest = trace_selftest_startup_preemptirqsoff, | 714 | .selftest = trace_selftest_startup_preemptirqsoff, |
468 | #endif | 715 | #endif |
716 | .open = irqsoff_trace_open, | ||
717 | .close = irqsoff_trace_close, | ||
469 | }; | 718 | }; |
470 | 719 | ||
471 | # define register_preemptirqsoff(trace) register_tracer(&trace) | 720 | # define register_preemptirqsoff(trace) register_tracer(&trace) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 8e46b3323cdc..2404c129a8c9 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -253,7 +253,7 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len) | |||
253 | void *ret; | 253 | void *ret; |
254 | 254 | ||
255 | if (s->full) | 255 | if (s->full) |
256 | return 0; | 256 | return NULL; |
257 | 257 | ||
258 | if (len > ((PAGE_SIZE - 1) - s->len)) { | 258 | if (len > ((PAGE_SIZE - 1) - s->len)) { |
259 | s->full = 1; | 259 | s->full = 1; |
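The trace_output.c one-liner fixes a type mismatch: trace_seq_reserve() returns void *, so the buffer-full path should yield NULL rather than integer 0. Callers treat a NULL reservation as a truncated line; a sketch of the pattern, where struct my_record is a hypothetical payload:

	void *slot = trace_seq_reserve(s, sizeof(struct my_record));

	if (!slot)	/* seq buffer exhausted */
		return TRACE_TYPE_PARTIAL_LINE;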
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 81003b4d617f..6a9d36ddfcf2 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -30,7 +30,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) | |||
30 | struct trace_entry *entry; | 30 | struct trace_entry *entry; |
31 | unsigned int loops = 0; | 31 | unsigned int loops = 0; |
32 | 32 | ||
33 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { | 33 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) { |
34 | entry = ring_buffer_event_data(event); | 34 | entry = ring_buffer_event_data(event); |
35 | 35 | ||
36 | /* | 36 | /* |
@@ -256,7 +256,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
256 | /* Maximum number of functions to trace before diagnosing a hang */ | 256 | /* Maximum number of functions to trace before diagnosing a hang */ |
257 | #define GRAPH_MAX_FUNC_TEST 100000000 | 257 | #define GRAPH_MAX_FUNC_TEST 100000000 |
258 | 258 | ||
259 | static void __ftrace_dump(bool disable_tracing); | 259 | static void |
260 | __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode); | ||
260 | static unsigned int graph_hang_thresh; | 261 | static unsigned int graph_hang_thresh; |
261 | 262 | ||
262 | /* Wrap the real function entry probe to avoid possible hanging */ | 263 | /* Wrap the real function entry probe to avoid possible hanging */ |
@@ -267,7 +268,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) | |||
267 | ftrace_graph_stop(); | 268 | ftrace_graph_stop(); |
268 | printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); | 269 | printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); |
269 | if (ftrace_dump_on_oops) | 270 | if (ftrace_dump_on_oops) |
270 | __ftrace_dump(false); | 271 | __ftrace_dump(false, DUMP_ALL); |
271 | return 0; | 272 | return 0; |
272 | } | 273 | } |
273 | 274 | ||
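__ftrace_dump() gains an enum ftrace_dump_mode argument, and the graph-hang watchdog passes DUMP_ALL to preserve the old dump-everything behavior. The enum itself is introduced elsewhere in this series (believed to live in include/linux/kernel.h) and, for reference, looks like:

	enum ftrace_dump_mode {
		DUMP_NONE,
		DUMP_ALL,	/* dump every cpu's buffer (old behavior) */
		DUMP_ORIG,	/* dump only the cpu that hit the oops */
	};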