Diffstat (limited to 'kernel')
27 files changed, 3061 insertions, 1353 deletions
diff --git a/kernel/relay.c b/kernel/relay.c
index 9d79b7854fa6..edc0ba6d8160 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
| @@ -677,9 +677,7 @@ int relay_late_setup_files(struct rchan *chan, | |||
| 677 | */ | 677 | */ |
| 678 | for_each_online_cpu(i) { | 678 | for_each_online_cpu(i) { |
| 679 | if (unlikely(!chan->buf[i])) { | 679 | if (unlikely(!chan->buf[i])) { |
| 680 | printk(KERN_ERR "relay_late_setup_files: CPU %u " | 680 | WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n"); |
| 681 | "has no buffer, it must have!\n", i); | ||
| 682 | BUG(); | ||
| 683 | err = -EINVAL; | 681 | err = -EINVAL; |
| 684 | break; | 682 | break; |
| 685 | } | 683 | } |
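Net effect of the relay.c hunk above: the missing-buffer case no longer kills the kernel with BUG(); it emits a one-time warning and lets relay_late_setup_files() fail with -EINVAL. A consolidated sketch of the check after the patch (pieced together from the lines above; the surrounding code is unchanged):

	for_each_online_cpu(i) {
		if (unlikely(!chan->buf[i])) {
			/* warn once instead of BUG(), then bail out with an error */
			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
			err = -EINVAL;
			break;
		}
		...
	}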
diff --git a/kernel/sched.c b/kernel/sched.c
index 242d0d47a70d..566c8c9e3a6d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -4409,10 +4409,7 @@ void scheduler_tick(void) | |||
| 4409 | #endif | 4409 | #endif |
| 4410 | } | 4410 | } |
| 4411 | 4411 | ||
| 4412 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ | 4412 | unsigned long get_parent_ip(unsigned long addr) |
| 4413 | defined(CONFIG_PREEMPT_TRACER)) | ||
| 4414 | |||
| 4415 | static inline unsigned long get_parent_ip(unsigned long addr) | ||
| 4416 | { | 4413 | { |
| 4417 | if (in_lock_functions(addr)) { | 4414 | if (in_lock_functions(addr)) { |
| 4418 | addr = CALLER_ADDR2; | 4415 | addr = CALLER_ADDR2; |
| @@ -4422,6 +4419,9 @@ static inline unsigned long get_parent_ip(unsigned long addr) | |||
| 4422 | return addr; | 4419 | return addr; |
| 4423 | } | 4420 | } |
| 4424 | 4421 | ||
| 4422 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ | ||
| 4423 | defined(CONFIG_PREEMPT_TRACER)) | ||
| 4424 | |||
| 4425 | void __kprobes add_preempt_count(int val) | 4425 | void __kprobes add_preempt_count(int val) |
| 4426 | { | 4426 | { |
| 4427 | #ifdef CONFIG_DEBUG_PREEMPT | 4427 | #ifdef CONFIG_DEBUG_PREEMPT |
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de9cd8d..6edfc2c11d99 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/freezer.h> | 21 | #include <linux/freezer.h> |
| 22 | #include <linux/kthread.h> | 22 | #include <linux/kthread.h> |
| 23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
| 24 | #include <linux/ftrace.h> | ||
| 24 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
| 25 | #include <linux/tick.h> | 26 | #include <linux/tick.h> |
| 26 | 27 | ||
| @@ -79,13 +80,23 @@ static void __local_bh_disable(unsigned long ip) | |||
| 79 | WARN_ON_ONCE(in_irq()); | 80 | WARN_ON_ONCE(in_irq()); |
| 80 | 81 | ||
| 81 | raw_local_irq_save(flags); | 82 | raw_local_irq_save(flags); |
| 82 | add_preempt_count(SOFTIRQ_OFFSET); | 83 | /* |
| 84 | * The preempt tracer hooks into add_preempt_count and will break | ||
| 85 | * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET | ||
| 86 | * is set and before current->softirq_enabled is cleared. | ||
| 87 | * We must manually increment preempt_count here and manually | ||
| 88 | * call the trace_preempt_off later. | ||
| 89 | */ | ||
| 90 | preempt_count() += SOFTIRQ_OFFSET; | ||
| 83 | /* | 91 | /* |
| 84 | * Were softirqs turned off above: | 92 | * Were softirqs turned off above: |
| 85 | */ | 93 | */ |
| 86 | if (softirq_count() == SOFTIRQ_OFFSET) | 94 | if (softirq_count() == SOFTIRQ_OFFSET) |
| 87 | trace_softirqs_off(ip); | 95 | trace_softirqs_off(ip); |
| 88 | raw_local_irq_restore(flags); | 96 | raw_local_irq_restore(flags); |
| 97 | |||
| 98 | if (preempt_count() == SOFTIRQ_OFFSET) | ||
| 99 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | ||
| 89 | } | 100 | } |
| 90 | #else /* !CONFIG_TRACE_IRQFLAGS */ | 101 | #else /* !CONFIG_TRACE_IRQFLAGS */ |
| 91 | static inline void __local_bh_disable(unsigned long ip) | 102 | static inline void __local_bh_disable(unsigned long ip) |
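The comment added in the hunk above explains the core trick: calling add_preempt_count() here would let the preempt-off tracer call back into lockdep after SOFTIRQ_OFFSET is raised but before the softirq state is consistent, so the count is bumped directly and trace_preempt_off() is invoked by hand once interrupts are restored (using the now-exported get_parent_ip() from sched.c). For readability, a sketch of __local_bh_disable() as it ends up after the patch, assembled from the hunk above (comments shortened):

	static void __local_bh_disable(unsigned long ip)
	{
		unsigned long flags;

		WARN_ON_ONCE(in_irq());

		raw_local_irq_save(flags);
		/* bump the count directly so the preempt tracer is not entered here */
		preempt_count() += SOFTIRQ_OFFSET;
		/* were softirqs turned off above: */
		if (softirq_count() == SOFTIRQ_OFFSET)
			trace_softirqs_off(ip);
		raw_local_irq_restore(flags);

		/* now it is safe to let the preempt tracer record the off event */
		if (preempt_count() == SOFTIRQ_OFFSET)
			trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	}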
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..28f2644484d9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
| @@ -164,9 +164,8 @@ config BOOT_TRACER | |||
| 164 | representation of the delays during initcalls - but the raw | 164 | representation of the delays during initcalls - but the raw |
| 165 | /debug/tracing/trace text output is readable too. | 165 | /debug/tracing/trace text output is readable too. |
| 166 | 166 | ||
| 167 | ( Note that tracing self tests can't be enabled if this tracer is | 167 | You must pass in ftrace=initcall to the kernel command line |
| 168 | selected, because the self-tests are an initcall as well and that | 168 | to enable this on bootup. |
| 169 | would invalidate the boot trace. ) | ||
| 170 | 169 | ||
| 171 | config TRACE_BRANCH_PROFILING | 170 | config TRACE_BRANCH_PROFILING |
| 172 | bool "Trace likely/unlikely profiler" | 171 | bool "Trace likely/unlikely profiler" |
| @@ -264,6 +263,38 @@ config HW_BRANCH_TRACER | |||
| 264 | This tracer records all branches on the system in a circular | 263 | This tracer records all branches on the system in a circular |
| 265 | buffer giving access to the last N branches for each cpu. | 264 | buffer giving access to the last N branches for each cpu. |
| 266 | 265 | ||
| 266 | config KMEMTRACE | ||
| 267 | bool "Trace SLAB allocations" | ||
| 268 | select TRACING | ||
| 269 | help | ||
| 270 | kmemtrace provides tracing for slab allocator functions, such as | ||
| 271 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected | ||
| 272 | data is then fed to the userspace application in order to analyse | ||
| 273 | allocation hotspots, internal fragmentation and so on, making it | ||
| 274 | possible to see how well an allocator performs, as well as debug | ||
| 275 | and profile kernel code. | ||
| 276 | |||
| 277 | This requires a userspace application to use it. See | ||
| 278 | Documentation/vm/kmemtrace.txt for more information. | ||
| 279 | |||
| 280 | Saying Y will make the kernel somewhat larger and slower. However, | ||
| 281 | if you disable kmemtrace at run-time or boot-time, the performance | ||
| 282 | impact is minimal (depending on the arch the kernel is built for). | ||
| 283 | |||
| 284 | If unsure, say N. | ||
| 285 | |||
| 286 | config WORKQUEUE_TRACER | ||
| 287 | bool "Trace workqueues" | ||
| 288 | select TRACING | ||
| 289 | help | ||
| 290 | The workqueue tracer provides some statistical information | ||
| 291 | about each cpu workqueue thread, such as the number of works | ||
| 292 | inserted and executed since its creation. It can help | ||
| 293 | evaluate the amount of work each thread has to perform. | ||
| 294 | For example, it can help a developer decide whether to use | ||
| 295 | a per-cpu workqueue instead of a singlethreaded one. | ||
| 296 | |||
| 297 | |||
| 267 | config DYNAMIC_FTRACE | 298 | config DYNAMIC_FTRACE |
| 268 | bool "enable/disable ftrace tracepoints dynamically" | 299 | bool "enable/disable ftrace tracepoints dynamically" |
| 269 | depends on FUNCTION_TRACER | 300 | depends on FUNCTION_TRACER |
| @@ -294,7 +325,7 @@ config FTRACE_SELFTEST | |||
| 294 | 325 | ||
| 295 | config FTRACE_STARTUP_TEST | 326 | config FTRACE_STARTUP_TEST |
| 296 | bool "Perform a startup test on ftrace" | 327 | bool "Perform a startup test on ftrace" |
| 297 | depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER | 328 | depends on TRACING && DEBUG_KERNEL |
| 298 | select FTRACE_SELFTEST | 329 | select FTRACE_SELFTEST |
| 299 | help | 330 | help |
| 300 | This option performs a series of startup tests on ftrace. On bootup | 331 | This option performs a series of startup tests on ftrace. On bootup |
| @@ -302,4 +333,27 @@ config FTRACE_STARTUP_TEST | |||
| 302 | functioning properly. It will do tests on all the configured | 333 | functioning properly. It will do tests on all the configured |
| 303 | tracers of ftrace. | 334 | tracers of ftrace. |
| 304 | 335 | ||
| 336 | config MMIOTRACE | ||
| 337 | bool "Memory mapped IO tracing" | ||
| 338 | depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI | ||
| 339 | select TRACING | ||
| 340 | help | ||
| 341 | Mmiotrace traces Memory Mapped I/O access and is meant for | ||
| 342 | debugging and reverse engineering. It is called from the ioremap | ||
| 343 | implementation and works via page faults. Tracing is disabled by | ||
| 344 | default and can be enabled at run-time. | ||
| 345 | |||
| 346 | See Documentation/tracers/mmiotrace.txt. | ||
| 347 | If you are not helping to develop drivers, say N. | ||
| 348 | |||
| 349 | config MMIOTRACE_TEST | ||
| 350 | tristate "Test module for mmiotrace" | ||
| 351 | depends on MMIOTRACE && m | ||
| 352 | help | ||
| 353 | This is a dumb module for testing mmiotrace. It is very dangerous | ||
| 354 | as it will write garbage to IO memory starting at a given address. | ||
| 355 | However, it should be safe to use on, e.g., an unused portion of VRAM. | ||
| 356 | |||
| 357 | Say N, unless you absolutely know what you are doing. | ||
| 358 | |||
| 305 | endmenu | 359 | endmenu |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 349d5a93653f..f76d48f3527d 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
| @@ -19,6 +19,8 @@ obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o | |||
| 19 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o | 19 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o |
| 20 | 20 | ||
| 21 | obj-$(CONFIG_TRACING) += trace.o | 21 | obj-$(CONFIG_TRACING) += trace.o |
| 22 | obj-$(CONFIG_TRACING) += trace_output.o | ||
| 23 | obj-$(CONFIG_TRACING) += trace_stat.o | ||
| 22 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o | 24 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o |
| 23 | obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o | 25 | obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o |
| 24 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o | 26 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o |
| @@ -33,5 +35,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o | |||
| 33 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | 35 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o |
| 34 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o | 36 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o |
| 35 | obj-$(CONFIG_POWER_TRACER) += trace_power.o | 37 | obj-$(CONFIG_POWER_TRACER) += trace_power.o |
| 38 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | ||
| 39 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o | ||
| 36 | 40 | ||
| 37 | libftrace-y := ftrace.o | 41 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7dcf6e9f2b04..68610031780b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
| @@ -264,14 +264,6 @@ static void ftrace_update_pid_func(void) | |||
| 264 | # error Dynamic ftrace depends on MCOUNT_RECORD | 264 | # error Dynamic ftrace depends on MCOUNT_RECORD |
| 265 | #endif | 265 | #endif |
| 266 | 266 | ||
| 267 | /* | ||
| 268 | * Since MCOUNT_ADDR may point to mcount itself, we do not want | ||
| 269 | * to get it confused by reading a reference in the code as we | ||
| 270 | * are parsing on objcopy output of text. Use a variable for | ||
| 271 | * it instead. | ||
| 272 | */ | ||
| 273 | static unsigned long mcount_addr = MCOUNT_ADDR; | ||
| 274 | |||
| 275 | enum { | 267 | enum { |
| 276 | FTRACE_ENABLE_CALLS = (1 << 0), | 268 | FTRACE_ENABLE_CALLS = (1 << 0), |
| 277 | FTRACE_DISABLE_CALLS = (1 << 1), | 269 | FTRACE_DISABLE_CALLS = (1 << 1), |
| @@ -290,7 +282,7 @@ static DEFINE_MUTEX(ftrace_regex_lock); | |||
| 290 | 282 | ||
| 291 | struct ftrace_page { | 283 | struct ftrace_page { |
| 292 | struct ftrace_page *next; | 284 | struct ftrace_page *next; |
| 293 | unsigned long index; | 285 | int index; |
| 294 | struct dyn_ftrace records[]; | 286 | struct dyn_ftrace records[]; |
| 295 | }; | 287 | }; |
| 296 | 288 | ||
| @@ -464,7 +456,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
| 464 | unsigned long ip, fl; | 456 | unsigned long ip, fl; |
| 465 | unsigned long ftrace_addr; | 457 | unsigned long ftrace_addr; |
| 466 | 458 | ||
| 467 | ftrace_addr = (unsigned long)ftrace_caller; | 459 | ftrace_addr = (unsigned long)FTRACE_ADDR; |
| 468 | 460 | ||
| 469 | ip = rec->ip; | 461 | ip = rec->ip; |
| 470 | 462 | ||
| @@ -576,7 +568,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
| 576 | 568 | ||
| 577 | ip = rec->ip; | 569 | ip = rec->ip; |
| 578 | 570 | ||
| 579 | ret = ftrace_make_nop(mod, rec, mcount_addr); | 571 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
| 580 | if (ret) { | 572 | if (ret) { |
| 581 | ftrace_bug(ret, ip); | 573 | ftrace_bug(ret, ip); |
| 582 | rec->flags |= FTRACE_FL_FAILED; | 574 | rec->flags |= FTRACE_FL_FAILED; |
| @@ -787,7 +779,7 @@ enum { | |||
| 787 | 779 | ||
| 788 | struct ftrace_iterator { | 780 | struct ftrace_iterator { |
| 789 | struct ftrace_page *pg; | 781 | struct ftrace_page *pg; |
| 790 | unsigned idx; | 782 | int idx; |
| 791 | unsigned flags; | 783 | unsigned flags; |
| 792 | unsigned char buffer[FTRACE_BUFF_MAX+1]; | 784 | unsigned char buffer[FTRACE_BUFF_MAX+1]; |
| 793 | unsigned buffer_idx; | 785 | unsigned buffer_idx; |
| @@ -1737,9 +1729,12 @@ static void clear_ftrace_pid(struct pid *pid) | |||
| 1737 | { | 1729 | { |
| 1738 | struct task_struct *p; | 1730 | struct task_struct *p; |
| 1739 | 1731 | ||
| 1732 | rcu_read_lock(); | ||
| 1740 | do_each_pid_task(pid, PIDTYPE_PID, p) { | 1733 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
| 1741 | clear_tsk_trace_trace(p); | 1734 | clear_tsk_trace_trace(p); |
| 1742 | } while_each_pid_task(pid, PIDTYPE_PID, p); | 1735 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
| 1736 | rcu_read_unlock(); | ||
| 1737 | |||
| 1743 | put_pid(pid); | 1738 | put_pid(pid); |
| 1744 | } | 1739 | } |
| 1745 | 1740 | ||
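The do_each_pid_task()/while_each_pid_task() walk is expected to run under rcu_read_lock() (or tasklist_lock), so this hunk and the next wrap the PID-task walks in an RCU read-side critical section. A consolidated sketch of clear_ftrace_pid() after the change, assembled from the hunk above:

	static void clear_ftrace_pid(struct pid *pid)
	{
		struct task_struct *p;

		rcu_read_lock();
		do_each_pid_task(pid, PIDTYPE_PID, p) {
			clear_tsk_trace_trace(p);
		} while_each_pid_task(pid, PIDTYPE_PID, p);
		rcu_read_unlock();

		put_pid(pid);
	}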
| @@ -1747,9 +1742,11 @@ static void set_ftrace_pid(struct pid *pid) | |||
| 1747 | { | 1742 | { |
| 1748 | struct task_struct *p; | 1743 | struct task_struct *p; |
| 1749 | 1744 | ||
| 1745 | rcu_read_lock(); | ||
| 1750 | do_each_pid_task(pid, PIDTYPE_PID, p) { | 1746 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
| 1751 | set_tsk_trace_trace(p); | 1747 | set_tsk_trace_trace(p); |
| 1752 | } while_each_pid_task(pid, PIDTYPE_PID, p); | 1748 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
| 1749 | rcu_read_unlock(); | ||
| 1753 | } | 1750 | } |
| 1754 | 1751 | ||
| 1755 | static void clear_ftrace_pid_task(struct pid **pid) | 1752 | static void clear_ftrace_pid_task(struct pid **pid) |
| @@ -1903,7 +1900,7 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
| 1903 | } | 1900 | } |
| 1904 | 1901 | ||
| 1905 | /** | 1902 | /** |
| 1906 | * unregister_ftrace_function - unresgister a function for profiling. | 1903 | * unregister_ftrace_function - unregister a function for profiling. |
| 1907 | * @ops - ops structure that holds the function to unregister | 1904 | * @ops - ops structure that holds the function to unregister |
| 1908 | * | 1905 | * |
| 1909 | * Unregister a function that was added to be called by ftrace profiling. | 1906 | * Unregister a function that was added to be called by ftrace profiling. |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
new file mode 100644
index 000000000000..f04c0625f1cd
--- /dev/null
+++ b/kernel/trace/kmemtrace.c
| @@ -0,0 +1,350 @@ | |||
| 1 | /* | ||
| 2 | * Memory allocator tracing | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
| 5 | * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi> | ||
| 6 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/dcache.h> | ||
| 10 | #include <linux/debugfs.h> | ||
| 11 | #include <linux/fs.h> | ||
| 12 | #include <linux/seq_file.h> | ||
| 13 | #include <trace/kmemtrace.h> | ||
| 14 | |||
| 15 | #include "trace.h" | ||
| 16 | #include "trace_output.h" | ||
| 17 | |||
| 18 | /* Select an alternative, minimalistic output instead of the original one */ | ||
| 19 | #define TRACE_KMEM_OPT_MINIMAL 0x1 | ||
| 20 | |||
| 21 | static struct tracer_opt kmem_opts[] = { | ||
| 22 | /* Disable the minimalistic output by default */ | ||
| 23 | { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) }, | ||
| 24 | { } | ||
| 25 | }; | ||
| 26 | |||
| 27 | static struct tracer_flags kmem_tracer_flags = { | ||
| 28 | .val = 0, | ||
| 29 | .opts = kmem_opts | ||
| 30 | }; | ||
| 31 | |||
| 32 | |||
| 33 | static bool kmem_tracing_enabled __read_mostly; | ||
| 34 | static struct trace_array *kmemtrace_array; | ||
| 35 | |||
| 36 | static int kmem_trace_init(struct trace_array *tr) | ||
| 37 | { | ||
| 38 | int cpu; | ||
| 39 | kmemtrace_array = tr; | ||
| 40 | |||
| 41 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
| 42 | tracing_reset(tr, cpu); | ||
| 43 | |||
| 44 | kmem_tracing_enabled = true; | ||
| 45 | |||
| 46 | return 0; | ||
| 47 | } | ||
| 48 | |||
| 49 | static void kmem_trace_reset(struct trace_array *tr) | ||
| 50 | { | ||
| 51 | kmem_tracing_enabled = false; | ||
| 52 | } | ||
| 53 | |||
| 54 | static void kmemtrace_headers(struct seq_file *s) | ||
| 55 | { | ||
| 56 | /* Don't need headers for the original kmemtrace output */ | ||
| 57 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
| 58 | return; | ||
| 59 | |||
| 60 | seq_printf(s, "#\n"); | ||
| 61 | seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " | ||
| 62 | " POINTER NODE CALLER\n"); | ||
| 63 | seq_printf(s, "# FREE | | | | " | ||
| 64 | " | | | |\n"); | ||
| 65 | seq_printf(s, "# |\n\n"); | ||
| 66 | } | ||
| 67 | |||
| 68 | /* | ||
| 69 | * The two following functions give the original output from kmemtrace, | ||
| 70 | * or something close to it; they may still be missing a few details. | ||
| 71 | */ | ||
| 72 | static enum print_line_t | ||
| 73 | kmemtrace_print_alloc_original(struct trace_iterator *iter, | ||
| 74 | struct kmemtrace_alloc_entry *entry) | ||
| 75 | { | ||
| 76 | struct trace_seq *s = &iter->seq; | ||
| 77 | int ret; | ||
| 78 | |||
| 79 | /* Taken from the old linux/kmemtrace.h */ | ||
| 80 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu " | ||
| 81 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", | ||
| 82 | entry->type_id, entry->call_site, (unsigned long) entry->ptr, | ||
| 83 | (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc, | ||
| 84 | (unsigned long) entry->gfp_flags, entry->node); | ||
| 85 | |||
| 86 | if (!ret) | ||
| 87 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 88 | |||
| 89 | return TRACE_TYPE_HANDLED; | ||
| 90 | } | ||
| 91 | |||
| 92 | static enum print_line_t | ||
| 93 | kmemtrace_print_free_original(struct trace_iterator *iter, | ||
| 94 | struct kmemtrace_free_entry *entry) | ||
| 95 | { | ||
| 96 | struct trace_seq *s = &iter->seq; | ||
| 97 | int ret; | ||
| 98 | |||
| 99 | /* Taken from the old linux/kmemtrace.h */ | ||
| 100 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n", | ||
| 101 | entry->type_id, entry->call_site, (unsigned long) entry->ptr); | ||
| 102 | |||
| 103 | if (!ret) | ||
| 104 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 105 | |||
| 106 | return TRACE_TYPE_HANDLED; | ||
| 107 | } | ||
| 108 | |||
| 109 | |||
| 110 | /* The following two functions provide a more minimalistic output */ | ||
| 111 | static enum print_line_t | ||
| 112 | kmemtrace_print_alloc_compress(struct trace_iterator *iter, | ||
| 113 | struct kmemtrace_alloc_entry *entry) | ||
| 114 | { | ||
| 115 | struct trace_seq *s = &iter->seq; | ||
| 116 | int ret; | ||
| 117 | |||
| 118 | /* Alloc entry */ | ||
| 119 | ret = trace_seq_printf(s, " + "); | ||
| 120 | if (!ret) | ||
| 121 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 122 | |||
| 123 | /* Type */ | ||
| 124 | switch (entry->type_id) { | ||
| 125 | case KMEMTRACE_TYPE_KMALLOC: | ||
| 126 | ret = trace_seq_printf(s, "K "); | ||
| 127 | break; | ||
| 128 | case KMEMTRACE_TYPE_CACHE: | ||
| 129 | ret = trace_seq_printf(s, "C "); | ||
| 130 | break; | ||
| 131 | case KMEMTRACE_TYPE_PAGES: | ||
| 132 | ret = trace_seq_printf(s, "P "); | ||
| 133 | break; | ||
| 134 | default: | ||
| 135 | ret = trace_seq_printf(s, "? "); | ||
| 136 | } | ||
| 137 | |||
| 138 | if (!ret) | ||
| 139 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 140 | |||
| 141 | /* Requested */ | ||
| 142 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_req); | ||
| 143 | if (!ret) | ||
| 144 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 145 | |||
| 146 | /* Allocated */ | ||
| 147 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc); | ||
| 148 | if (!ret) | ||
| 149 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 150 | |||
| 151 | /* Flags | ||
| 152 | * TODO: it would be better to print the names of the GFP flags | ||
| 153 | */ | ||
| 154 | ret = trace_seq_printf(s, "%08x ", entry->gfp_flags); | ||
| 155 | if (!ret) | ||
| 156 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 157 | |||
| 158 | /* Pointer to allocated */ | ||
| 159 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
| 160 | if (!ret) | ||
| 161 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 162 | |||
| 163 | /* Node */ | ||
| 164 | ret = trace_seq_printf(s, "%4d ", entry->node); | ||
| 165 | if (!ret) | ||
| 166 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 167 | |||
| 168 | /* Call site */ | ||
| 169 | ret = seq_print_ip_sym(s, entry->call_site, 0); | ||
| 170 | if (!ret) | ||
| 171 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 172 | |||
| 173 | if (!trace_seq_printf(s, "\n")) | ||
| 174 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 175 | |||
| 176 | return TRACE_TYPE_HANDLED; | ||
| 177 | } | ||
| 178 | |||
| 179 | static enum print_line_t | ||
| 180 | kmemtrace_print_free_compress(struct trace_iterator *iter, | ||
| 181 | struct kmemtrace_free_entry *entry) | ||
| 182 | { | ||
| 183 | struct trace_seq *s = &iter->seq; | ||
| 184 | int ret; | ||
| 185 | |||
| 186 | /* Free entry */ | ||
| 187 | ret = trace_seq_printf(s, " - "); | ||
| 188 | if (!ret) | ||
| 189 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 190 | |||
| 191 | /* Type */ | ||
| 192 | switch (entry->type_id) { | ||
| 193 | case KMEMTRACE_TYPE_KMALLOC: | ||
| 194 | ret = trace_seq_printf(s, "K "); | ||
| 195 | break; | ||
| 196 | case KMEMTRACE_TYPE_CACHE: | ||
| 197 | ret = trace_seq_printf(s, "C "); | ||
| 198 | break; | ||
| 199 | case KMEMTRACE_TYPE_PAGES: | ||
| 200 | ret = trace_seq_printf(s, "P "); | ||
| 201 | break; | ||
| 202 | default: | ||
| 203 | ret = trace_seq_printf(s, "? "); | ||
| 204 | } | ||
| 205 | |||
| 206 | if (!ret) | ||
| 207 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 208 | |||
| 209 | /* Skip requested/allocated/flags */ | ||
| 210 | ret = trace_seq_printf(s, " "); | ||
| 211 | if (!ret) | ||
| 212 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 213 | |||
| 214 | /* Pointer to allocated */ | ||
| 215 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
| 216 | if (!ret) | ||
| 217 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 218 | |||
| 219 | /* Skip node */ | ||
| 220 | ret = trace_seq_printf(s, " "); | ||
| 221 | if (!ret) | ||
| 222 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 223 | |||
| 224 | /* Call site */ | ||
| 225 | ret = seq_print_ip_sym(s, entry->call_site, 0); | ||
| 226 | if (!ret) | ||
| 227 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 228 | |||
| 229 | if (!trace_seq_printf(s, "\n")) | ||
| 230 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 231 | |||
| 232 | return TRACE_TYPE_HANDLED; | ||
| 233 | } | ||
| 234 | |||
| 235 | static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | ||
| 236 | { | ||
| 237 | struct trace_entry *entry = iter->ent; | ||
| 238 | |||
| 239 | switch (entry->type) { | ||
| 240 | case TRACE_KMEM_ALLOC: { | ||
| 241 | struct kmemtrace_alloc_entry *field; | ||
| 242 | trace_assign_type(field, entry); | ||
| 243 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | ||
| 244 | return kmemtrace_print_alloc_compress(iter, field); | ||
| 245 | else | ||
| 246 | return kmemtrace_print_alloc_original(iter, field); | ||
| 247 | } | ||
| 248 | |||
| 249 | case TRACE_KMEM_FREE: { | ||
| 250 | struct kmemtrace_free_entry *field; | ||
| 251 | trace_assign_type(field, entry); | ||
| 252 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | ||
| 253 | return kmemtrace_print_free_compress(iter, field); | ||
| 254 | else | ||
| 255 | return kmemtrace_print_free_original(iter, field); | ||
| 256 | } | ||
| 257 | |||
| 258 | default: | ||
| 259 | return TRACE_TYPE_UNHANDLED; | ||
| 260 | } | ||
| 261 | } | ||
| 262 | |||
| 263 | /* Trace allocations */ | ||
| 264 | void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, | ||
| 265 | unsigned long call_site, | ||
| 266 | const void *ptr, | ||
| 267 | size_t bytes_req, | ||
| 268 | size_t bytes_alloc, | ||
| 269 | gfp_t gfp_flags, | ||
| 270 | int node) | ||
| 271 | { | ||
| 272 | struct ring_buffer_event *event; | ||
| 273 | struct kmemtrace_alloc_entry *entry; | ||
| 274 | struct trace_array *tr = kmemtrace_array; | ||
| 275 | unsigned long irq_flags; | ||
| 276 | |||
| 277 | if (!kmem_tracing_enabled) | ||
| 278 | return; | ||
| 279 | |||
| 280 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
| 281 | &irq_flags); | ||
| 282 | if (!event) | ||
| 283 | return; | ||
| 284 | entry = ring_buffer_event_data(event); | ||
| 285 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
| 286 | |||
| 287 | entry->ent.type = TRACE_KMEM_ALLOC; | ||
| 288 | entry->call_site = call_site; | ||
| 289 | entry->ptr = ptr; | ||
| 290 | entry->bytes_req = bytes_req; | ||
| 291 | entry->bytes_alloc = bytes_alloc; | ||
| 292 | entry->gfp_flags = gfp_flags; | ||
| 293 | entry->node = node; | ||
| 294 | |||
| 295 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
| 296 | |||
| 297 | trace_wake_up(); | ||
| 298 | } | ||
| 299 | EXPORT_SYMBOL(kmemtrace_mark_alloc_node); | ||
| 300 | |||
| 301 | void kmemtrace_mark_free(enum kmemtrace_type_id type_id, | ||
| 302 | unsigned long call_site, | ||
| 303 | const void *ptr) | ||
| 304 | { | ||
| 305 | struct ring_buffer_event *event; | ||
| 306 | struct kmemtrace_free_entry *entry; | ||
| 307 | struct trace_array *tr = kmemtrace_array; | ||
| 308 | unsigned long irq_flags; | ||
| 309 | |||
| 310 | if (!kmem_tracing_enabled) | ||
| 311 | return; | ||
| 312 | |||
| 313 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
| 314 | &irq_flags); | ||
| 315 | if (!event) | ||
| 316 | return; | ||
| 317 | entry = ring_buffer_event_data(event); | ||
| 318 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
| 319 | |||
| 320 | entry->ent.type = TRACE_KMEM_FREE; | ||
| 321 | entry->type_id = type_id; | ||
| 322 | entry->call_site = call_site; | ||
| 323 | entry->ptr = ptr; | ||
| 324 | |||
| 325 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
| 326 | |||
| 327 | trace_wake_up(); | ||
| 328 | } | ||
| 329 | EXPORT_SYMBOL(kmemtrace_mark_free); | ||
| 330 | |||
| 331 | static struct tracer kmem_tracer __read_mostly = { | ||
| 332 | .name = "kmemtrace", | ||
| 333 | .init = kmem_trace_init, | ||
| 334 | .reset = kmem_trace_reset, | ||
| 335 | .print_line = kmemtrace_print_line, | ||
| 336 | .print_header = kmemtrace_headers, | ||
| 337 | .flags = &kmem_tracer_flags | ||
| 338 | }; | ||
| 339 | |||
| 340 | void kmemtrace_init(void) | ||
| 341 | { | ||
| 342 | /* earliest opportunity to start kmem tracing */ | ||
| 343 | } | ||
| 344 | |||
| 345 | static int __init init_kmem_tracer(void) | ||
| 346 | { | ||
| 347 | return register_tracer(&kmem_tracer); | ||
| 348 | } | ||
| 349 | |||
| 350 | device_initcall(init_kmem_tracer); | ||
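kmemtrace_mark_alloc_node() and kmemtrace_mark_free() are the exported hooks the slab allocators are expected to call. A hedged, illustrative sketch of how an allocator-side wrapper might feed them; the wrapper names and the use of _RET_IP_, ksize() and numa_node_id() are assumptions for illustration, not part of this patch (only the hook signatures and KMEMTRACE_TYPE_KMALLOC come from the file above):

	#include <linux/slab.h>
	#include <trace/kmemtrace.h>

	/* illustrative only: record an allocation event for kmemtrace */
	static inline void *kmalloc_traced(size_t size, gfp_t flags)
	{
		void *ptr = kmalloc(size, flags);

		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr,
					  size, ptr ? ksize(ptr) : 0, flags,
					  numa_node_id());
		return ptr;
	}

	/* illustrative only: record the matching free event */
	static inline void kfree_traced(const void *ptr)
	{
		kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr);
		kfree(ptr);
	}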
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bd38c5cfd8ad..b36d7374ceef 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
| @@ -123,8 +123,7 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) | |||
| 123 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); | 123 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); |
| 124 | 124 | ||
| 125 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) | 125 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) |
| 126 | #define RB_ALIGNMENT_SHIFT 2 | 126 | #define RB_ALIGNMENT 4U |
| 127 | #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT) | ||
| 128 | #define RB_MAX_SMALL_DATA 28 | 127 | #define RB_MAX_SMALL_DATA 28 |
| 129 | 128 | ||
| 130 | enum { | 129 | enum { |
| @@ -133,7 +132,7 @@ enum { | |||
| 133 | }; | 132 | }; |
| 134 | 133 | ||
| 135 | /* inline for ring buffer fast paths */ | 134 | /* inline for ring buffer fast paths */ |
| 136 | static inline unsigned | 135 | static unsigned |
| 137 | rb_event_length(struct ring_buffer_event *event) | 136 | rb_event_length(struct ring_buffer_event *event) |
| 138 | { | 137 | { |
| 139 | unsigned length; | 138 | unsigned length; |
| @@ -151,7 +150,7 @@ rb_event_length(struct ring_buffer_event *event) | |||
| 151 | 150 | ||
| 152 | case RINGBUF_TYPE_DATA: | 151 | case RINGBUF_TYPE_DATA: |
| 153 | if (event->len) | 152 | if (event->len) |
| 154 | length = event->len << RB_ALIGNMENT_SHIFT; | 153 | length = event->len * RB_ALIGNMENT; |
| 155 | else | 154 | else |
| 156 | length = event->array[0]; | 155 | length = event->array[0]; |
| 157 | return length + RB_EVNT_HDR_SIZE; | 156 | return length + RB_EVNT_HDR_SIZE; |
| @@ -179,7 +178,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event) | |||
| 179 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | 178 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); |
| 180 | 179 | ||
| 181 | /* inline for ring buffer fast paths */ | 180 | /* inline for ring buffer fast paths */ |
| 182 | static inline void * | 181 | static void * |
| 183 | rb_event_data(struct ring_buffer_event *event) | 182 | rb_event_data(struct ring_buffer_event *event) |
| 184 | { | 183 | { |
| 185 | BUG_ON(event->type != RINGBUF_TYPE_DATA); | 184 | BUG_ON(event->type != RINGBUF_TYPE_DATA); |
| @@ -229,10 +228,9 @@ static void rb_init_page(struct buffer_data_page *bpage) | |||
| 229 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 228 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing |
| 230 | * this issue out. | 229 | * this issue out. |
| 231 | */ | 230 | */ |
| 232 | static inline void free_buffer_page(struct buffer_page *bpage) | 231 | static void free_buffer_page(struct buffer_page *bpage) |
| 233 | { | 232 | { |
| 234 | if (bpage->page) | 233 | free_page((unsigned long)bpage->page); |
| 235 | free_page((unsigned long)bpage->page); | ||
| 236 | kfree(bpage); | 234 | kfree(bpage); |
| 237 | } | 235 | } |
| 238 | 236 | ||
| @@ -811,7 +809,7 @@ rb_event_index(struct ring_buffer_event *event) | |||
| 811 | return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); | 809 | return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); |
| 812 | } | 810 | } |
| 813 | 811 | ||
| 814 | static inline int | 812 | static int |
| 815 | rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, | 813 | rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, |
| 816 | struct ring_buffer_event *event) | 814 | struct ring_buffer_event *event) |
| 817 | { | 815 | { |
| @@ -825,7 +823,7 @@ rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 825 | rb_commit_index(cpu_buffer) == index; | 823 | rb_commit_index(cpu_buffer) == index; |
| 826 | } | 824 | } |
| 827 | 825 | ||
| 828 | static inline void | 826 | static void |
| 829 | rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | 827 | rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, |
| 830 | struct ring_buffer_event *event) | 828 | struct ring_buffer_event *event) |
| 831 | { | 829 | { |
| @@ -850,7 +848,7 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 850 | local_set(&cpu_buffer->commit_page->page->commit, index); | 848 | local_set(&cpu_buffer->commit_page->page->commit, index); |
| 851 | } | 849 | } |
| 852 | 850 | ||
| 853 | static inline void | 851 | static void |
| 854 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | 852 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) |
| 855 | { | 853 | { |
| 856 | /* | 854 | /* |
| @@ -896,7 +894,7 @@ static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 896 | cpu_buffer->reader_page->read = 0; | 894 | cpu_buffer->reader_page->read = 0; |
| 897 | } | 895 | } |
| 898 | 896 | ||
| 899 | static inline void rb_inc_iter(struct ring_buffer_iter *iter) | 897 | static void rb_inc_iter(struct ring_buffer_iter *iter) |
| 900 | { | 898 | { |
| 901 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 899 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| 902 | 900 | ||
| @@ -926,7 +924,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter) | |||
| 926 | * and with this, we can determine what to place into the | 924 | * and with this, we can determine what to place into the |
| 927 | * data field. | 925 | * data field. |
| 928 | */ | 926 | */ |
| 929 | static inline void | 927 | static void |
| 930 | rb_update_event(struct ring_buffer_event *event, | 928 | rb_update_event(struct ring_buffer_event *event, |
| 931 | unsigned type, unsigned length) | 929 | unsigned type, unsigned length) |
| 932 | { | 930 | { |
| @@ -938,15 +936,11 @@ rb_update_event(struct ring_buffer_event *event, | |||
| 938 | break; | 936 | break; |
| 939 | 937 | ||
| 940 | case RINGBUF_TYPE_TIME_EXTEND: | 938 | case RINGBUF_TYPE_TIME_EXTEND: |
| 941 | event->len = | 939 | event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT); |
| 942 | (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1)) | ||
| 943 | >> RB_ALIGNMENT_SHIFT; | ||
| 944 | break; | 940 | break; |
| 945 | 941 | ||
| 946 | case RINGBUF_TYPE_TIME_STAMP: | 942 | case RINGBUF_TYPE_TIME_STAMP: |
| 947 | event->len = | 943 | event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT); |
| 948 | (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1)) | ||
| 949 | >> RB_ALIGNMENT_SHIFT; | ||
| 950 | break; | 944 | break; |
| 951 | 945 | ||
| 952 | case RINGBUF_TYPE_DATA: | 946 | case RINGBUF_TYPE_DATA: |
| @@ -955,16 +949,14 @@ rb_update_event(struct ring_buffer_event *event, | |||
| 955 | event->len = 0; | 949 | event->len = 0; |
| 956 | event->array[0] = length; | 950 | event->array[0] = length; |
| 957 | } else | 951 | } else |
| 958 | event->len = | 952 | event->len = DIV_ROUND_UP(length, RB_ALIGNMENT); |
| 959 | (length + (RB_ALIGNMENT-1)) | ||
| 960 | >> RB_ALIGNMENT_SHIFT; | ||
| 961 | break; | 953 | break; |
| 962 | default: | 954 | default: |
| 963 | BUG(); | 955 | BUG(); |
| 964 | } | 956 | } |
| 965 | } | 957 | } |
| 966 | 958 | ||
| 967 | static inline unsigned rb_calculate_event_length(unsigned length) | 959 | static unsigned rb_calculate_event_length(unsigned length) |
| 968 | { | 960 | { |
| 969 | struct ring_buffer_event event; /* Used only for sizeof array */ | 961 | struct ring_buffer_event event; /* Used only for sizeof array */ |
| 970 | 962 | ||
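The event length field is stored in units of RB_ALIGNMENT (4 bytes). The old code rounded up with an add-and-shift; with RB_ALIGNMENT_SHIFT removed above, DIV_ROUND_UP() from kernel.h expresses the same rounding directly. An illustrative equivalence (for RB_ALIGNMENT == 4 and RB_ALIGNMENT_SHIFT == 2):

	/* both compute the number of 4-byte units needed for `length' bytes */
	old_units = (length + (RB_ALIGNMENT - 1)) >> RB_ALIGNMENT_SHIFT;
	new_units = DIV_ROUND_UP(length, RB_ALIGNMENT);	/* (length + 3) / 4 */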
| @@ -1438,7 +1430,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
| 1438 | } | 1430 | } |
| 1439 | EXPORT_SYMBOL_GPL(ring_buffer_write); | 1431 | EXPORT_SYMBOL_GPL(ring_buffer_write); |
| 1440 | 1432 | ||
| 1441 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 1433 | static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
| 1442 | { | 1434 | { |
| 1443 | struct buffer_page *reader = cpu_buffer->reader_page; | 1435 | struct buffer_page *reader = cpu_buffer->reader_page; |
| 1444 | struct buffer_page *head = cpu_buffer->head_page; | 1436 | struct buffer_page *head = cpu_buffer->head_page; |
| @@ -2277,9 +2269,24 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
| 2277 | if (buffer_a->pages != buffer_b->pages) | 2269 | if (buffer_a->pages != buffer_b->pages) |
| 2278 | return -EINVAL; | 2270 | return -EINVAL; |
| 2279 | 2271 | ||
| 2272 | if (ring_buffer_flags != RB_BUFFERS_ON) | ||
| 2273 | return -EAGAIN; | ||
| 2274 | |||
| 2275 | if (atomic_read(&buffer_a->record_disabled)) | ||
| 2276 | return -EAGAIN; | ||
| 2277 | |||
| 2278 | if (atomic_read(&buffer_b->record_disabled)) | ||
| 2279 | return -EAGAIN; | ||
| 2280 | |||
| 2280 | cpu_buffer_a = buffer_a->buffers[cpu]; | 2281 | cpu_buffer_a = buffer_a->buffers[cpu]; |
| 2281 | cpu_buffer_b = buffer_b->buffers[cpu]; | 2282 | cpu_buffer_b = buffer_b->buffers[cpu]; |
| 2282 | 2283 | ||
| 2284 | if (atomic_read(&cpu_buffer_a->record_disabled)) | ||
| 2285 | return -EAGAIN; | ||
| 2286 | |||
| 2287 | if (atomic_read(&cpu_buffer_b->record_disabled)) | ||
| 2288 | return -EAGAIN; | ||
| 2289 | |||
| 2283 | /* | 2290 | /* |
| 2284 | * We can't do a synchronize_sched here because this | 2291 | * We can't do a synchronize_sched here because this |
| 2285 | * function can be called in atomic context. | 2292 | * function can be called in atomic context. |
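ring_buffer_swap_cpu() can now bail out with -EAGAIN when the ring buffers are globally off or when either buffer (or either per-cpu buffer) has recording disabled. The trace.c hunk further below matches this by relaxing the check in update_max_tr_single() from WARN_ON_ONCE(ret) to WARN_ON_ONCE(ret && ret != -EAGAIN). A hedged sketch of the caller-side pattern; the argument names are illustrative, only the return-value handling is taken from the diff:

	ret = ring_buffer_swap_cpu(max_buffer, tr->buffer, cpu);
	/* -EAGAIN means the swap was skipped, not that something broke */
	WARN_ON_ONCE(ret && ret != -EAGAIN);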
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 17bb88d86ac2..fd51cf0b94c7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
| @@ -37,6 +37,7 @@ | |||
| 37 | #include <linux/irqflags.h> | 37 | #include <linux/irqflags.h> |
| 38 | 38 | ||
| 39 | #include "trace.h" | 39 | #include "trace.h" |
| 40 | #include "trace_output.h" | ||
| 40 | 41 | ||
| 41 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) | 42 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) |
| 42 | 43 | ||
| @@ -52,6 +53,11 @@ unsigned long __read_mostly tracing_thresh; | |||
| 52 | */ | 53 | */ |
| 53 | static bool __read_mostly tracing_selftest_running; | 54 | static bool __read_mostly tracing_selftest_running; |
| 54 | 55 | ||
| 56 | /* | ||
| 57 | * If a tracer is running, we do not want to run SELFTEST. | ||
| 58 | */ | ||
| 59 | static bool __read_mostly tracing_selftest_disabled; | ||
| 60 | |||
| 55 | /* For tracers that don't implement custom flags */ | 61 | /* For tracers that don't implement custom flags */ |
| 56 | static struct tracer_opt dummy_tracer_opt[] = { | 62 | static struct tracer_opt dummy_tracer_opt[] = { |
| 57 | { } | 63 | { } |
| @@ -109,14 +115,19 @@ static cpumask_var_t __read_mostly tracing_buffer_mask; | |||
| 109 | */ | 115 | */ |
| 110 | int ftrace_dump_on_oops; | 116 | int ftrace_dump_on_oops; |
| 111 | 117 | ||
| 112 | static int tracing_set_tracer(char *buf); | 118 | static int tracing_set_tracer(const char *buf); |
| 119 | |||
| 120 | #define BOOTUP_TRACER_SIZE 100 | ||
| 121 | static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata; | ||
| 122 | static char *default_bootup_tracer; | ||
| 113 | 123 | ||
| 114 | static int __init set_ftrace(char *str) | 124 | static int __init set_ftrace(char *str) |
| 115 | { | 125 | { |
| 116 | tracing_set_tracer(str); | 126 | strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE); |
| 127 | default_bootup_tracer = bootup_tracer_buf; | ||
| 117 | return 1; | 128 | return 1; |
| 118 | } | 129 | } |
| 119 | __setup("ftrace", set_ftrace); | 130 | __setup("ftrace=", set_ftrace); |
| 120 | 131 | ||
| 121 | static int __init set_ftrace_dump_on_oops(char *str) | 132 | static int __init set_ftrace_dump_on_oops(char *str) |
| 122 | { | 133 | { |
| @@ -186,9 +197,6 @@ int tracing_is_enabled(void) | |||
| 186 | return tracer_enabled; | 197 | return tracer_enabled; |
| 187 | } | 198 | } |
| 188 | 199 | ||
| 189 | /* function tracing enabled */ | ||
| 190 | int ftrace_function_enabled; | ||
| 191 | |||
| 192 | /* | 200 | /* |
| 193 | * trace_buf_size is the size in bytes that is allocated | 201 | * trace_buf_size is the size in bytes that is allocated |
| 194 | * for a buffer. Note, the number of bytes is always rounded | 202 | * for a buffer. Note, the number of bytes is always rounded |
| @@ -229,7 +237,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | |||
| 229 | 237 | ||
| 230 | /* trace_flags holds trace_options default values */ | 238 | /* trace_flags holds trace_options default values */ |
| 231 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 239 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
| 232 | TRACE_ITER_ANNOTATE; | 240 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO; |
| 233 | 241 | ||
| 234 | /** | 242 | /** |
| 235 | * trace_wake_up - wake up tasks waiting for trace input | 243 | * trace_wake_up - wake up tasks waiting for trace input |
| @@ -287,6 +295,7 @@ static const char *trace_options[] = { | |||
| 287 | "userstacktrace", | 295 | "userstacktrace", |
| 288 | "sym-userobj", | 296 | "sym-userobj", |
| 289 | "printk-msg-only", | 297 | "printk-msg-only", |
| 298 | "context-info", | ||
| 290 | NULL | 299 | NULL |
| 291 | }; | 300 | }; |
| 292 | 301 | ||
| @@ -329,132 +338,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 329 | tracing_record_cmdline(current); | 338 | tracing_record_cmdline(current); |
| 330 | } | 339 | } |
| 331 | 340 | ||
| 332 | /** | ||
| 333 | * trace_seq_printf - sequence printing of trace information | ||
| 334 | * @s: trace sequence descriptor | ||
| 335 | * @fmt: printf format string | ||
| 336 | * | ||
| 337 | * The tracer may use either sequence operations or its own | ||
| 338 | * copy to user routines. To simplify formating of a trace | ||
| 339 | * trace_seq_printf is used to store strings into a special | ||
| 340 | * buffer (@s). Then the output may be either used by | ||
| 341 | * the sequencer or pulled into another buffer. | ||
| 342 | */ | ||
| 343 | int | ||
| 344 | trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
| 345 | { | ||
| 346 | int len = (PAGE_SIZE - 1) - s->len; | ||
| 347 | va_list ap; | ||
| 348 | int ret; | ||
| 349 | |||
| 350 | if (!len) | ||
| 351 | return 0; | ||
| 352 | |||
| 353 | va_start(ap, fmt); | ||
| 354 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | ||
| 355 | va_end(ap); | ||
| 356 | |||
| 357 | /* If we can't write it all, don't bother writing anything */ | ||
| 358 | if (ret >= len) | ||
| 359 | return 0; | ||
| 360 | |||
| 361 | s->len += ret; | ||
| 362 | |||
| 363 | return len; | ||
| 364 | } | ||
| 365 | |||
| 366 | /** | ||
| 367 | * trace_seq_puts - trace sequence printing of simple string | ||
| 368 | * @s: trace sequence descriptor | ||
| 369 | * @str: simple string to record | ||
| 370 | * | ||
| 371 | * The tracer may use either the sequence operations or its own | ||
| 372 | * copy to user routines. This function records a simple string | ||
| 373 | * into a special buffer (@s) for later retrieval by a sequencer | ||
| 374 | * or other mechanism. | ||
| 375 | */ | ||
| 376 | static int | ||
| 377 | trace_seq_puts(struct trace_seq *s, const char *str) | ||
| 378 | { | ||
| 379 | int len = strlen(str); | ||
| 380 | |||
| 381 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
| 382 | return 0; | ||
| 383 | |||
| 384 | memcpy(s->buffer + s->len, str, len); | ||
| 385 | s->len += len; | ||
| 386 | |||
| 387 | return len; | ||
| 388 | } | ||
| 389 | |||
| 390 | static int | ||
| 391 | trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
| 392 | { | ||
| 393 | if (s->len >= (PAGE_SIZE - 1)) | ||
| 394 | return 0; | ||
| 395 | |||
| 396 | s->buffer[s->len++] = c; | ||
| 397 | |||
| 398 | return 1; | ||
| 399 | } | ||
| 400 | |||
| 401 | static int | ||
| 402 | trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) | ||
| 403 | { | ||
| 404 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
| 405 | return 0; | ||
| 406 | |||
| 407 | memcpy(s->buffer + s->len, mem, len); | ||
| 408 | s->len += len; | ||
| 409 | |||
| 410 | return len; | ||
| 411 | } | ||
| 412 | |||
| 413 | #define MAX_MEMHEX_BYTES 8 | ||
| 414 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
| 415 | |||
| 416 | static int | ||
| 417 | trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) | ||
| 418 | { | ||
| 419 | unsigned char hex[HEX_CHARS]; | ||
| 420 | unsigned char *data = mem; | ||
| 421 | int i, j; | ||
| 422 | |||
| 423 | #ifdef __BIG_ENDIAN | ||
| 424 | for (i = 0, j = 0; i < len; i++) { | ||
| 425 | #else | ||
| 426 | for (i = len-1, j = 0; i >= 0; i--) { | ||
| 427 | #endif | ||
| 428 | hex[j++] = hex_asc_hi(data[i]); | ||
| 429 | hex[j++] = hex_asc_lo(data[i]); | ||
| 430 | } | ||
| 431 | hex[j++] = ' '; | ||
| 432 | |||
| 433 | return trace_seq_putmem(s, hex, j); | ||
| 434 | } | ||
| 435 | |||
| 436 | static int | ||
| 437 | trace_seq_path(struct trace_seq *s, struct path *path) | ||
| 438 | { | ||
| 439 | unsigned char *p; | ||
| 440 | |||
| 441 | if (s->len >= (PAGE_SIZE - 1)) | ||
| 442 | return 0; | ||
| 443 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
| 444 | if (!IS_ERR(p)) { | ||
| 445 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
| 446 | if (p) { | ||
| 447 | s->len = p - s->buffer; | ||
| 448 | return 1; | ||
| 449 | } | ||
| 450 | } else { | ||
| 451 | s->buffer[s->len++] = '?'; | ||
| 452 | return 1; | ||
| 453 | } | ||
| 454 | |||
| 455 | return 0; | ||
| 456 | } | ||
| 457 | |||
| 458 | static void | 341 | static void |
| 459 | trace_seq_reset(struct trace_seq *s) | 342 | trace_seq_reset(struct trace_seq *s) |
| 460 | { | 343 | { |
| @@ -543,7 +426,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 543 | 426 | ||
| 544 | ftrace_enable_cpu(); | 427 | ftrace_enable_cpu(); |
| 545 | 428 | ||
| 546 | WARN_ON_ONCE(ret); | 429 | WARN_ON_ONCE(ret && ret != -EAGAIN); |
| 547 | 430 | ||
| 548 | __update_max_tr(tr, tsk, cpu); | 431 | __update_max_tr(tr, tsk, cpu); |
| 549 | __raw_spin_unlock(&ftrace_max_lock); | 432 | __raw_spin_unlock(&ftrace_max_lock); |
| @@ -596,7 +479,7 @@ int register_tracer(struct tracer *type) | |||
| 596 | type->flags->opts = dummy_tracer_opt; | 479 | type->flags->opts = dummy_tracer_opt; |
| 597 | 480 | ||
| 598 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 481 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
| 599 | if (type->selftest) { | 482 | if (type->selftest && !tracing_selftest_disabled) { |
| 600 | struct tracer *saved_tracer = current_trace; | 483 | struct tracer *saved_tracer = current_trace; |
| 601 | struct trace_array *tr = &global_trace; | 484 | struct trace_array *tr = &global_trace; |
| 602 | int i; | 485 | int i; |
| @@ -638,8 +521,25 @@ int register_tracer(struct tracer *type) | |||
| 638 | out: | 521 | out: |
| 639 | tracing_selftest_running = false; | 522 | tracing_selftest_running = false; |
| 640 | mutex_unlock(&trace_types_lock); | 523 | mutex_unlock(&trace_types_lock); |
| 641 | lock_kernel(); | ||
| 642 | 524 | ||
| 525 | if (!ret && default_bootup_tracer) { | ||
| 526 | if (!strncmp(default_bootup_tracer, type->name, | ||
| 527 | BOOTUP_TRACER_SIZE)) { | ||
| 528 | printk(KERN_INFO "Starting tracer '%s'\n", | ||
| 529 | type->name); | ||
| 530 | /* Do we want this tracer to start on bootup? */ | ||
| 531 | tracing_set_tracer(type->name); | ||
| 532 | default_bootup_tracer = NULL; | ||
| 533 | /* disable other selftests, since this will break it. */ | ||
| 534 | tracing_selftest_disabled = 1; | ||
| 535 | #ifdef CONFIG_FTRACE_STARTUP_TEST | ||
| 536 | printk(KERN_INFO "Disabling FTRACE selftests due" | ||
| 537 | " to running tracer '%s'\n", type->name); | ||
| 538 | #endif | ||
| 539 | } | ||
| 540 | } | ||
| 541 | |||
| 542 | lock_kernel(); | ||
| 643 | return ret; | 543 | return ret; |
| 644 | } | 544 | } |
| 645 | 545 | ||
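With this change, ftrace=<tracer> on the kernel command line no longer calls tracing_set_tracer() from the early __setup() hook, where no tracer has registered yet. set_ftrace() only records the requested name; register_tracer() later compares it against each newly registered tracer, starts the matching one, and disables the startup self-tests so they cannot clobber the live trace. A condensed sketch of the flow, assembled from the hunks above (using the ftrace=initcall example from the Kconfig help text):

	/* early param parsing: just remember the name, e.g. "initcall" */
	static int __init set_ftrace(char *str)
	{
		strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
		default_bootup_tracer = bootup_tracer_buf;
		return 1;
	}
	__setup("ftrace=", set_ftrace);

	/* later, inside register_tracer(), once the tracer actually exists */
	if (!ret && default_bootup_tracer &&
	    !strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE)) {
		tracing_set_tracer(type->name);	/* start it now */
		default_bootup_tracer = NULL;
		tracing_selftest_disabled = 1;	/* skip self-tests from here on */
	}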
| @@ -738,13 +638,12 @@ void tracing_start(void) | |||
| 738 | return; | 638 | return; |
| 739 | 639 | ||
| 740 | spin_lock_irqsave(&tracing_start_lock, flags); | 640 | spin_lock_irqsave(&tracing_start_lock, flags); |
| 741 | if (--trace_stop_count) | 641 | if (--trace_stop_count) { |
| 742 | goto out; | 642 | if (trace_stop_count < 0) { |
| 743 | 643 | /* Someone screwed up their debugging */ | |
| 744 | if (trace_stop_count < 0) { | 644 | WARN_ON_ONCE(1); |
| 745 | /* Someone screwed up their debugging */ | 645 | trace_stop_count = 0; |
| 746 | WARN_ON_ONCE(1); | 646 | } |
| 747 | trace_stop_count = 0; | ||
| 748 | goto out; | 647 | goto out; |
| 749 | } | 648 | } |
| 750 | 649 | ||
| @@ -960,10 +859,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data, | |||
| 960 | trace_function(tr, data, ip, parent_ip, flags, pc); | 859 | trace_function(tr, data, ip, parent_ip, flags, pc); |
| 961 | } | 860 | } |
| 962 | 861 | ||
| 963 | static void ftrace_trace_stack(struct trace_array *tr, | 862 | static void __ftrace_trace_stack(struct trace_array *tr, |
| 964 | struct trace_array_cpu *data, | 863 | struct trace_array_cpu *data, |
| 965 | unsigned long flags, | 864 | unsigned long flags, |
| 966 | int skip, int pc) | 865 | int skip, int pc) |
| 967 | { | 866 | { |
| 968 | #ifdef CONFIG_STACKTRACE | 867 | #ifdef CONFIG_STACKTRACE |
| 969 | struct ring_buffer_event *event; | 868 | struct ring_buffer_event *event; |
| @@ -971,9 +870,6 @@ static void ftrace_trace_stack(struct trace_array *tr, | |||
| 971 | struct stack_trace trace; | 870 | struct stack_trace trace; |
| 972 | unsigned long irq_flags; | 871 | unsigned long irq_flags; |
| 973 | 872 | ||
| 974 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | ||
| 975 | return; | ||
| 976 | |||
| 977 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 873 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
| 978 | &irq_flags); | 874 | &irq_flags); |
| 979 | if (!event) | 875 | if (!event) |
| @@ -994,12 +890,23 @@ static void ftrace_trace_stack(struct trace_array *tr, | |||
| 994 | #endif | 890 | #endif |
| 995 | } | 891 | } |
| 996 | 892 | ||
| 893 | static void ftrace_trace_stack(struct trace_array *tr, | ||
| 894 | struct trace_array_cpu *data, | ||
| 895 | unsigned long flags, | ||
| 896 | int skip, int pc) | ||
| 897 | { | ||
| 898 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | ||
| 899 | return; | ||
| 900 | |||
| 901 | __ftrace_trace_stack(tr, data, flags, skip, pc); | ||
| 902 | } | ||
| 903 | |||
| 997 | void __trace_stack(struct trace_array *tr, | 904 | void __trace_stack(struct trace_array *tr, |
| 998 | struct trace_array_cpu *data, | 905 | struct trace_array_cpu *data, |
| 999 | unsigned long flags, | 906 | unsigned long flags, |
| 1000 | int skip) | 907 | int skip, int pc) |
| 1001 | { | 908 | { |
| 1002 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); | 909 | __ftrace_trace_stack(tr, data, flags, skip, pc); |
| 1003 | } | 910 | } |
| 1004 | 911 | ||
| 1005 | static void ftrace_trace_userstack(struct trace_array *tr, | 912 | static void ftrace_trace_userstack(struct trace_array *tr, |
| @@ -1163,65 +1070,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
| 1163 | local_irq_restore(flags); | 1070 | local_irq_restore(flags); |
| 1164 | } | 1071 | } |
| 1165 | 1072 | ||
| 1166 | #ifdef CONFIG_FUNCTION_TRACER | ||
| 1167 | static void | ||
| 1168 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | ||
| 1169 | { | ||
| 1170 | struct trace_array *tr = &global_trace; | ||
| 1171 | struct trace_array_cpu *data; | ||
| 1172 | unsigned long flags; | ||
| 1173 | long disabled; | ||
| 1174 | int cpu, resched; | ||
| 1175 | int pc; | ||
| 1176 | |||
| 1177 | if (unlikely(!ftrace_function_enabled)) | ||
| 1178 | return; | ||
| 1179 | |||
| 1180 | pc = preempt_count(); | ||
| 1181 | resched = ftrace_preempt_disable(); | ||
| 1182 | local_save_flags(flags); | ||
| 1183 | cpu = raw_smp_processor_id(); | ||
| 1184 | data = tr->data[cpu]; | ||
| 1185 | disabled = atomic_inc_return(&data->disabled); | ||
| 1186 | |||
| 1187 | if (likely(disabled == 1)) | ||
| 1188 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
| 1189 | |||
| 1190 | atomic_dec(&data->disabled); | ||
| 1191 | ftrace_preempt_enable(resched); | ||
| 1192 | } | ||
| 1193 | |||
| 1194 | static void | ||
| 1195 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
| 1196 | { | ||
| 1197 | struct trace_array *tr = &global_trace; | ||
| 1198 | struct trace_array_cpu *data; | ||
| 1199 | unsigned long flags; | ||
| 1200 | long disabled; | ||
| 1201 | int cpu; | ||
| 1202 | int pc; | ||
| 1203 | |||
| 1204 | if (unlikely(!ftrace_function_enabled)) | ||
| 1205 | return; | ||
| 1206 | |||
| 1207 | /* | ||
| 1208 | * Need to use raw, since this must be called before the | ||
| 1209 | * recursive protection is performed. | ||
| 1210 | */ | ||
| 1211 | local_irq_save(flags); | ||
| 1212 | cpu = raw_smp_processor_id(); | ||
| 1213 | data = tr->data[cpu]; | ||
| 1214 | disabled = atomic_inc_return(&data->disabled); | ||
| 1215 | |||
| 1216 | if (likely(disabled == 1)) { | ||
| 1217 | pc = preempt_count(); | ||
| 1218 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | atomic_dec(&data->disabled); | ||
| 1222 | local_irq_restore(flags); | ||
| 1223 | } | ||
| 1224 | |||
| 1225 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1073 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 1226 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 1074 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
| 1227 | { | 1075 | { |
| @@ -1279,31 +1127,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
| 1279 | } | 1127 | } |
| 1280 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1128 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 1281 | 1129 | ||
| 1282 | static struct ftrace_ops trace_ops __read_mostly = | ||
| 1283 | { | ||
| 1284 | .func = function_trace_call, | ||
| 1285 | }; | ||
| 1286 | |||
| 1287 | void tracing_start_function_trace(void) | ||
| 1288 | { | ||
| 1289 | ftrace_function_enabled = 0; | ||
| 1290 | |||
| 1291 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
| 1292 | trace_ops.func = function_trace_call_preempt_only; | ||
| 1293 | else | ||
| 1294 | trace_ops.func = function_trace_call; | ||
| 1295 | |||
| 1296 | register_ftrace_function(&trace_ops); | ||
| 1297 | ftrace_function_enabled = 1; | ||
| 1298 | } | ||
| 1299 | |||
| 1300 | void tracing_stop_function_trace(void) | ||
| 1301 | { | ||
| 1302 | ftrace_function_enabled = 0; | ||
| 1303 | unregister_ftrace_function(&trace_ops); | ||
| 1304 | } | ||
| 1305 | #endif | ||
| 1306 | |||
| 1307 | enum trace_file_type { | 1130 | enum trace_file_type { |
| 1308 | TRACE_FILE_LAT_FMT = 1, | 1131 | TRACE_FILE_LAT_FMT = 1, |
| 1309 | TRACE_FILE_ANNOTATE = 2, | 1132 | TRACE_FILE_ANNOTATE = 2, |
| @@ -1376,8 +1199,8 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
| 1376 | } | 1199 | } |
| 1377 | 1200 | ||
| 1378 | /* Find the next real entry, without updating the iterator itself */ | 1201 | /* Find the next real entry, without updating the iterator itself */ |
| 1379 | static struct trace_entry * | 1202 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
| 1380 | find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | 1203 | int *ent_cpu, u64 *ent_ts) |
| 1381 | { | 1204 | { |
| 1382 | return __find_next_entry(iter, ent_cpu, ent_ts); | 1205 | return __find_next_entry(iter, ent_cpu, ent_ts); |
| 1383 | } | 1206 | } |
| @@ -1472,154 +1295,6 @@ static void s_stop(struct seq_file *m, void *p) | |||
| 1472 | mutex_unlock(&trace_types_lock); | 1295 | mutex_unlock(&trace_types_lock); |
| 1473 | } | 1296 | } |
| 1474 | 1297 | ||
| 1475 | #ifdef CONFIG_KRETPROBES | ||
| 1476 | static inline const char *kretprobed(const char *name) | ||
| 1477 | { | ||
| 1478 | static const char tramp_name[] = "kretprobe_trampoline"; | ||
| 1479 | int size = sizeof(tramp_name); | ||
| 1480 | |||
| 1481 | if (strncmp(tramp_name, name, size) == 0) | ||
| 1482 | return "[unknown/kretprobe'd]"; | ||
| 1483 | return name; | ||
| 1484 | } | ||
| 1485 | #else | ||
| 1486 | static inline const char *kretprobed(const char *name) | ||
| 1487 | { | ||
| 1488 | return name; | ||
| 1489 | } | ||
| 1490 | #endif /* CONFIG_KRETPROBES */ | ||
| 1491 | |||
| 1492 | static int | ||
| 1493 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | ||
| 1494 | { | ||
| 1495 | #ifdef CONFIG_KALLSYMS | ||
| 1496 | char str[KSYM_SYMBOL_LEN]; | ||
| 1497 | const char *name; | ||
| 1498 | |||
| 1499 | kallsyms_lookup(address, NULL, NULL, NULL, str); | ||
| 1500 | |||
| 1501 | name = kretprobed(str); | ||
| 1502 | |||
| 1503 | return trace_seq_printf(s, fmt, name); | ||
| 1504 | #endif | ||
| 1505 | return 1; | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | static int | ||
| 1509 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | ||
| 1510 | unsigned long address) | ||
| 1511 | { | ||
| 1512 | #ifdef CONFIG_KALLSYMS | ||
| 1513 | char str[KSYM_SYMBOL_LEN]; | ||
| 1514 | const char *name; | ||
| 1515 | |||
| 1516 | sprint_symbol(str, address); | ||
| 1517 | name = kretprobed(str); | ||
| 1518 | |||
| 1519 | return trace_seq_printf(s, fmt, name); | ||
| 1520 | #endif | ||
| 1521 | return 1; | ||
| 1522 | } | ||
| 1523 | |||
| 1524 | #ifndef CONFIG_64BIT | ||
| 1525 | # define IP_FMT "%08lx" | ||
| 1526 | #else | ||
| 1527 | # define IP_FMT "%016lx" | ||
| 1528 | #endif | ||
| 1529 | |||
| 1530 | int | ||
| 1531 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | ||
| 1532 | { | ||
| 1533 | int ret; | ||
| 1534 | |||
| 1535 | if (!ip) | ||
| 1536 | return trace_seq_printf(s, "0"); | ||
| 1537 | |||
| 1538 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | ||
| 1539 | ret = seq_print_sym_offset(s, "%s", ip); | ||
| 1540 | else | ||
| 1541 | ret = seq_print_sym_short(s, "%s", ip); | ||
| 1542 | |||
| 1543 | if (!ret) | ||
| 1544 | return 0; | ||
| 1545 | |||
| 1546 | if (sym_flags & TRACE_ITER_SYM_ADDR) | ||
| 1547 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
| 1548 | return ret; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
| 1552 | unsigned long ip, unsigned long sym_flags) | ||
| 1553 | { | ||
| 1554 | struct file *file = NULL; | ||
| 1555 | unsigned long vmstart = 0; | ||
| 1556 | int ret = 1; | ||
| 1557 | |||
| 1558 | if (mm) { | ||
| 1559 | const struct vm_area_struct *vma; | ||
| 1560 | |||
| 1561 | down_read(&mm->mmap_sem); | ||
| 1562 | vma = find_vma(mm, ip); | ||
| 1563 | if (vma) { | ||
| 1564 | file = vma->vm_file; | ||
| 1565 | vmstart = vma->vm_start; | ||
| 1566 | } | ||
| 1567 | if (file) { | ||
| 1568 | ret = trace_seq_path(s, &file->f_path); | ||
| 1569 | if (ret) | ||
| 1570 | ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart); | ||
| 1571 | } | ||
| 1572 | up_read(&mm->mmap_sem); | ||
| 1573 | } | ||
| 1574 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
| 1575 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
| 1576 | return ret; | ||
| 1577 | } | ||
| 1578 | |||
| 1579 | static int | ||
| 1580 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
| 1581 | unsigned long sym_flags) | ||
| 1582 | { | ||
| 1583 | struct mm_struct *mm = NULL; | ||
| 1584 | int ret = 1; | ||
| 1585 | unsigned int i; | ||
| 1586 | |||
| 1587 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
| 1588 | struct task_struct *task; | ||
| 1589 | /* | ||
| 1590 | * we do the lookup on the thread group leader, | ||
| 1591 | * since individual threads might have already quit! | ||
| 1592 | */ | ||
| 1593 | rcu_read_lock(); | ||
| 1594 | task = find_task_by_vpid(entry->ent.tgid); | ||
| 1595 | if (task) | ||
| 1596 | mm = get_task_mm(task); | ||
| 1597 | rcu_read_unlock(); | ||
| 1598 | } | ||
| 1599 | |||
| 1600 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
| 1601 | unsigned long ip = entry->caller[i]; | ||
| 1602 | |||
| 1603 | if (ip == ULONG_MAX || !ret) | ||
| 1604 | break; | ||
| 1605 | if (i && ret) | ||
| 1606 | ret = trace_seq_puts(s, " <- "); | ||
| 1607 | if (!ip) { | ||
| 1608 | if (ret) | ||
| 1609 | ret = trace_seq_puts(s, "??"); | ||
| 1610 | continue; | ||
| 1611 | } | ||
| 1612 | if (!ret) | ||
| 1613 | break; | ||
| 1614 | if (ret) | ||
| 1615 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
| 1616 | } | ||
| 1617 | |||
| 1618 | if (mm) | ||
| 1619 | mmput(mm); | ||
| 1620 | return ret; | ||
| 1621 | } | ||
| 1622 | |||
| 1623 | static void print_lat_help_header(struct seq_file *m) | 1298 | static void print_lat_help_header(struct seq_file *m) |
| 1624 | { | 1299 | { |
| 1625 | seq_puts(m, "# _------=> CPU# \n"); | 1300 | seq_puts(m, "# _------=> CPU# \n"); |
| @@ -1704,103 +1379,6 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
| 1704 | seq_puts(m, "\n"); | 1379 | seq_puts(m, "\n"); |
| 1705 | } | 1380 | } |
| 1706 | 1381 | ||
| 1707 | static void | ||
| 1708 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | ||
| 1709 | { | ||
| 1710 | int hardirq, softirq; | ||
| 1711 | char *comm; | ||
| 1712 | |||
| 1713 | comm = trace_find_cmdline(entry->pid); | ||
| 1714 | |||
| 1715 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); | ||
| 1716 | trace_seq_printf(s, "%3d", cpu); | ||
| 1717 | trace_seq_printf(s, "%c%c", | ||
| 1718 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | ||
| 1719 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.', | ||
| 1720 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); | ||
| 1721 | |||
| 1722 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | ||
| 1723 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | ||
| 1724 | if (hardirq && softirq) { | ||
| 1725 | trace_seq_putc(s, 'H'); | ||
| 1726 | } else { | ||
| 1727 | if (hardirq) { | ||
| 1728 | trace_seq_putc(s, 'h'); | ||
| 1729 | } else { | ||
| 1730 | if (softirq) | ||
| 1731 | trace_seq_putc(s, 's'); | ||
| 1732 | else | ||
| 1733 | trace_seq_putc(s, '.'); | ||
| 1734 | } | ||
| 1735 | } | ||
| 1736 | |||
| 1737 | if (entry->preempt_count) | ||
| 1738 | trace_seq_printf(s, "%x", entry->preempt_count); | ||
| 1739 | else | ||
| 1740 | trace_seq_puts(s, "."); | ||
| 1741 | } | ||
| 1742 | |||
| 1743 | unsigned long preempt_mark_thresh = 100; | ||
| 1744 | |||
| 1745 | static void | ||
| 1746 | lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, | ||
| 1747 | unsigned long rel_usecs) | ||
| 1748 | { | ||
| 1749 | trace_seq_printf(s, " %4lldus", abs_usecs); | ||
| 1750 | if (rel_usecs > preempt_mark_thresh) | ||
| 1751 | trace_seq_puts(s, "!: "); | ||
| 1752 | else if (rel_usecs > 1) | ||
| 1753 | trace_seq_puts(s, "+: "); | ||
| 1754 | else | ||
| 1755 | trace_seq_puts(s, " : "); | ||
| 1756 | } | ||
| 1757 | |||
| 1758 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | ||
| 1759 | |||
| 1760 | static int task_state_char(unsigned long state) | ||
| 1761 | { | ||
| 1762 | int bit = state ? __ffs(state) + 1 : 0; | ||
| 1763 | |||
| 1764 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | /* | ||
| 1768 | * The message is supposed to contain an ending newline. | ||
| 1769 | * If the printing stops prematurely, try to add a newline of our own. | ||
| 1770 | */ | ||
| 1771 | void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | ||
| 1772 | { | ||
| 1773 | struct trace_entry *ent; | ||
| 1774 | struct trace_field_cont *cont; | ||
| 1775 | bool ok = true; | ||
| 1776 | |||
| 1777 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
| 1778 | if (!ent || ent->type != TRACE_CONT) { | ||
| 1779 | trace_seq_putc(s, '\n'); | ||
| 1780 | return; | ||
| 1781 | } | ||
| 1782 | |||
| 1783 | do { | ||
| 1784 | cont = (struct trace_field_cont *)ent; | ||
| 1785 | if (ok) | ||
| 1786 | ok = (trace_seq_printf(s, "%s", cont->buf) > 0); | ||
| 1787 | |||
| 1788 | ftrace_disable_cpu(); | ||
| 1789 | |||
| 1790 | if (iter->buffer_iter[iter->cpu]) | ||
| 1791 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | ||
| 1792 | else | ||
| 1793 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | ||
| 1794 | |||
| 1795 | ftrace_enable_cpu(); | ||
| 1796 | |||
| 1797 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
| 1798 | } while (ent && ent->type == TRACE_CONT); | ||
| 1799 | |||
| 1800 | if (!ok) | ||
| 1801 | trace_seq_putc(s, '\n'); | ||
| 1802 | } | ||
| 1803 | |||
| 1804 | static void test_cpu_buff_start(struct trace_iterator *iter) | 1382 | static void test_cpu_buff_start(struct trace_iterator *iter) |
| 1805 | { | 1383 | { |
| 1806 | struct trace_seq *s = &iter->seq; | 1384 | struct trace_seq *s = &iter->seq; |
| @@ -1818,138 +1396,31 @@ static void test_cpu_buff_start(struct trace_iterator *iter) | |||
| 1818 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | 1396 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); |
| 1819 | } | 1397 | } |
| 1820 | 1398 | ||
| 1821 | static enum print_line_t | 1399 | static enum print_line_t print_lat_fmt(struct trace_iterator *iter) |
| 1822 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | ||
| 1823 | { | 1400 | { |
| 1824 | struct trace_seq *s = &iter->seq; | 1401 | struct trace_seq *s = &iter->seq; |
| 1825 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1402 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
| 1826 | struct trace_entry *next_entry; | 1403 | struct trace_event *event; |
| 1827 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); | ||
| 1828 | struct trace_entry *entry = iter->ent; | 1404 | struct trace_entry *entry = iter->ent; |
| 1829 | unsigned long abs_usecs; | ||
| 1830 | unsigned long rel_usecs; | ||
| 1831 | u64 next_ts; | ||
| 1832 | char *comm; | ||
| 1833 | int S, T; | ||
| 1834 | int i; | ||
| 1835 | |||
| 1836 | if (entry->type == TRACE_CONT) | ||
| 1837 | return TRACE_TYPE_HANDLED; | ||
| 1838 | 1405 | ||
| 1839 | test_cpu_buff_start(iter); | 1406 | test_cpu_buff_start(iter); |
| 1840 | 1407 | ||
| 1841 | next_entry = find_next_entry(iter, NULL, &next_ts); | 1408 | event = ftrace_find_event(entry->type); |
| 1842 | if (!next_entry) | ||
| 1843 | next_ts = iter->ts; | ||
| 1844 | rel_usecs = ns2usecs(next_ts - iter->ts); | ||
| 1845 | abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); | ||
| 1846 | |||
| 1847 | if (verbose) { | ||
| 1848 | comm = trace_find_cmdline(entry->pid); | ||
| 1849 | trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" | ||
| 1850 | " %ld.%03ldms (+%ld.%03ldms): ", | ||
| 1851 | comm, | ||
| 1852 | entry->pid, cpu, entry->flags, | ||
| 1853 | entry->preempt_count, trace_idx, | ||
| 1854 | ns2usecs(iter->ts), | ||
| 1855 | abs_usecs/1000, | ||
| 1856 | abs_usecs % 1000, rel_usecs/1000, | ||
| 1857 | rel_usecs % 1000); | ||
| 1858 | } else { | ||
| 1859 | lat_print_generic(s, entry, cpu); | ||
| 1860 | lat_print_timestamp(s, abs_usecs, rel_usecs); | ||
| 1861 | } | ||
| 1862 | switch (entry->type) { | ||
| 1863 | case TRACE_FN: { | ||
| 1864 | struct ftrace_entry *field; | ||
| 1865 | |||
| 1866 | trace_assign_type(field, entry); | ||
| 1867 | |||
| 1868 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 1869 | trace_seq_puts(s, " ("); | ||
| 1870 | seq_print_ip_sym(s, field->parent_ip, sym_flags); | ||
| 1871 | trace_seq_puts(s, ")\n"); | ||
| 1872 | break; | ||
| 1873 | } | ||
| 1874 | case TRACE_CTX: | ||
| 1875 | case TRACE_WAKE: { | ||
| 1876 | struct ctx_switch_entry *field; | ||
| 1877 | |||
| 1878 | trace_assign_type(field, entry); | ||
| 1879 | |||
| 1880 | T = task_state_char(field->next_state); | ||
| 1881 | S = task_state_char(field->prev_state); | ||
| 1882 | comm = trace_find_cmdline(field->next_pid); | ||
| 1883 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | ||
| 1884 | field->prev_pid, | ||
| 1885 | field->prev_prio, | ||
| 1886 | S, entry->type == TRACE_CTX ? "==>" : " +", | ||
| 1887 | field->next_cpu, | ||
| 1888 | field->next_pid, | ||
| 1889 | field->next_prio, | ||
| 1890 | T, comm); | ||
| 1891 | break; | ||
| 1892 | } | ||
| 1893 | case TRACE_SPECIAL: { | ||
| 1894 | struct special_entry *field; | ||
| 1895 | |||
| 1896 | trace_assign_type(field, entry); | ||
| 1897 | |||
| 1898 | trace_seq_printf(s, "# %ld %ld %ld\n", | ||
| 1899 | field->arg1, | ||
| 1900 | field->arg2, | ||
| 1901 | field->arg3); | ||
| 1902 | break; | ||
| 1903 | } | ||
| 1904 | case TRACE_STACK: { | ||
| 1905 | struct stack_entry *field; | ||
| 1906 | |||
| 1907 | trace_assign_type(field, entry); | ||
| 1908 | 1409 | ||
| 1909 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | 1410 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 1910 | if (i) | 1411 | if (!trace_print_lat_context(iter)) |
| 1911 | trace_seq_puts(s, " <= "); | 1412 | goto partial; |
| 1912 | seq_print_ip_sym(s, field->caller[i], sym_flags); | ||
| 1913 | } | ||
| 1914 | trace_seq_puts(s, "\n"); | ||
| 1915 | break; | ||
| 1916 | } | ||
| 1917 | case TRACE_PRINT: { | ||
| 1918 | struct print_entry *field; | ||
| 1919 | |||
| 1920 | trace_assign_type(field, entry); | ||
| 1921 | |||
| 1922 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 1923 | trace_seq_printf(s, ": %s", field->buf); | ||
| 1924 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 1925 | trace_seq_print_cont(s, iter); | ||
| 1926 | break; | ||
| 1927 | } | 1413 | } |
| 1928 | case TRACE_BRANCH: { | ||
| 1929 | struct trace_branch *field; | ||
| 1930 | 1414 | ||
| 1931 | trace_assign_type(field, entry); | 1415 | if (event && event->latency_trace) |
| 1416 | return event->latency_trace(iter, sym_flags); | ||
| 1932 | 1417 | ||
| 1933 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | 1418 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) |
| 1934 | field->correct ? " ok " : " MISS ", | 1419 | goto partial; |
| 1935 | field->func, | ||
| 1936 | field->file, | ||
| 1937 | field->line); | ||
| 1938 | break; | ||
| 1939 | } | ||
| 1940 | case TRACE_USER_STACK: { | ||
| 1941 | struct userstack_entry *field; | ||
| 1942 | |||
| 1943 | trace_assign_type(field, entry); | ||
| 1944 | 1420 | ||
| 1945 | seq_print_userip_objs(field, s, sym_flags); | ||
| 1946 | trace_seq_putc(s, '\n'); | ||
| 1947 | break; | ||
| 1948 | } | ||
| 1949 | default: | ||
| 1950 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | ||
| 1951 | } | ||
| 1952 | return TRACE_TYPE_HANDLED; | 1421 | return TRACE_TYPE_HANDLED; |
| 1422 | partial: | ||
| 1423 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1953 | } | 1424 | } |
| 1954 | 1425 | ||
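print_lat_fmt() above and the printers that follow now funnel per-type formatting through a registered struct trace_event instead of a switch on entry->type. The shape of that interface, inferred from the call sites in this hunk and from trace_branch.c further down (the authoritative definition lives in the new trace_output.h):

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags);

struct trace_event {
	int			type;		/* matches trace_entry.type */
	trace_print_func	trace;		/* default text output */
	trace_print_func	latency_trace;	/* latency-format output */
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event *ftrace_find_event(int type);	/* NULL if type unknown */
int register_ftrace_event(struct trace_event *event);	/* 0 on failure */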
| 1955 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 1426 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) |
| @@ -1957,313 +1428,78 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
| 1957 | struct trace_seq *s = &iter->seq; | 1428 | struct trace_seq *s = &iter->seq; |
| 1958 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1429 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
| 1959 | struct trace_entry *entry; | 1430 | struct trace_entry *entry; |
| 1960 | unsigned long usec_rem; | 1431 | struct trace_event *event; |
| 1961 | unsigned long long t; | ||
| 1962 | unsigned long secs; | ||
| 1963 | char *comm; | ||
| 1964 | int ret; | ||
| 1965 | int S, T; | ||
| 1966 | int i; | ||
| 1967 | 1432 | ||
| 1968 | entry = iter->ent; | 1433 | entry = iter->ent; |
| 1969 | 1434 | ||
| 1970 | if (entry->type == TRACE_CONT) | ||
| 1971 | return TRACE_TYPE_HANDLED; | ||
| 1972 | |||
| 1973 | test_cpu_buff_start(iter); | 1435 | test_cpu_buff_start(iter); |
| 1974 | 1436 | ||
| 1975 | comm = trace_find_cmdline(iter->ent->pid); | 1437 | event = ftrace_find_event(entry->type); |
| 1976 | |||
| 1977 | t = ns2usecs(iter->ts); | ||
| 1978 | usec_rem = do_div(t, 1000000ULL); | ||
| 1979 | secs = (unsigned long)t; | ||
| 1980 | |||
| 1981 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); | ||
| 1982 | if (!ret) | ||
| 1983 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1984 | ret = trace_seq_printf(s, "[%03d] ", iter->cpu); | ||
| 1985 | if (!ret) | ||
| 1986 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1987 | ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); | ||
| 1988 | if (!ret) | ||
| 1989 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1990 | 1438 | ||
| 1991 | switch (entry->type) { | 1439 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 1992 | case TRACE_FN: { | 1440 | if (!trace_print_context(iter)) |
| 1993 | struct ftrace_entry *field; | 1441 | goto partial; |
| 1994 | |||
| 1995 | trace_assign_type(field, entry); | ||
| 1996 | |||
| 1997 | ret = seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 1998 | if (!ret) | ||
| 1999 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2000 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && | ||
| 2001 | field->parent_ip) { | ||
| 2002 | ret = trace_seq_printf(s, " <-"); | ||
| 2003 | if (!ret) | ||
| 2004 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2005 | ret = seq_print_ip_sym(s, | ||
| 2006 | field->parent_ip, | ||
| 2007 | sym_flags); | ||
| 2008 | if (!ret) | ||
| 2009 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2010 | } | ||
| 2011 | ret = trace_seq_printf(s, "\n"); | ||
| 2012 | if (!ret) | ||
| 2013 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2014 | break; | ||
| 2015 | } | ||
| 2016 | case TRACE_CTX: | ||
| 2017 | case TRACE_WAKE: { | ||
| 2018 | struct ctx_switch_entry *field; | ||
| 2019 | |||
| 2020 | trace_assign_type(field, entry); | ||
| 2021 | |||
| 2022 | T = task_state_char(field->next_state); | ||
| 2023 | S = task_state_char(field->prev_state); | ||
| 2024 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", | ||
| 2025 | field->prev_pid, | ||
| 2026 | field->prev_prio, | ||
| 2027 | S, | ||
| 2028 | entry->type == TRACE_CTX ? "==>" : " +", | ||
| 2029 | field->next_cpu, | ||
| 2030 | field->next_pid, | ||
| 2031 | field->next_prio, | ||
| 2032 | T); | ||
| 2033 | if (!ret) | ||
| 2034 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2035 | break; | ||
| 2036 | } | 1442 | } |
| 2037 | case TRACE_SPECIAL: { | ||
| 2038 | struct special_entry *field; | ||
| 2039 | 1443 | ||
| 2040 | trace_assign_type(field, entry); | 1444 | if (event && event->trace) |
| 2041 | 1445 | return event->trace(iter, sym_flags); | |
| 2042 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | ||
| 2043 | field->arg1, | ||
| 2044 | field->arg2, | ||
| 2045 | field->arg3); | ||
| 2046 | if (!ret) | ||
| 2047 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2048 | break; | ||
| 2049 | } | ||
| 2050 | case TRACE_STACK: { | ||
| 2051 | struct stack_entry *field; | ||
| 2052 | |||
| 2053 | trace_assign_type(field, entry); | ||
| 2054 | |||
| 2055 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
| 2056 | if (i) { | ||
| 2057 | ret = trace_seq_puts(s, " <= "); | ||
| 2058 | if (!ret) | ||
| 2059 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2060 | } | ||
| 2061 | ret = seq_print_ip_sym(s, field->caller[i], | ||
| 2062 | sym_flags); | ||
| 2063 | if (!ret) | ||
| 2064 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2065 | } | ||
| 2066 | ret = trace_seq_puts(s, "\n"); | ||
| 2067 | if (!ret) | ||
| 2068 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2069 | break; | ||
| 2070 | } | ||
| 2071 | case TRACE_PRINT: { | ||
| 2072 | struct print_entry *field; | ||
| 2073 | |||
| 2074 | trace_assign_type(field, entry); | ||
| 2075 | |||
| 2076 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 2077 | trace_seq_printf(s, ": %s", field->buf); | ||
| 2078 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 2079 | trace_seq_print_cont(s, iter); | ||
| 2080 | break; | ||
| 2081 | } | ||
| 2082 | case TRACE_GRAPH_RET: { | ||
| 2083 | return print_graph_function(iter); | ||
| 2084 | } | ||
| 2085 | case TRACE_GRAPH_ENT: { | ||
| 2086 | return print_graph_function(iter); | ||
| 2087 | } | ||
| 2088 | case TRACE_BRANCH: { | ||
| 2089 | struct trace_branch *field; | ||
| 2090 | 1446 | ||
| 2091 | trace_assign_type(field, entry); | 1447 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) |
| 1448 | goto partial; | ||
| 2092 | 1449 | ||
| 2093 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
| 2094 | field->correct ? " ok " : " MISS ", | ||
| 2095 | field->func, | ||
| 2096 | field->file, | ||
| 2097 | field->line); | ||
| 2098 | break; | ||
| 2099 | } | ||
| 2100 | case TRACE_USER_STACK: { | ||
| 2101 | struct userstack_entry *field; | ||
| 2102 | |||
| 2103 | trace_assign_type(field, entry); | ||
| 2104 | |||
| 2105 | ret = seq_print_userip_objs(field, s, sym_flags); | ||
| 2106 | if (!ret) | ||
| 2107 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2108 | ret = trace_seq_putc(s, '\n'); | ||
| 2109 | if (!ret) | ||
| 2110 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2111 | break; | ||
| 2112 | } | ||
| 2113 | } | ||
| 2114 | return TRACE_TYPE_HANDLED; | 1450 | return TRACE_TYPE_HANDLED; |
| 1451 | partial: | ||
| 1452 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2115 | } | 1453 | } |
| 2116 | 1454 | ||
| 2117 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 1455 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
| 2118 | { | 1456 | { |
| 2119 | struct trace_seq *s = &iter->seq; | 1457 | struct trace_seq *s = &iter->seq; |
| 2120 | struct trace_entry *entry; | 1458 | struct trace_entry *entry; |
| 2121 | int ret; | 1459 | struct trace_event *event; |
| 2122 | int S, T; | ||
| 2123 | 1460 | ||
| 2124 | entry = iter->ent; | 1461 | entry = iter->ent; |
| 2125 | 1462 | ||
| 2126 | if (entry->type == TRACE_CONT) | 1463 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 2127 | return TRACE_TYPE_HANDLED; | 1464 | if (!trace_seq_printf(s, "%d %d %llu ", |
| 2128 | 1465 | entry->pid, iter->cpu, iter->ts)) | |
| 2129 | ret = trace_seq_printf(s, "%d %d %llu ", | 1466 | goto partial; |
| 2130 | entry->pid, iter->cpu, iter->ts); | ||
| 2131 | if (!ret) | ||
| 2132 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2133 | |||
| 2134 | switch (entry->type) { | ||
| 2135 | case TRACE_FN: { | ||
| 2136 | struct ftrace_entry *field; | ||
| 2137 | |||
| 2138 | trace_assign_type(field, entry); | ||
| 2139 | |||
| 2140 | ret = trace_seq_printf(s, "%x %x\n", | ||
| 2141 | field->ip, | ||
| 2142 | field->parent_ip); | ||
| 2143 | if (!ret) | ||
| 2144 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2145 | break; | ||
| 2146 | } | ||
| 2147 | case TRACE_CTX: | ||
| 2148 | case TRACE_WAKE: { | ||
| 2149 | struct ctx_switch_entry *field; | ||
| 2150 | |||
| 2151 | trace_assign_type(field, entry); | ||
| 2152 | |||
| 2153 | T = task_state_char(field->next_state); | ||
| 2154 | S = entry->type == TRACE_WAKE ? '+' : | ||
| 2155 | task_state_char(field->prev_state); | ||
| 2156 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", | ||
| 2157 | field->prev_pid, | ||
| 2158 | field->prev_prio, | ||
| 2159 | S, | ||
| 2160 | field->next_cpu, | ||
| 2161 | field->next_pid, | ||
| 2162 | field->next_prio, | ||
| 2163 | T); | ||
| 2164 | if (!ret) | ||
| 2165 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2166 | break; | ||
| 2167 | } | ||
| 2168 | case TRACE_SPECIAL: | ||
| 2169 | case TRACE_USER_STACK: | ||
| 2170 | case TRACE_STACK: { | ||
| 2171 | struct special_entry *field; | ||
| 2172 | |||
| 2173 | trace_assign_type(field, entry); | ||
| 2174 | |||
| 2175 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | ||
| 2176 | field->arg1, | ||
| 2177 | field->arg2, | ||
| 2178 | field->arg3); | ||
| 2179 | if (!ret) | ||
| 2180 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2181 | break; | ||
| 2182 | } | 1467 | } |
| 2183 | case TRACE_PRINT: { | ||
| 2184 | struct print_entry *field; | ||
| 2185 | 1468 | ||
| 2186 | trace_assign_type(field, entry); | 1469 | event = ftrace_find_event(entry->type); |
| 1470 | if (event && event->raw) | ||
| 1471 | return event->raw(iter, 0); | ||
| 1472 | |||
| 1473 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) | ||
| 1474 | goto partial; | ||
| 2187 | 1475 | ||
| 2188 | trace_seq_printf(s, "# %lx %s", field->ip, field->buf); | ||
| 2189 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 2190 | trace_seq_print_cont(s, iter); | ||
| 2191 | break; | ||
| 2192 | } | ||
| 2193 | } | ||
| 2194 | return TRACE_TYPE_HANDLED; | 1476 | return TRACE_TYPE_HANDLED; |
| 1477 | partial: | ||
| 1478 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2195 | } | 1479 | } |
| 2196 | 1480 | ||
| 2197 | #define SEQ_PUT_FIELD_RET(s, x) \ | ||
| 2198 | do { \ | ||
| 2199 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | ||
| 2200 | return 0; \ | ||
| 2201 | } while (0) | ||
| 2202 | |||
| 2203 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | ||
| 2204 | do { \ | ||
| 2205 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ | ||
| 2206 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | ||
| 2207 | return 0; \ | ||
| 2208 | } while (0) | ||
| 2209 | |||
| 2210 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 1481 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
| 2211 | { | 1482 | { |
| 2212 | struct trace_seq *s = &iter->seq; | 1483 | struct trace_seq *s = &iter->seq; |
| 2213 | unsigned char newline = '\n'; | 1484 | unsigned char newline = '\n'; |
| 2214 | struct trace_entry *entry; | 1485 | struct trace_entry *entry; |
| 2215 | int S, T; | 1486 | struct trace_event *event; |
| 2216 | 1487 | ||
| 2217 | entry = iter->ent; | 1488 | entry = iter->ent; |
| 2218 | 1489 | ||
| 2219 | if (entry->type == TRACE_CONT) | 1490 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 2220 | return TRACE_TYPE_HANDLED; | 1491 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); |
| 2221 | 1492 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | |
| 2222 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 1493 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); |
| 2223 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | ||
| 2224 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | ||
| 2225 | |||
| 2226 | switch (entry->type) { | ||
| 2227 | case TRACE_FN: { | ||
| 2228 | struct ftrace_entry *field; | ||
| 2229 | |||
| 2230 | trace_assign_type(field, entry); | ||
| 2231 | |||
| 2232 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | ||
| 2233 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | ||
| 2234 | break; | ||
| 2235 | } | 1494 | } |
| 2236 | case TRACE_CTX: | ||
| 2237 | case TRACE_WAKE: { | ||
| 2238 | struct ctx_switch_entry *field; | ||
| 2239 | |||
| 2240 | trace_assign_type(field, entry); | ||
| 2241 | |||
| 2242 | T = task_state_char(field->next_state); | ||
| 2243 | S = entry->type == TRACE_WAKE ? '+' : | ||
| 2244 | task_state_char(field->prev_state); | ||
| 2245 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | ||
| 2246 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | ||
| 2247 | SEQ_PUT_HEX_FIELD_RET(s, S); | ||
| 2248 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); | ||
| 2249 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | ||
| 2250 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | ||
| 2251 | SEQ_PUT_HEX_FIELD_RET(s, T); | ||
| 2252 | break; | ||
| 2253 | } | ||
| 2254 | case TRACE_SPECIAL: | ||
| 2255 | case TRACE_USER_STACK: | ||
| 2256 | case TRACE_STACK: { | ||
| 2257 | struct special_entry *field; | ||
| 2258 | |||
| 2259 | trace_assign_type(field, entry); | ||
| 2260 | 1495 | ||
| 2261 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | 1496 | event = ftrace_find_event(entry->type); |
| 2262 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | 1497 | if (event && event->hex) { |
| 2263 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | 1498 | enum print_line_t ret = event->hex(iter, 0); |
| 2264 | break; | 1499 | if (ret != TRACE_TYPE_HANDLED) |
| 2265 | } | 1500 | return ret; |
| 2266 | } | 1501 | } |
| 1502 | |||
| 2267 | SEQ_PUT_FIELD_RET(s, newline); | 1503 | SEQ_PUT_FIELD_RET(s, newline); |
| 2268 | 1504 | ||
| 2269 | return TRACE_TYPE_HANDLED; | 1505 | return TRACE_TYPE_HANDLED; |
| @@ -2278,13 +1514,10 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) | |||
| 2278 | 1514 | ||
| 2279 | trace_assign_type(field, entry); | 1515 | trace_assign_type(field, entry); |
| 2280 | 1516 | ||
| 2281 | ret = trace_seq_printf(s, field->buf); | 1517 | ret = trace_seq_printf(s, "%s", field->buf); |
| 2282 | if (!ret) | 1518 | if (!ret) |
| 2283 | return TRACE_TYPE_PARTIAL_LINE; | 1519 | return TRACE_TYPE_PARTIAL_LINE; |
| 2284 | 1520 | ||
| 2285 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 2286 | trace_seq_print_cont(s, iter); | ||
| 2287 | |||
| 2288 | return TRACE_TYPE_HANDLED; | 1521 | return TRACE_TYPE_HANDLED; |
| 2289 | } | 1522 | } |
| 2290 | 1523 | ||
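The "%s" change above is a correctness fix, not a cleanup: the buffer of a print entry is arbitrary text from ftrace_printk() and may itself contain '%' characters, which must not be parsed as conversions. For illustration:

/* field->buf == "load at 95% on cpu0" */
trace_seq_printf(s, field->buf);	/* '%' treated as a conversion: reads bogus varargs */
trace_seq_printf(s, "%s", field->buf);	/* printed verbatim, as intended */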
| @@ -2292,53 +1525,21 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
| 2292 | { | 1525 | { |
| 2293 | struct trace_seq *s = &iter->seq; | 1526 | struct trace_seq *s = &iter->seq; |
| 2294 | struct trace_entry *entry; | 1527 | struct trace_entry *entry; |
| 1528 | struct trace_event *event; | ||
| 2295 | 1529 | ||
| 2296 | entry = iter->ent; | 1530 | entry = iter->ent; |
| 2297 | 1531 | ||
| 2298 | if (entry->type == TRACE_CONT) | 1532 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 2299 | return TRACE_TYPE_HANDLED; | 1533 | SEQ_PUT_FIELD_RET(s, entry->pid); |
| 2300 | 1534 | SEQ_PUT_FIELD_RET(s, entry->cpu); | |
| 2301 | SEQ_PUT_FIELD_RET(s, entry->pid); | 1535 | SEQ_PUT_FIELD_RET(s, iter->ts); |
| 2302 | SEQ_PUT_FIELD_RET(s, entry->cpu); | ||
| 2303 | SEQ_PUT_FIELD_RET(s, iter->ts); | ||
| 2304 | |||
| 2305 | switch (entry->type) { | ||
| 2306 | case TRACE_FN: { | ||
| 2307 | struct ftrace_entry *field; | ||
| 2308 | |||
| 2309 | trace_assign_type(field, entry); | ||
| 2310 | |||
| 2311 | SEQ_PUT_FIELD_RET(s, field->ip); | ||
| 2312 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | ||
| 2313 | break; | ||
| 2314 | } | ||
| 2315 | case TRACE_CTX: { | ||
| 2316 | struct ctx_switch_entry *field; | ||
| 2317 | |||
| 2318 | trace_assign_type(field, entry); | ||
| 2319 | |||
| 2320 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | ||
| 2321 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | ||
| 2322 | SEQ_PUT_FIELD_RET(s, field->prev_state); | ||
| 2323 | SEQ_PUT_FIELD_RET(s, field->next_pid); | ||
| 2324 | SEQ_PUT_FIELD_RET(s, field->next_prio); | ||
| 2325 | SEQ_PUT_FIELD_RET(s, field->next_state); | ||
| 2326 | break; | ||
| 2327 | } | 1536 | } |
| 2328 | case TRACE_SPECIAL: | ||
| 2329 | case TRACE_USER_STACK: | ||
| 2330 | case TRACE_STACK: { | ||
| 2331 | struct special_entry *field; | ||
| 2332 | 1537 | ||
| 2333 | trace_assign_type(field, entry); | 1538 | event = ftrace_find_event(entry->type); |
| 1539 | if (event && event->binary) | ||
| 1540 | return event->binary(iter, 0); | ||
| 2334 | 1541 | ||
| 2335 | SEQ_PUT_FIELD_RET(s, field->arg1); | 1542 | return TRACE_TYPE_HANDLED; |
| 2336 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
| 2337 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
| 2338 | break; | ||
| 2339 | } | ||
| 2340 | } | ||
| 2341 | return 1; | ||
| 2342 | } | 1543 | } |
| 2343 | 1544 | ||
| 2344 | static int trace_empty(struct trace_iterator *iter) | 1545 | static int trace_empty(struct trace_iterator *iter) |
| @@ -2383,7 +1584,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
| 2383 | return print_raw_fmt(iter); | 1584 | return print_raw_fmt(iter); |
| 2384 | 1585 | ||
| 2385 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) | 1586 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) |
| 2386 | return print_lat_fmt(iter, iter->idx, iter->cpu); | 1587 | return print_lat_fmt(iter); |
| 2387 | 1588 | ||
| 2388 | return print_trace_fmt(iter); | 1589 | return print_trace_fmt(iter); |
| 2389 | } | 1590 | } |
| @@ -2985,7 +2186,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
| 2985 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2186 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
| 2986 | } | 2187 | } |
| 2987 | 2188 | ||
| 2988 | static int tracing_set_tracer(char *buf) | 2189 | static int tracing_set_tracer(const char *buf) |
| 2989 | { | 2190 | { |
| 2990 | struct trace_array *tr = &global_trace; | 2191 | struct trace_array *tr = &global_trace; |
| 2991 | struct tracer *t; | 2192 | struct tracer *t; |
| @@ -3691,6 +2892,15 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) | |||
| 3691 | } | 2892 | } |
| 3692 | EXPORT_SYMBOL_GPL(__ftrace_printk); | 2893 | EXPORT_SYMBOL_GPL(__ftrace_printk); |
| 3693 | 2894 | ||
| 2895 | int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) | ||
| 2896 | { | ||
| 2897 | if (!(trace_flags & TRACE_ITER_PRINTK)) | ||
| 2898 | return 0; | ||
| 2899 | |||
| 2900 | return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); | ||
| 2901 | } | ||
| 2902 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); | ||
| 2903 | |||
| 3694 | static int trace_panic_handler(struct notifier_block *this, | 2904 | static int trace_panic_handler(struct notifier_block *this, |
| 3695 | unsigned long event, void *unused) | 2905 | unsigned long event, void *unused) |
| 3696 | { | 2906 | { |
| @@ -3871,14 +3081,10 @@ __init static int tracer_alloc_buffers(void) | |||
| 3871 | trace_init_cmdlines(); | 3081 | trace_init_cmdlines(); |
| 3872 | 3082 | ||
| 3873 | register_tracer(&nop_trace); | 3083 | register_tracer(&nop_trace); |
| 3084 | current_trace = &nop_trace; | ||
| 3874 | #ifdef CONFIG_BOOT_TRACER | 3085 | #ifdef CONFIG_BOOT_TRACER |
| 3875 | register_tracer(&boot_tracer); | 3086 | register_tracer(&boot_tracer); |
| 3876 | current_trace = &boot_tracer; | ||
| 3877 | current_trace->init(&global_trace); | ||
| 3878 | #else | ||
| 3879 | current_trace = &nop_trace; | ||
| 3880 | #endif | 3087 | #endif |
| 3881 | |||
| 3882 | /* All seems OK, enable tracing */ | 3088 | /* All seems OK, enable tracing */ |
| 3883 | tracing_disabled = 0; | 3089 | tracing_disabled = 0; |
| 3884 | 3090 | ||
| @@ -3895,5 +3101,26 @@ out_free_buffer_mask: | |||
| 3895 | out: | 3101 | out: |
| 3896 | return ret; | 3102 | return ret; |
| 3897 | } | 3103 | } |
| 3104 | |||
| 3105 | __init static int clear_boot_tracer(void) | ||
| 3106 | { | ||
| 3107 | /* | ||
| 3108 | * The name of the default bootup tracer lives in an init section | ||
| 3109 | * that is freed after boot. This function runs at late_initcall | ||
| 3110 | * time: if no matching tracer was registered by then, clear the | ||
| 3111 | * pointer so that a later registration does not access memory | ||
| 3112 | * that is about to be freed. | ||
| 3113 | */ | ||
| 3114 | if (!default_bootup_tracer) | ||
| 3115 | return 0; | ||
| 3116 | |||
| 3117 | printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | ||
| 3118 | default_bootup_tracer); | ||
| 3119 | default_bootup_tracer = NULL; | ||
| 3120 | |||
| 3121 | return 0; | ||
| 3122 | } | ||
| 3123 | |||
| 3898 | early_initcall(tracer_alloc_buffers); | 3124 | early_initcall(tracer_alloc_buffers); |
| 3899 | fs_initcall(tracer_init_debugfs); | 3125 | fs_initcall(tracer_init_debugfs); |
| 3126 | late_initcall(clear_boot_tracer); | ||
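default_bootup_tracer is expected to be set very early from the kernel command line (an ftrace= boot option added elsewhere in this series); the sketch below only shows the presumed shape of that handler, with the buffer name, size macro and function name as assumptions:

static char bootup_tracer_buf[BOOTUP_TRACER_SIZE];	/* size macro assumed */

static int __init set_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;	/* declared elsewhere in this patch */
	return 1;
}
__setup("ftrace=", set_ftrace);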
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4d3d381bfd95..f0c7a0f08cac 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
| 10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
| 11 | #include <trace/boot.h> | 11 | #include <trace/boot.h> |
| 12 | #include <trace/kmemtrace.h> | ||
| 12 | 13 | ||
| 13 | enum trace_type { | 14 | enum trace_type { |
| 14 | __TRACE_FIRST_TYPE = 0, | 15 | __TRACE_FIRST_TYPE = 0, |
| @@ -16,7 +17,6 @@ enum trace_type { | |||
| 16 | TRACE_FN, | 17 | TRACE_FN, |
| 17 | TRACE_CTX, | 18 | TRACE_CTX, |
| 18 | TRACE_WAKE, | 19 | TRACE_WAKE, |
| 19 | TRACE_CONT, | ||
| 20 | TRACE_STACK, | 20 | TRACE_STACK, |
| 21 | TRACE_PRINT, | 21 | TRACE_PRINT, |
| 22 | TRACE_SPECIAL, | 22 | TRACE_SPECIAL, |
| @@ -29,9 +29,12 @@ enum trace_type { | |||
| 29 | TRACE_GRAPH_ENT, | 29 | TRACE_GRAPH_ENT, |
| 30 | TRACE_USER_STACK, | 30 | TRACE_USER_STACK, |
| 31 | TRACE_HW_BRANCHES, | 31 | TRACE_HW_BRANCHES, |
| 32 | TRACE_KMEM_ALLOC, | ||
| 33 | TRACE_KMEM_FREE, | ||
| 32 | TRACE_POWER, | 34 | TRACE_POWER, |
| 35 | TRACE_BLK, | ||
| 33 | 36 | ||
| 34 | __TRACE_LAST_TYPE | 37 | __TRACE_LAST_TYPE, |
| 35 | }; | 38 | }; |
| 36 | 39 | ||
| 37 | /* | 40 | /* |
| @@ -170,6 +173,24 @@ struct trace_power { | |||
| 170 | struct power_trace state_data; | 173 | struct power_trace state_data; |
| 171 | }; | 174 | }; |
| 172 | 175 | ||
| 176 | struct kmemtrace_alloc_entry { | ||
| 177 | struct trace_entry ent; | ||
| 178 | enum kmemtrace_type_id type_id; | ||
| 179 | unsigned long call_site; | ||
| 180 | const void *ptr; | ||
| 181 | size_t bytes_req; | ||
| 182 | size_t bytes_alloc; | ||
| 183 | gfp_t gfp_flags; | ||
| 184 | int node; | ||
| 185 | }; | ||
| 186 | |||
| 187 | struct kmemtrace_free_entry { | ||
| 188 | struct trace_entry ent; | ||
| 189 | enum kmemtrace_type_id type_id; | ||
| 190 | unsigned long call_site; | ||
| 191 | const void *ptr; | ||
| 192 | }; | ||
| 193 | |||
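For reference, roughly how the kmemtrace tracer would fill the fixed part of the new allocation record; the trace_entry header itself is set up by tracing_generic_entry_update() as usual, and the KMEMTRACE_TYPE_KMALLOC constant is an assumption about the enum in <trace/kmemtrace.h>:

static void fill_alloc_entry(struct kmemtrace_alloc_entry *e,
			     unsigned long call_site, const void *ptr,
			     size_t bytes_req, size_t bytes_alloc,
			     gfp_t gfp_flags, int node)
{
	e->type_id	= KMEMTRACE_TYPE_KMALLOC;	/* assumed enum value */
	e->call_site	= call_site;
	e->ptr		= ptr;
	e->bytes_req	= bytes_req;
	e->bytes_alloc	= bytes_alloc;
	e->gfp_flags	= gfp_flags;
	e->node		= node;		/* -1 when no NUMA node applies */
}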
| 173 | /* | 194 | /* |
| 174 | * trace_flag_type is an enumeration that holds different | 195 | * trace_flag_type is an enumeration that holds different |
| 175 | * states when a trace occurs. These are: | 196 | * states when a trace occurs. These are: |
| @@ -178,7 +199,6 @@ struct trace_power { | |||
| 178 | * NEED_RESCED - reschedule is requested | 199 | * NEED_RESCED - reschedule is requested |
| 179 | * HARDIRQ - inside an interrupt handler | 200 | * HARDIRQ - inside an interrupt handler |
| 180 | * SOFTIRQ - inside a softirq handler | 201 | * SOFTIRQ - inside a softirq handler |
| 181 | * CONT - multiple entries hold the trace item | ||
| 182 | */ | 202 | */ |
| 183 | enum trace_flag_type { | 203 | enum trace_flag_type { |
| 184 | TRACE_FLAG_IRQS_OFF = 0x01, | 204 | TRACE_FLAG_IRQS_OFF = 0x01, |
| @@ -186,7 +206,6 @@ enum trace_flag_type { | |||
| 186 | TRACE_FLAG_NEED_RESCHED = 0x04, | 206 | TRACE_FLAG_NEED_RESCHED = 0x04, |
| 187 | TRACE_FLAG_HARDIRQ = 0x08, | 207 | TRACE_FLAG_HARDIRQ = 0x08, |
| 188 | TRACE_FLAG_SOFTIRQ = 0x10, | 208 | TRACE_FLAG_SOFTIRQ = 0x10, |
| 189 | TRACE_FLAG_CONT = 0x20, | ||
| 190 | }; | 209 | }; |
| 191 | 210 | ||
| 192 | #define TRACE_BUF_SIZE 1024 | 211 | #define TRACE_BUF_SIZE 1024 |
| @@ -262,7 +281,6 @@ extern void __ftrace_bad_type(void); | |||
| 262 | do { \ | 281 | do { \ |
| 263 | IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ | 282 | IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ |
| 264 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ | 283 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ |
| 265 | IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \ | ||
| 266 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ | 284 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ |
| 267 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ | 285 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
| 268 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ | 286 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
| @@ -280,6 +298,10 @@ extern void __ftrace_bad_type(void); | |||
| 280 | TRACE_GRAPH_RET); \ | 298 | TRACE_GRAPH_RET); \ |
| 281 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ | 299 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ |
| 282 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | 300 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ |
| 301 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ | ||
| 302 | TRACE_KMEM_ALLOC); \ | ||
| 303 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | ||
| 304 | TRACE_KMEM_FREE); \ | ||
| 283 | __ftrace_bad_type(); \ | 305 | __ftrace_bad_type(); \ |
| 284 | } while (0) | 306 | } while (0) |
| 285 | 307 | ||
| @@ -313,6 +335,7 @@ struct tracer_flags { | |||
| 313 | /* Makes more easy to define a tracer opt */ | 335 | /* Makes more easy to define a tracer opt */ |
| 314 | #define TRACER_OPT(s, b) .name = #s, .bit = b | 336 | #define TRACER_OPT(s, b) .name = #s, .bit = b |
| 315 | 337 | ||
| 338 | |||
| 316 | /* | 339 | /* |
| 317 | * A specific tracer, represented by methods that operate on a trace array: | 340 | * A specific tracer, represented by methods that operate on a trace array: |
| 318 | */ | 341 | */ |
| @@ -340,6 +363,7 @@ struct tracer { | |||
| 340 | struct tracer *next; | 363 | struct tracer *next; |
| 341 | int print_max; | 364 | int print_max; |
| 342 | struct tracer_flags *flags; | 365 | struct tracer_flags *flags; |
| 366 | struct tracer_stat *stats; | ||
| 343 | }; | 367 | }; |
| 344 | 368 | ||
| 345 | struct trace_seq { | 369 | struct trace_seq { |
| @@ -381,6 +405,10 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | |||
| 381 | 405 | ||
| 382 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | 406 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
| 383 | struct trace_array_cpu *data); | 407 | struct trace_array_cpu *data); |
| 408 | |||
| 409 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | ||
| 410 | int *ent_cpu, u64 *ent_ts); | ||
| 411 | |||
| 384 | void tracing_generic_entry_update(struct trace_entry *entry, | 412 | void tracing_generic_entry_update(struct trace_entry *entry, |
| 385 | unsigned long flags, | 413 | unsigned long flags, |
| 386 | int pc); | 414 | int pc); |
| @@ -415,7 +443,6 @@ void trace_function(struct trace_array *tr, | |||
| 415 | 443 | ||
| 416 | void trace_graph_return(struct ftrace_graph_ret *trace); | 444 | void trace_graph_return(struct ftrace_graph_ret *trace); |
| 417 | int trace_graph_entry(struct ftrace_graph_ent *trace); | 445 | int trace_graph_entry(struct ftrace_graph_ent *trace); |
| 418 | void trace_hw_branch(struct trace_array *tr, u64 from, u64 to); | ||
| 419 | 446 | ||
| 420 | void tracing_start_cmdline_record(void); | 447 | void tracing_start_cmdline_record(void); |
| 421 | void tracing_stop_cmdline_record(void); | 448 | void tracing_stop_cmdline_record(void); |
| @@ -434,15 +461,12 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); | |||
| 434 | void update_max_tr_single(struct trace_array *tr, | 461 | void update_max_tr_single(struct trace_array *tr, |
| 435 | struct task_struct *tsk, int cpu); | 462 | struct task_struct *tsk, int cpu); |
| 436 | 463 | ||
| 437 | extern cycle_t ftrace_now(int cpu); | 464 | void __trace_stack(struct trace_array *tr, |
| 465 | struct trace_array_cpu *data, | ||
| 466 | unsigned long flags, | ||
| 467 | int skip, int pc); | ||
| 438 | 468 | ||
| 439 | #ifdef CONFIG_FUNCTION_TRACER | 469 | extern cycle_t ftrace_now(int cpu); |
| 440 | void tracing_start_function_trace(void); | ||
| 441 | void tracing_stop_function_trace(void); | ||
| 442 | #else | ||
| 443 | # define tracing_start_function_trace() do { } while (0) | ||
| 444 | # define tracing_stop_function_trace() do { } while (0) | ||
| 445 | #endif | ||
| 446 | 470 | ||
| 447 | #ifdef CONFIG_CONTEXT_SWITCH_TRACER | 471 | #ifdef CONFIG_CONTEXT_SWITCH_TRACER |
| 448 | typedef void | 472 | typedef void |
| @@ -456,10 +480,10 @@ struct tracer_switch_ops { | |||
| 456 | void *private; | 480 | void *private; |
| 457 | struct tracer_switch_ops *next; | 481 | struct tracer_switch_ops *next; |
| 458 | }; | 482 | }; |
| 459 | |||
| 460 | char *trace_find_cmdline(int pid); | ||
| 461 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 483 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
| 462 | 484 | ||
| 485 | extern char *trace_find_cmdline(int pid); | ||
| 486 | |||
| 463 | #ifdef CONFIG_DYNAMIC_FTRACE | 487 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 464 | extern unsigned long ftrace_update_tot_cnt; | 488 | extern unsigned long ftrace_update_tot_cnt; |
| 465 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 489 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
| @@ -488,15 +512,6 @@ extern int trace_selftest_startup_branch(struct tracer *trace, | |||
| 488 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 512 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
| 489 | 513 | ||
| 490 | extern void *head_page(struct trace_array_cpu *data); | 514 | extern void *head_page(struct trace_array_cpu *data); |
| 491 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | ||
| 492 | extern void trace_seq_print_cont(struct trace_seq *s, | ||
| 493 | struct trace_iterator *iter); | ||
| 494 | |||
| 495 | extern int | ||
| 496 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | ||
| 497 | unsigned long sym_flags); | ||
| 498 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
| 499 | size_t cnt); | ||
| 500 | extern long ns2usecs(cycle_t nsec); | 515 | extern long ns2usecs(cycle_t nsec); |
| 501 | extern int | 516 | extern int |
| 502 | trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); | 517 | trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); |
| @@ -580,7 +595,8 @@ enum trace_iterator_flags { | |||
| 580 | TRACE_ITER_ANNOTATE = 0x2000, | 595 | TRACE_ITER_ANNOTATE = 0x2000, |
| 581 | TRACE_ITER_USERSTACKTRACE = 0x4000, | 596 | TRACE_ITER_USERSTACKTRACE = 0x4000, |
| 582 | TRACE_ITER_SYM_USEROBJ = 0x8000, | 597 | TRACE_ITER_SYM_USEROBJ = 0x8000, |
| 583 | TRACE_ITER_PRINTK_MSGONLY = 0x10000 | 598 | TRACE_ITER_PRINTK_MSGONLY = 0x10000, |
| 599 | TRACE_ITER_CONTEXT_INFO = 0x20000 /* Print pid/cpu/time */ | ||
| 584 | }; | 600 | }; |
| 585 | 601 | ||
| 586 | /* | 602 | /* |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 366c8c333e13..1f07895977a0 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/kallsyms.h> | 11 | #include <linux/kallsyms.h> |
| 12 | 12 | ||
| 13 | #include "trace.h" | 13 | #include "trace.h" |
| 14 | #include "trace_output.h" | ||
| 14 | 15 | ||
| 15 | static struct trace_array *boot_trace; | 16 | static struct trace_array *boot_trace; |
| 16 | static bool pre_initcalls_finished; | 17 | static bool pre_initcalls_finished; |
| @@ -27,13 +28,13 @@ void start_boot_trace(void) | |||
| 27 | 28 | ||
| 28 | void enable_boot_trace(void) | 29 | void enable_boot_trace(void) |
| 29 | { | 30 | { |
| 30 | if (pre_initcalls_finished) | 31 | if (boot_trace && pre_initcalls_finished) |
| 31 | tracing_start_sched_switch_record(); | 32 | tracing_start_sched_switch_record(); |
| 32 | } | 33 | } |
| 33 | 34 | ||
| 34 | void disable_boot_trace(void) | 35 | void disable_boot_trace(void) |
| 35 | { | 36 | { |
| 36 | if (pre_initcalls_finished) | 37 | if (boot_trace && pre_initcalls_finished) |
| 37 | tracing_stop_sched_switch_record(); | 38 | tracing_stop_sched_switch_record(); |
| 38 | } | 39 | } |
| 39 | 40 | ||
| @@ -42,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr) | |||
| 42 | int cpu; | 43 | int cpu; |
| 43 | boot_trace = tr; | 44 | boot_trace = tr; |
| 44 | 45 | ||
| 46 | if (!tr) | ||
| 47 | return 0; | ||
| 48 | |||
| 45 | for_each_cpu(cpu, cpu_possible_mask) | 49 | for_each_cpu(cpu, cpu_possible_mask) |
| 46 | tracing_reset(tr, cpu); | 50 | tracing_reset(tr, cpu); |
| 47 | 51 | ||
| @@ -131,7 +135,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | |||
| 131 | unsigned long irq_flags; | 135 | unsigned long irq_flags; |
| 132 | struct trace_array *tr = boot_trace; | 136 | struct trace_array *tr = boot_trace; |
| 133 | 137 | ||
| 134 | if (!pre_initcalls_finished) | 138 | if (!tr || !pre_initcalls_finished) |
| 135 | return; | 139 | return; |
| 136 | 140 | ||
| 137 | /* Get its name now since this function could | 141 | /* Get its name now since this function could |
| @@ -163,7 +167,7 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | |||
| 163 | unsigned long irq_flags; | 167 | unsigned long irq_flags; |
| 164 | struct trace_array *tr = boot_trace; | 168 | struct trace_array *tr = boot_trace; |
| 165 | 169 | ||
| 166 | if (!pre_initcalls_finished) | 170 | if (!tr || !pre_initcalls_finished) |
| 167 | return; | 171 | return; |
| 168 | 172 | ||
| 169 | sprint_symbol(bt->func, (unsigned long)fn); | 173 | sprint_symbol(bt->func, (unsigned long)fn); |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 6c00feb3bac7..7ac72a44b2d3 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
| @@ -14,12 +14,17 @@ | |||
| 14 | #include <linux/hash.h> | 14 | #include <linux/hash.h> |
| 15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 16 | #include <asm/local.h> | 16 | #include <asm/local.h> |
| 17 | |||
| 17 | #include "trace.h" | 18 | #include "trace.h" |
| 19 | #include "trace_stat.h" | ||
| 20 | #include "trace_output.h" | ||
| 18 | 21 | ||
| 19 | #ifdef CONFIG_BRANCH_TRACER | 22 | #ifdef CONFIG_BRANCH_TRACER |
| 20 | 23 | ||
| 24 | static struct tracer branch_trace; | ||
| 21 | static int branch_tracing_enabled __read_mostly; | 25 | static int branch_tracing_enabled __read_mostly; |
| 22 | static DEFINE_MUTEX(branch_tracing_mutex); | 26 | static DEFINE_MUTEX(branch_tracing_mutex); |
| 27 | |||
| 23 | static struct trace_array *branch_tracer; | 28 | static struct trace_array *branch_tracer; |
| 24 | 29 | ||
| 25 | static void | 30 | static void |
| @@ -128,11 +133,7 @@ static void stop_branch_trace(struct trace_array *tr) | |||
| 128 | 133 | ||
| 129 | static int branch_trace_init(struct trace_array *tr) | 134 | static int branch_trace_init(struct trace_array *tr) |
| 130 | { | 135 | { |
| 131 | int cpu; | 136 | tracing_reset_online_cpus(tr); |
| 132 | |||
| 133 | for_each_online_cpu(cpu) | ||
| 134 | tracing_reset(tr, cpu); | ||
| 135 | |||
| 136 | start_branch_trace(tr); | 137 | start_branch_trace(tr); |
| 137 | return 0; | 138 | return 0; |
| 138 | } | 139 | } |
| @@ -142,22 +143,74 @@ static void branch_trace_reset(struct trace_array *tr) | |||
| 142 | stop_branch_trace(tr); | 143 | stop_branch_trace(tr); |
| 143 | } | 144 | } |
| 144 | 145 | ||
| 145 | struct tracer branch_trace __read_mostly = | 146 | static int |
| 147 | trace_print_print(struct trace_seq *s, struct trace_entry *entry, int flags) | ||
| 148 | { | ||
| 149 | struct print_entry *field; | ||
| 150 | |||
| 151 | trace_assign_type(field, entry); | ||
| 152 | |||
| 153 | if (seq_print_ip_sym(s, field->ip, flags)) | ||
| 154 | goto partial; | ||
| 155 | |||
| 156 | if (trace_seq_printf(s, ": %s", field->buf)) | ||
| 157 | goto partial; | ||
| 158 | |||
| 159 | partial: | ||
| 160 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 161 | } | ||
| 162 | |||
| 163 | static enum print_line_t trace_branch_print(struct trace_iterator *iter, | ||
| 164 | int flags) | ||
| 165 | { | ||
| 166 | struct trace_branch *field; | ||
| 167 | |||
| 168 | trace_assign_type(field, iter->ent); | ||
| 169 | |||
| 170 | if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", | ||
| 171 | field->correct ? " ok " : " MISS ", | ||
| 172 | field->func, | ||
| 173 | field->file, | ||
| 174 | field->line)) | ||
| 175 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 176 | |||
| 177 | return TRACE_TYPE_HANDLED; | ||
| 178 | } | ||
| 179 | |||
| 180 | |||
| 181 | static struct trace_event trace_branch_event = { | ||
| 182 | .type = TRACE_BRANCH, | ||
| 183 | .trace = trace_branch_print, | ||
| 184 | .latency_trace = trace_branch_print, | ||
| 185 | .raw = trace_nop_print, | ||
| 186 | .hex = trace_nop_print, | ||
| 187 | .binary = trace_nop_print, | ||
| 188 | }; | ||
| 189 | |||
| 190 | static struct tracer branch_trace __read_mostly = | ||
| 146 | { | 191 | { |
| 147 | .name = "branch", | 192 | .name = "branch", |
| 148 | .init = branch_trace_init, | 193 | .init = branch_trace_init, |
| 149 | .reset = branch_trace_reset, | 194 | .reset = branch_trace_reset, |
| 150 | #ifdef CONFIG_FTRACE_SELFTEST | 195 | #ifdef CONFIG_FTRACE_SELFTEST |
| 151 | .selftest = trace_selftest_startup_branch, | 196 | .selftest = trace_selftest_startup_branch, |
| 152 | #endif | 197 | #endif /* CONFIG_FTRACE_SELFTEST */ |
| 153 | }; | 198 | }; |
| 154 | 199 | ||
| 155 | __init static int init_branch_trace(void) | 200 | __init static int init_branch_tracer(void) |
| 156 | { | 201 | { |
| 202 | int ret; | ||
| 203 | |||
| 204 | ret = register_ftrace_event(&trace_branch_event); | ||
| 205 | if (!ret) { | ||
| 206 | printk(KERN_WARNING "Warning: could not register " | ||
| 207 | "branch events\n"); | ||
| 208 | return 1; | ||
| 209 | } | ||
| 157 | return register_tracer(&branch_trace); | 210 | return register_tracer(&branch_trace); |
| 158 | } | 211 | } |
| 212 | device_initcall(init_branch_tracer); | ||
| 159 | 213 | ||
| 160 | device_initcall(init_branch_trace); | ||
| 161 | #else | 214 | #else |
| 162 | static inline | 215 | static inline |
| 163 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) | 216 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) |
| @@ -183,66 +236,39 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect) | |||
| 183 | } | 236 | } |
| 184 | EXPORT_SYMBOL(ftrace_likely_update); | 237 | EXPORT_SYMBOL(ftrace_likely_update); |
| 185 | 238 | ||
| 186 | struct ftrace_pointer { | 239 | extern unsigned long __start_annotated_branch_profile[]; |
| 187 | void *start; | 240 | extern unsigned long __stop_annotated_branch_profile[]; |
| 188 | void *stop; | ||
| 189 | int hit; | ||
| 190 | }; | ||
| 191 | 241 | ||
| 192 | static void * | 242 | static int annotated_branch_stat_headers(struct seq_file *m) |
| 193 | t_next(struct seq_file *m, void *v, loff_t *pos) | ||
| 194 | { | 243 | { |
| 195 | const struct ftrace_pointer *f = m->private; | 244 | seq_printf(m, " correct incorrect %% "); |
| 196 | struct ftrace_branch_data *p = v; | 245 | seq_printf(m, " Function " |
| 197 | 246 | " File Line\n" | |
| 198 | (*pos)++; | 247 | " ------- --------- - " |
| 199 | 248 | " -------- " | |
| 200 | if (v == (void *)1) | 249 | " ---- ----\n"); |
| 201 | return f->start; | 250 | return 0; |
| 202 | |||
| 203 | ++p; | ||
| 204 | |||
| 205 | if ((void *)p >= (void *)f->stop) | ||
| 206 | return NULL; | ||
| 207 | |||
| 208 | return p; | ||
| 209 | } | 251 | } |
| 210 | 252 | ||
| 211 | static void *t_start(struct seq_file *m, loff_t *pos) | 253 | static inline long get_incorrect_percent(struct ftrace_branch_data *p) |
| 212 | { | 254 | { |
| 213 | void *t = (void *)1; | 255 | long percent; |
| 214 | loff_t l = 0; | ||
| 215 | |||
| 216 | for (; t && l < *pos; t = t_next(m, t, &l)) | ||
| 217 | ; | ||
| 218 | 256 | ||
| 219 | return t; | 257 | if (p->correct) { |
| 220 | } | 258 | percent = p->incorrect * 100; |
| 259 | percent /= p->correct + p->incorrect; | ||
| 260 | } else | ||
| 261 | percent = p->incorrect ? 100 : -1; | ||
| 221 | 262 | ||
| 222 | static void t_stop(struct seq_file *m, void *p) | 263 | return percent; |
| 223 | { | ||
| 224 | } | 264 | } |
| 225 | 265 | ||
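Worked examples for get_incorrect_percent(), mainly to make the integer rounding and the -1 sentinel explicit:

/*
 * correct = 199, incorrect = 1  ->  1 * 100 / (199 + 1) = 0   (rounds down)
 * correct = 1,   incorrect = 3  ->  3 * 100 / 4         = 75
 * correct = 0,   incorrect = 5  ->  100
 * correct = 0,   incorrect = 0  ->  -1   (no samples; printed specially
 *                                         by branch_stat_show())
 */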
| 226 | static int t_show(struct seq_file *m, void *v) | 266 | static int branch_stat_show(struct seq_file *m, void *v) |
| 227 | { | 267 | { |
| 228 | const struct ftrace_pointer *fp = m->private; | ||
| 229 | struct ftrace_branch_data *p = v; | 268 | struct ftrace_branch_data *p = v; |
| 230 | const char *f; | 269 | const char *f; |
| 231 | long percent; | 270 | long percent; |
| 232 | 271 | ||
| 233 | if (v == (void *)1) { | ||
| 234 | if (fp->hit) | ||
| 235 | seq_printf(m, " miss hit %% "); | ||
| 236 | else | ||
| 237 | seq_printf(m, " correct incorrect %% "); | ||
| 238 | seq_printf(m, " Function " | ||
| 239 | " File Line\n" | ||
| 240 | " ------- --------- - " | ||
| 241 | " -------- " | ||
| 242 | " ---- ----\n"); | ||
| 243 | return 0; | ||
| 244 | } | ||
| 245 | |||
| 246 | /* Only print the file, not the path */ | 272 | /* Only print the file, not the path */ |
| 247 | f = p->file + strlen(p->file); | 273 | f = p->file + strlen(p->file); |
| 248 | while (f >= p->file && *f != '/') | 274 | while (f >= p->file && *f != '/') |
| @@ -252,11 +278,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 252 | /* | 278 | /* |
| 253 | * The miss is overlayed on correct, and hit on incorrect. | 279 | * The miss is overlayed on correct, and hit on incorrect. |
| 254 | */ | 280 | */ |
| 255 | if (p->correct) { | 281 | percent = get_incorrect_percent(p); |
| 256 | percent = p->incorrect * 100; | ||
| 257 | percent /= p->correct + p->incorrect; | ||
| 258 | } else | ||
| 259 | percent = p->incorrect ? 100 : -1; | ||
| 260 | 282 | ||
| 261 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); | 283 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); |
| 262 | if (percent < 0) | 284 | if (percent < 0) |
| @@ -267,76 +289,118 @@ static int t_show(struct seq_file *m, void *v) | |||
| 267 | return 0; | 289 | return 0; |
| 268 | } | 290 | } |
| 269 | 291 | ||
| 270 | static struct seq_operations tracing_likely_seq_ops = { | 292 | static void *annotated_branch_stat_start(void) |
| 271 | .start = t_start, | 293 | { |
| 272 | .next = t_next, | 294 | return __start_annotated_branch_profile; |
| 273 | .stop = t_stop, | 295 | } |
| 274 | .show = t_show, | 296 | |
| 297 | static void * | ||
| 298 | annotated_branch_stat_next(void *v, int idx) | ||
| 299 | { | ||
| 300 | struct ftrace_branch_data *p = v; | ||
| 301 | |||
| 302 | ++p; | ||
| 303 | |||
| 304 | if ((void *)p >= (void *)__stop_annotated_branch_profile) | ||
| 305 | return NULL; | ||
| 306 | |||
| 307 | return p; | ||
| 308 | } | ||
| 309 | |||
| 310 | static int annotated_branch_stat_cmp(void *p1, void *p2) | ||
| 311 | { | ||
| 312 | struct ftrace_branch_data *a = p1; | ||
| 313 | struct ftrace_branch_data *b = p2; | ||
| 314 | |||
| 315 | long percent_a, percent_b; | ||
| 316 | |||
| 317 | percent_a = get_incorrect_percent(a); | ||
| 318 | percent_b = get_incorrect_percent(b); | ||
| 319 | |||
| 320 | if (percent_a < percent_b) | ||
| 321 | return -1; | ||
| 322 | if (percent_a > percent_b) | ||
| 323 | return 1; | ||
| 324 | else | ||
| 325 | return 0; | ||
| 326 | } | ||
| 327 | |||
| 328 | static struct tracer_stat annotated_branch_stats = { | ||
| 329 | .name = "branch_annotated", | ||
| 330 | .stat_start = annotated_branch_stat_start, | ||
| 331 | .stat_next = annotated_branch_stat_next, | ||
| 332 | .stat_cmp = annotated_branch_stat_cmp, | ||
| 333 | .stat_headers = annotated_branch_stat_headers, | ||
| 334 | .stat_show = branch_stat_show | ||
| 275 | }; | 335 | }; |
| 276 | 336 | ||
| 277 | static int tracing_branch_open(struct inode *inode, struct file *file) | 337 | __init static int init_annotated_branch_stats(void) |
| 278 | { | 338 | { |
| 279 | int ret; | 339 | int ret; |
| 280 | 340 | ||
| 281 | ret = seq_open(file, &tracing_likely_seq_ops); | 341 | ret = register_stat_tracer(&annotated_branch_stats); |
| 282 | if (!ret) { | 342 | if (!ret) { |
| 283 | struct seq_file *m = file->private_data; | 343 | printk(KERN_WARNING "Warning: could not register " |
| 284 | m->private = (void *)inode->i_private; | 344 | "annotated branches stats\n"); |
| 345 | return 1; | ||
| 285 | } | 346 | } |
| 286 | 347 | return 0; | |
| 287 | return ret; | ||
| 288 | } | 348 | } |
| 289 | 349 | fs_initcall(init_annotated_branch_stats); | |
| 290 | static const struct file_operations tracing_branch_fops = { | ||
| 291 | .open = tracing_branch_open, | ||
| 292 | .read = seq_read, | ||
| 293 | .llseek = seq_lseek, | ||
| 294 | }; | ||
| 295 | 350 | ||
| 296 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | 351 | #ifdef CONFIG_PROFILE_ALL_BRANCHES |
| 352 | |||
| 297 | extern unsigned long __start_branch_profile[]; | 353 | extern unsigned long __start_branch_profile[]; |
| 298 | extern unsigned long __stop_branch_profile[]; | 354 | extern unsigned long __stop_branch_profile[]; |
| 299 | 355 | ||
| 300 | static const struct ftrace_pointer ftrace_branch_pos = { | 356 | static int all_branch_stat_headers(struct seq_file *m) |
| 301 | .start = __start_branch_profile, | 357 | { |
| 302 | .stop = __stop_branch_profile, | 358 | seq_printf(m, " miss hit %% "); |
| 303 | .hit = 1, | 359 | seq_printf(m, " Function " |
| 304 | }; | 360 | " File Line\n" |
| 361 | " ------- --------- - " | ||
| 362 | " -------- " | ||
| 363 | " ---- ----\n"); | ||
| 364 | return 0; | ||
| 365 | } | ||
| 305 | 366 | ||
| 306 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ | 367 | static void *all_branch_stat_start(void) |
| 368 | { | ||
| 369 | return __start_branch_profile; | ||
| 370 | } | ||
| 307 | 371 | ||
| 308 | extern unsigned long __start_annotated_branch_profile[]; | 372 | static void * |
| 309 | extern unsigned long __stop_annotated_branch_profile[]; | 373 | all_branch_stat_next(void *v, int idx) |
| 374 | { | ||
| 375 | struct ftrace_branch_data *p = v; | ||
| 310 | 376 | ||
| 311 | static const struct ftrace_pointer ftrace_annotated_branch_pos = { | 377 | ++p; |
| 312 | .start = __start_annotated_branch_profile, | ||
| 313 | .stop = __stop_annotated_branch_profile, | ||
| 314 | }; | ||
| 315 | 378 | ||
| 316 | static __init int ftrace_branch_init(void) | 379 | if ((void *)p >= (void *)__stop_branch_profile) |
| 317 | { | 380 | return NULL; |
| 318 | struct dentry *d_tracer; | ||
| 319 | struct dentry *entry; | ||
| 320 | 381 | ||
| 321 | d_tracer = tracing_init_dentry(); | 382 | return p; |
| 383 | } | ||
| 322 | 384 | ||
| 323 | entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer, | 385 | static struct tracer_stat all_branch_stats = { |
| 324 | (void *)&ftrace_annotated_branch_pos, | 386 | .name = "branch_all", |
| 325 | &tracing_branch_fops); | 387 | .stat_start = all_branch_stat_start, |
| 326 | if (!entry) | 388 | .stat_next = all_branch_stat_next, |
| 327 | pr_warning("Could not create debugfs " | 389 | .stat_headers = all_branch_stat_headers, |
| 328 | "'profile_annotatet_branch' entry\n"); | 390 | .stat_show = branch_stat_show |
| 391 | }; | ||
| 329 | 392 | ||
| 330 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | 393 | __init static int all_annotated_branch_stats(void) |
| 331 | entry = debugfs_create_file("profile_branch", 0444, d_tracer, | 394 | { |
| 332 | (void *)&ftrace_branch_pos, | 395 | int ret; |
| 333 | &tracing_branch_fops); | ||
| 334 | if (!entry) | ||
| 335 | pr_warning("Could not create debugfs" | ||
| 336 | " 'profile_branch' entry\n"); | ||
| 337 | #endif | ||
| 338 | 396 | ||
| 397 | ret = register_stat_tracer(&all_branch_stats); | ||
| 398 | if (!ret) { | ||
| 399 | printk(KERN_WARNING "Warning: could not register " | ||
| 400 | "all branches stats\n"); | ||
| 401 | return 1; | ||
| 402 | } | ||
| 339 | return 0; | 403 | return 0; |
| 340 | } | 404 | } |
| 341 | 405 | fs_initcall(all_annotated_branch_stats); | |
| 342 | device_initcall(ftrace_branch_init); | 406 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
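For reference, the "miss is overlayed on correct" remark refers to the union inside struct ftrace_branch_data, which the likely()/unlikely() instrumentation emits into the linker sections walked above. Roughly, as declared in include/linux/compiler.h around this time (layout from memory, so treat it as a sketch):

struct ftrace_branch_data {
        const char      *func;
        const char      *file;
        unsigned        line;
        union {
                struct {
                        unsigned long   correct;
                        unsigned long   incorrect;
                };
                struct {
                        unsigned long   miss;
                        unsigned long   hit;
                };
        };
};

The annotated profile counts correct/incorrect predictions while the all-branches profile counts miss/hit through the same two words, which is why all_branch_stat_headers() prints different column titles over identical storage. Once register_stat_tracer() succeeds, the sorted output typically appears as trace_stat/branch_annotated and trace_stat/branch_all under the tracing debugfs directory.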
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 9236d7e25a16..b3a320f8aba7 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -16,8 +16,17 @@ | |||
| 16 | 16 | ||
| 17 | #include "trace.h" | 17 | #include "trace.h" |
| 18 | 18 | ||
| 19 | /* function tracing enabled */ | ||
| 20 | static int ftrace_function_enabled; | ||
| 21 | |||
| 22 | static struct trace_array *func_trace; | ||
| 23 | |||
| 24 | static void tracing_start_function_trace(void); | ||
| 25 | static void tracing_stop_function_trace(void); | ||
| 26 | |||
| 19 | static void start_function_trace(struct trace_array *tr) | 27 | static void start_function_trace(struct trace_array *tr) |
| 20 | { | 28 | { |
| 29 | func_trace = tr; | ||
| 21 | tr->cpu = get_cpu(); | 30 | tr->cpu = get_cpu(); |
| 22 | tracing_reset_online_cpus(tr); | 31 | tracing_reset_online_cpus(tr); |
| 23 | put_cpu(); | 32 | put_cpu(); |
| @@ -48,14 +57,188 @@ static void function_trace_start(struct trace_array *tr) | |||
| 48 | tracing_reset_online_cpus(tr); | 57 | tracing_reset_online_cpus(tr); |
| 49 | } | 58 | } |
| 50 | 59 | ||
| 60 | static void | ||
| 61 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | ||
| 62 | { | ||
| 63 | struct trace_array *tr = func_trace; | ||
| 64 | struct trace_array_cpu *data; | ||
| 65 | unsigned long flags; | ||
| 66 | long disabled; | ||
| 67 | int cpu, resched; | ||
| 68 | int pc; | ||
| 69 | |||
| 70 | if (unlikely(!ftrace_function_enabled)) | ||
| 71 | return; | ||
| 72 | |||
| 73 | pc = preempt_count(); | ||
| 74 | resched = ftrace_preempt_disable(); | ||
| 75 | local_save_flags(flags); | ||
| 76 | cpu = raw_smp_processor_id(); | ||
| 77 | data = tr->data[cpu]; | ||
| 78 | disabled = atomic_inc_return(&data->disabled); | ||
| 79 | |||
| 80 | if (likely(disabled == 1)) | ||
| 81 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
| 82 | |||
| 83 | atomic_dec(&data->disabled); | ||
| 84 | ftrace_preempt_enable(resched); | ||
| 85 | } | ||
| 86 | |||
| 87 | static void | ||
| 88 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
| 89 | { | ||
| 90 | struct trace_array *tr = func_trace; | ||
| 91 | struct trace_array_cpu *data; | ||
| 92 | unsigned long flags; | ||
| 93 | long disabled; | ||
| 94 | int cpu; | ||
| 95 | int pc; | ||
| 96 | |||
| 97 | if (unlikely(!ftrace_function_enabled)) | ||
| 98 | return; | ||
| 99 | |||
| 100 | /* | ||
| 101 | * Need to use raw, since this must be called before the | ||
| 102 | * recursive protection is performed. | ||
| 103 | */ | ||
| 104 | local_irq_save(flags); | ||
| 105 | cpu = raw_smp_processor_id(); | ||
| 106 | data = tr->data[cpu]; | ||
| 107 | disabled = atomic_inc_return(&data->disabled); | ||
| 108 | |||
| 109 | if (likely(disabled == 1)) { | ||
| 110 | pc = preempt_count(); | ||
| 111 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
| 112 | } | ||
| 113 | |||
| 114 | atomic_dec(&data->disabled); | ||
| 115 | local_irq_restore(flags); | ||
| 116 | } | ||
| 117 | |||
| 118 | static void | ||
| 119 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | ||
| 120 | { | ||
| 121 | struct trace_array *tr = func_trace; | ||
| 122 | struct trace_array_cpu *data; | ||
| 123 | unsigned long flags; | ||
| 124 | long disabled; | ||
| 125 | int cpu; | ||
| 126 | int pc; | ||
| 127 | |||
| 128 | if (unlikely(!ftrace_function_enabled)) | ||
| 129 | return; | ||
| 130 | |||
| 131 | /* | ||
| 132 | * Need to use raw, since this must be called before the | ||
| 133 | * recursive protection is performed. | ||
| 134 | */ | ||
| 135 | local_irq_save(flags); | ||
| 136 | cpu = raw_smp_processor_id(); | ||
| 137 | data = tr->data[cpu]; | ||
| 138 | disabled = atomic_inc_return(&data->disabled); | ||
| 139 | |||
| 140 | if (likely(disabled == 1)) { | ||
| 141 | pc = preempt_count(); | ||
| 142 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
| 143 | /* | ||
| 144 | * skip over 5 funcs: | ||
| 145 | * __ftrace_trace_stack, | ||
| 146 | * __trace_stack, | ||
| 147 | * function_stack_trace_call | ||
| 148 | * ftrace_list_func | ||
| 149 | * ftrace_call | ||
| 150 | */ | ||
| 151 | __trace_stack(tr, data, flags, 5, pc); | ||
| 152 | } | ||
| 153 | |||
| 154 | atomic_dec(&data->disabled); | ||
| 155 | local_irq_restore(flags); | ||
| 156 | } | ||
| 157 | |||
| 158 | |||
| 159 | static struct ftrace_ops trace_ops __read_mostly = | ||
| 160 | { | ||
| 161 | .func = function_trace_call, | ||
| 162 | }; | ||
| 163 | |||
| 164 | static struct ftrace_ops trace_stack_ops __read_mostly = | ||
| 165 | { | ||
| 166 | .func = function_stack_trace_call, | ||
| 167 | }; | ||
| 168 | |||
| 169 | /* Our two options */ | ||
| 170 | enum { | ||
| 171 | TRACE_FUNC_OPT_STACK = 0x1, | ||
| 172 | }; | ||
| 173 | |||
| 174 | static struct tracer_opt func_opts[] = { | ||
| 175 | #ifdef CONFIG_STACKTRACE | ||
| 176 | { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, | ||
| 177 | #endif | ||
| 178 | { } /* Always set a last empty entry */ | ||
| 179 | }; | ||
| 180 | |||
| 181 | static struct tracer_flags func_flags = { | ||
| 182 | .val = 0, /* By default: all flags disabled */ | ||
| 183 | .opts = func_opts | ||
| 184 | }; | ||
| 185 | |||
| 186 | static void tracing_start_function_trace(void) | ||
| 187 | { | ||
| 188 | ftrace_function_enabled = 0; | ||
| 189 | |||
| 190 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
| 191 | trace_ops.func = function_trace_call_preempt_only; | ||
| 192 | else | ||
| 193 | trace_ops.func = function_trace_call; | ||
| 194 | |||
| 195 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
| 196 | register_ftrace_function(&trace_stack_ops); | ||
| 197 | else | ||
| 198 | register_ftrace_function(&trace_ops); | ||
| 199 | |||
| 200 | ftrace_function_enabled = 1; | ||
| 201 | } | ||
| 202 | |||
| 203 | static void tracing_stop_function_trace(void) | ||
| 204 | { | ||
| 205 | ftrace_function_enabled = 0; | ||
| 206 | /* OK if they are not registered */ | ||
| 207 | unregister_ftrace_function(&trace_stack_ops); | ||
| 208 | unregister_ftrace_function(&trace_ops); | ||
| 209 | } | ||
| 210 | |||
| 211 | static int func_set_flag(u32 old_flags, u32 bit, int set) | ||
| 212 | { | ||
| 213 | if (bit == TRACE_FUNC_OPT_STACK) { | ||
| 214 | /* do nothing if already set */ | ||
| 215 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | ||
| 216 | return 0; | ||
| 217 | |||
| 218 | if (set) { | ||
| 219 | unregister_ftrace_function(&trace_ops); | ||
| 220 | register_ftrace_function(&trace_stack_ops); | ||
| 221 | } else { | ||
| 222 | unregister_ftrace_function(&trace_stack_ops); | ||
| 223 | register_ftrace_function(&trace_ops); | ||
| 224 | } | ||
| 225 | |||
| 226 | return 0; | ||
| 227 | } | ||
| 228 | |||
| 229 | return -EINVAL; | ||
| 230 | } | ||
| 231 | |||
| 51 | static struct tracer function_trace __read_mostly = | 232 | static struct tracer function_trace __read_mostly = |
| 52 | { | 233 | { |
| 53 | .name = "function", | 234 | .name = "function", |
| 54 | .init = function_trace_init, | 235 | .init = function_trace_init, |
| 55 | .reset = function_trace_reset, | 236 | .reset = function_trace_reset, |
| 56 | .start = function_trace_start, | 237 | .start = function_trace_start, |
| 238 | .flags = &func_flags, | ||
| 239 | .set_flag = func_set_flag, | ||
| 57 | #ifdef CONFIG_FTRACE_SELFTEST | 240 | #ifdef CONFIG_FTRACE_SELFTEST |
| 58 | .selftest = trace_selftest_startup_function, | 241 | .selftest = trace_selftest_startup_function, |
| 59 | #endif | 242 | #endif |
| 60 | }; | 243 | }; |
| 61 | 244 | ||
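function_trace_call_preempt_only() leans on the ftrace_preempt_disable()/ftrace_preempt_enable() pair from trace.h. A sketch of that pair, from memory of the trace.h of this period and shown only to explain why the callback carries "resched" around:

static inline int ftrace_preempt_disable(void)
{
        int resched = need_resched();

        preempt_disable_notrace();
        return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

Preserving need_resched() across the notrace disable keeps the tracer from swallowing a reschedule that was already pending when the traced function ran. With this patch, the stack-trace variant can be toggled at run time through the tracer's func_stack_trace option; func_set_flag() services the flip by swapping which ftrace_ops is registered rather than restarting the tracer.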
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 930c08e5b38e..c97594d826bc 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * | 2 | * |
| 3 | * Function graph tracer. | 3 | * Function graph tracer. |
| 4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> |
| 5 | * Mostly borrowed from function tracer which | 5 | * Mostly borrowed from function tracer which |
| 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> |
| 7 | * | 7 | * |
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
| 13 | 13 | ||
| 14 | #include "trace.h" | 14 | #include "trace.h" |
| 15 | #include "trace_output.h" | ||
| 15 | 16 | ||
| 16 | #define TRACE_GRAPH_INDENT 2 | 17 | #define TRACE_GRAPH_INDENT 2 |
| 17 | 18 | ||
| @@ -20,9 +21,11 @@ | |||
| 20 | #define TRACE_GRAPH_PRINT_CPU 0x2 | 21 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
| 21 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 22 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
| 22 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 23 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
| 24 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | ||
| 25 | #define TRACE_GRAPH_PRINT_ABS_TIME 0X20 | ||
| 23 | 26 | ||
| 24 | static struct tracer_opt trace_opts[] = { | 27 | static struct tracer_opt trace_opts[] = { |
| 25 | /* Display overruns ? */ | 28 | /* Display overruns? (for self-debug purpose) */ |
| 26 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | 29 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
| 27 | /* Display CPU ? */ | 30 | /* Display CPU ? */ |
| 28 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | 31 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, |
| @@ -30,29 +33,30 @@ static struct tracer_opt trace_opts[] = { | |||
| 30 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | 33 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, |
| 31 | /* Display proc name/pid */ | 34 | /* Display proc name/pid */ |
| 32 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | 35 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, |
| 36 | /* Display duration of execution */ | ||
| 37 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | ||
| 38 | /* Display absolute time of an entry */ | ||
| 39 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | ||
| 33 | { } /* Empty entry */ | 40 | { } /* Empty entry */ |
| 34 | }; | 41 | }; |
| 35 | 42 | ||
| 36 | static struct tracer_flags tracer_flags = { | 43 | static struct tracer_flags tracer_flags = { |
| 37 | /* Don't display overruns and proc by default */ | 44 | /* Don't display overruns and proc by default */ |
| 38 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, | 45 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
| 46 | TRACE_GRAPH_PRINT_DURATION, | ||
| 39 | .opts = trace_opts | 47 | .opts = trace_opts |
| 40 | }; | 48 | }; |
| 41 | 49 | ||
| 42 | /* pid on the last trace processed */ | 50 | /* pid on the last trace processed */ |
| 43 | static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; | 51 | |
| 44 | 52 | ||
| 45 | static int graph_trace_init(struct trace_array *tr) | 53 | static int graph_trace_init(struct trace_array *tr) |
| 46 | { | 54 | { |
| 47 | int cpu, ret; | 55 | int ret = register_ftrace_graph(&trace_graph_return, |
| 48 | |||
| 49 | for_each_online_cpu(cpu) | ||
| 50 | tracing_reset(tr, cpu); | ||
| 51 | |||
| 52 | ret = register_ftrace_graph(&trace_graph_return, | ||
| 53 | &trace_graph_entry); | 56 | &trace_graph_entry); |
| 54 | if (ret) | 57 | if (ret) |
| 55 | return ret; | 58 | return ret; |
| 59 | tracing_reset_online_cpus(tr); | ||
| 56 | tracing_start_cmdline_record(); | 60 | tracing_start_cmdline_record(); |
| 57 | 61 | ||
| 58 | return 0; | 62 | return 0; |
| @@ -153,17 +157,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
| 153 | 157 | ||
| 154 | /* If the pid changed since the last trace, output this event */ | 158 | /* If the pid changed since the last trace, output this event */ |
| 155 | static enum print_line_t | 159 | static enum print_line_t |
| 156 | verif_pid(struct trace_seq *s, pid_t pid, int cpu) | 160 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu) |
| 157 | { | 161 | { |
| 158 | pid_t prev_pid; | 162 | pid_t prev_pid; |
| 163 | pid_t *last_pid; | ||
| 159 | int ret; | 164 | int ret; |
| 160 | 165 | ||
| 161 | if (last_pid[cpu] != -1 && last_pid[cpu] == pid) | 166 | if (!last_pids_cpu) |
| 162 | return TRACE_TYPE_HANDLED; | 167 | return TRACE_TYPE_HANDLED; |
| 163 | 168 | ||
| 164 | prev_pid = last_pid[cpu]; | 169 | last_pid = per_cpu_ptr(last_pids_cpu, cpu); |
| 165 | last_pid[cpu] = pid; | 170 | |
| 171 | if (*last_pid == pid) | ||
| 172 | return TRACE_TYPE_HANDLED; | ||
| 166 | 173 | ||
| 174 | prev_pid = *last_pid; | ||
| 175 | *last_pid = pid; | ||
| 176 | |||
| 177 | if (prev_pid == -1) | ||
| 178 | return TRACE_TYPE_HANDLED; | ||
| 167 | /* | 179 | /* |
| 168 | * Context-switch trace line: | 180 | * Context-switch trace line: |
| 169 | 181 | ||
| @@ -231,9 +243,34 @@ trace_branch_is_leaf(struct trace_iterator *iter, | |||
| 231 | return true; | 243 | return true; |
| 232 | } | 244 | } |
| 233 | 245 | ||
| 246 | /* Signal an overhead of time execution to the output */ | ||
| 247 | static int | ||
| 248 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | ||
| 249 | { | ||
| 250 | /* If the duration column is disabled, we don't need anything */ | ||
| 251 | if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) | ||
| 252 | return 1; | ||
| 253 | |||
| 254 | /* Non nested entry or return */ | ||
| 255 | if (duration == -1) | ||
| 256 | return trace_seq_printf(s, " "); | ||
| 257 | |||
| 258 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
| 259 | /* Duration exceeded 100 msecs */ | ||
| 260 | if (duration > 100000ULL) | ||
| 261 | return trace_seq_printf(s, "! "); | ||
| 262 | |||
| 263 | /* Duration exceeded 10 msecs */ | ||
| 264 | if (duration > 10000ULL) | ||
| 265 | return trace_seq_printf(s, "+ "); | ||
| 266 | } | ||
| 267 | |||
| 268 | return trace_seq_printf(s, " "); | ||
| 269 | } | ||
| 270 | |||
| 234 | static enum print_line_t | 271 | static enum print_line_t |
| 235 | print_graph_irq(struct trace_seq *s, unsigned long addr, | 272 | print_graph_irq(struct trace_seq *s, unsigned long addr, |
| 236 | enum trace_type type, int cpu, pid_t pid) | 273 | enum trace_type type, int cpu, pid_t pid) |
| 237 | { | 274 | { |
| 238 | int ret; | 275 | int ret; |
| 239 | 276 | ||
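print_graph_overhead() takes an unsigned long long, but the callers pass -1 to mean "no duration on this line" (non-leaf entries, interrupt markers, comments). The sentinel survives the unsigned parameter because of the usual conversions, e.g.:

        unsigned long long duration = -1;       /* stored as ULLONG_MAX */

        if (duration == -1)                     /* -1 converts the same way: true */
                /* print spacing only, no '!' or '+' marker */;

So the '!' and '+' markers are only emitted on lines that actually carry a measured duration.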
| @@ -241,35 +278,40 @@ print_graph_irq(struct trace_seq *s, unsigned long addr, | |||
| 241 | addr >= (unsigned long)__irqentry_text_end) | 278 | addr >= (unsigned long)__irqentry_text_end) |
| 242 | return TRACE_TYPE_UNHANDLED; | 279 | return TRACE_TYPE_UNHANDLED; |
| 243 | 280 | ||
| 244 | if (type == TRACE_GRAPH_ENT) { | 281 | /* Cpu */ |
| 245 | ret = trace_seq_printf(s, "==========> | "); | 282 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
| 246 | } else { | 283 | ret = print_graph_cpu(s, cpu); |
| 247 | /* Cpu */ | 284 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 248 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 285 | return TRACE_TYPE_PARTIAL_LINE; |
| 249 | ret = print_graph_cpu(s, cpu); | 286 | } |
| 250 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 287 | /* Proc */ |
| 251 | return TRACE_TYPE_PARTIAL_LINE; | 288 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { |
| 252 | } | 289 | ret = print_graph_proc(s, pid); |
| 253 | /* Proc */ | 290 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 254 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 291 | return TRACE_TYPE_PARTIAL_LINE; |
| 255 | ret = print_graph_proc(s, pid); | 292 | ret = trace_seq_printf(s, " | "); |
| 256 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 293 | if (!ret) |
| 257 | return TRACE_TYPE_PARTIAL_LINE; | 294 | return TRACE_TYPE_PARTIAL_LINE; |
| 295 | } | ||
| 258 | 296 | ||
| 259 | ret = trace_seq_printf(s, " | "); | 297 | /* No overhead */ |
| 260 | if (!ret) | 298 | ret = print_graph_overhead(-1, s); |
| 261 | return TRACE_TYPE_PARTIAL_LINE; | 299 | if (!ret) |
| 262 | } | 300 | return TRACE_TYPE_PARTIAL_LINE; |
| 263 | 301 | ||
| 264 | /* No overhead */ | 302 | if (type == TRACE_GRAPH_ENT) |
| 265 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 303 | ret = trace_seq_printf(s, "==========>"); |
| 266 | ret = trace_seq_printf(s, " "); | 304 | else |
| 267 | if (!ret) | 305 | ret = trace_seq_printf(s, "<=========="); |
| 268 | return TRACE_TYPE_PARTIAL_LINE; | 306 | |
| 269 | } | 307 | if (!ret) |
| 308 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 309 | |||
| 310 | /* Don't close the duration column if we don't have one */ | ||
| 311 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | ||
| 312 | trace_seq_printf(s, " |"); | ||
| 313 | ret = trace_seq_printf(s, "\n"); | ||
| 270 | 314 | ||
| 271 | ret = trace_seq_printf(s, "<========== |\n"); | ||
| 272 | } | ||
| 273 | if (!ret) | 315 | if (!ret) |
| 274 | return TRACE_TYPE_PARTIAL_LINE; | 316 | return TRACE_TYPE_PARTIAL_LINE; |
| 275 | return TRACE_TYPE_HANDLED; | 317 | return TRACE_TYPE_HANDLED; |
| @@ -288,7 +330,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
| 288 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 330 | sprintf(msecs_str, "%lu", (unsigned long) duration); |
| 289 | 331 | ||
| 290 | /* Print msecs */ | 332 | /* Print msecs */ |
| 291 | ret = trace_seq_printf(s, msecs_str); | 333 | ret = trace_seq_printf(s, "%s", msecs_str); |
| 292 | if (!ret) | 334 | if (!ret) |
| 293 | return TRACE_TYPE_PARTIAL_LINE; | 335 | return TRACE_TYPE_PARTIAL_LINE; |
| 294 | 336 | ||
| @@ -321,19 +363,15 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
| 321 | 363 | ||
| 322 | } | 364 | } |
| 323 | 365 | ||
| 324 | /* Signal a overhead of time execution to the output */ | 366 | static int print_graph_abs_time(u64 t, struct trace_seq *s) |
| 325 | static int | ||
| 326 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | ||
| 327 | { | 367 | { |
| 328 | /* Duration exceeded 100 msecs */ | 368 | unsigned long usecs_rem; |
| 329 | if (duration > 100000ULL) | ||
| 330 | return trace_seq_printf(s, "! "); | ||
| 331 | 369 | ||
| 332 | /* Duration exceeded 10 msecs */ | 370 | usecs_rem = do_div(t, 1000000000); |
| 333 | if (duration > 10000ULL) | 371 | usecs_rem /= 1000; |
| 334 | return trace_seq_printf(s, "+ "); | ||
| 335 | 372 | ||
| 336 | return trace_seq_printf(s, " "); | 373 | return trace_seq_printf(s, "%5lu.%06lu | ", |
| 374 | (unsigned long)t, usecs_rem); | ||
| 337 | } | 375 | } |
| 338 | 376 | ||
| 339 | /* Case of a leaf function on its call entry */ | 377 | /* Case of a leaf function on its call entry */ |
| @@ -356,16 +394,16 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
| 356 | duration = graph_ret->rettime - graph_ret->calltime; | 394 | duration = graph_ret->rettime - graph_ret->calltime; |
| 357 | 395 | ||
| 358 | /* Overhead */ | 396 | /* Overhead */ |
| 359 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 397 | ret = print_graph_overhead(duration, s); |
| 360 | ret = print_graph_overhead(duration, s); | 398 | if (!ret) |
| 361 | if (!ret) | 399 | return TRACE_TYPE_PARTIAL_LINE; |
| 362 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 363 | } | ||
| 364 | 400 | ||
| 365 | /* Duration */ | 401 | /* Duration */ |
| 366 | ret = print_graph_duration(duration, s); | 402 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
| 367 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 403 | ret = print_graph_duration(duration, s); |
| 368 | return TRACE_TYPE_PARTIAL_LINE; | 404 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 405 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 406 | } | ||
| 369 | 407 | ||
| 370 | /* Function */ | 408 | /* Function */ |
| 371 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 409 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
| @@ -394,25 +432,17 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, | |||
| 394 | struct ftrace_graph_ent *call = &entry->graph_ent; | 432 | struct ftrace_graph_ent *call = &entry->graph_ent; |
| 395 | 433 | ||
| 396 | /* No overhead */ | 434 | /* No overhead */ |
| 397 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 435 | ret = print_graph_overhead(-1, s); |
| 398 | ret = trace_seq_printf(s, " "); | 436 | if (!ret) |
| 399 | if (!ret) | 437 | return TRACE_TYPE_PARTIAL_LINE; |
| 400 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 401 | } | ||
| 402 | 438 | ||
| 403 | /* Interrupt */ | 439 | /* No time */ |
| 404 | ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid); | 440 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
| 405 | if (ret == TRACE_TYPE_UNHANDLED) { | ||
| 406 | /* No time */ | ||
| 407 | ret = trace_seq_printf(s, " | "); | 441 | ret = trace_seq_printf(s, " | "); |
| 408 | if (!ret) | 442 | if (!ret) |
| 409 | return TRACE_TYPE_PARTIAL_LINE; | 443 | return TRACE_TYPE_PARTIAL_LINE; |
| 410 | } else { | ||
| 411 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 412 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 413 | } | 444 | } |
| 414 | 445 | ||
| 415 | |||
| 416 | /* Function */ | 446 | /* Function */ |
| 417 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 447 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
| 418 | ret = trace_seq_printf(s, " "); | 448 | ret = trace_seq_printf(s, " "); |
| @@ -433,15 +463,30 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, | |||
| 433 | 463 | ||
| 434 | static enum print_line_t | 464 | static enum print_line_t |
| 435 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 465 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
| 436 | struct trace_iterator *iter, int cpu) | 466 | struct trace_iterator *iter) |
| 437 | { | 467 | { |
| 438 | int ret; | 468 | int ret; |
| 469 | int cpu = iter->cpu; | ||
| 470 | pid_t *last_entry = iter->private; | ||
| 439 | struct trace_entry *ent = iter->ent; | 471 | struct trace_entry *ent = iter->ent; |
| 472 | struct ftrace_graph_ent *call = &field->graph_ent; | ||
| 440 | 473 | ||
| 441 | /* Pid */ | 474 | /* Pid */ |
| 442 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | 475 | if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE) |
| 476 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 477 | |||
| 478 | /* Interrupt */ | ||
| 479 | ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, ent->pid); | ||
| 480 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 443 | return TRACE_TYPE_PARTIAL_LINE; | 481 | return TRACE_TYPE_PARTIAL_LINE; |
| 444 | 482 | ||
| 483 | /* Absolute time */ | ||
| 484 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | ||
| 485 | ret = print_graph_abs_time(iter->ts, s); | ||
| 486 | if (!ret) | ||
| 487 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 488 | } | ||
| 489 | |||
| 445 | /* Cpu */ | 490 | /* Cpu */ |
| 446 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 491 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
| 447 | ret = print_graph_cpu(s, cpu); | 492 | ret = print_graph_cpu(s, cpu); |
| @@ -469,16 +514,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
| 469 | 514 | ||
| 470 | static enum print_line_t | 515 | static enum print_line_t |
| 471 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 516 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
| 472 | struct trace_entry *ent, int cpu) | 517 | struct trace_entry *ent, struct trace_iterator *iter) |
| 473 | { | 518 | { |
| 474 | int i; | 519 | int i; |
| 475 | int ret; | 520 | int ret; |
| 521 | int cpu = iter->cpu; | ||
| 522 | pid_t *last_pid = iter->private; | ||
| 476 | unsigned long long duration = trace->rettime - trace->calltime; | 523 | unsigned long long duration = trace->rettime - trace->calltime; |
| 477 | 524 | ||
| 478 | /* Pid */ | 525 | /* Pid */ |
| 479 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | 526 | if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) |
| 480 | return TRACE_TYPE_PARTIAL_LINE; | 527 | return TRACE_TYPE_PARTIAL_LINE; |
| 481 | 528 | ||
| 529 | /* Absolute time */ | ||
| 530 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | ||
| 531 | ret = print_graph_abs_time(iter->ts, s); | ||
| 532 | if (!ret) | ||
| 533 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 534 | } | ||
| 535 | |||
| 482 | /* Cpu */ | 536 | /* Cpu */ |
| 483 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 537 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
| 484 | ret = print_graph_cpu(s, cpu); | 538 | ret = print_graph_cpu(s, cpu); |
| @@ -498,16 +552,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 498 | } | 552 | } |
| 499 | 553 | ||
| 500 | /* Overhead */ | 554 | /* Overhead */ |
| 501 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 555 | ret = print_graph_overhead(duration, s); |
| 502 | ret = print_graph_overhead(duration, s); | 556 | if (!ret) |
| 503 | if (!ret) | 557 | return TRACE_TYPE_PARTIAL_LINE; |
| 504 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 505 | } | ||
| 506 | 558 | ||
| 507 | /* Duration */ | 559 | /* Duration */ |
| 508 | ret = print_graph_duration(duration, s); | 560 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
| 509 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 561 | ret = print_graph_duration(duration, s); |
| 510 | return TRACE_TYPE_PARTIAL_LINE; | 562 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 563 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 564 | } | ||
| 511 | 565 | ||
| 512 | /* Closing brace */ | 566 | /* Closing brace */ |
| 513 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 567 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
| @@ -541,14 +595,23 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s, | |||
| 541 | { | 595 | { |
| 542 | int i; | 596 | int i; |
| 543 | int ret; | 597 | int ret; |
| 598 | int cpu = iter->cpu; | ||
| 599 | pid_t *last_pid = iter->private; | ||
| 600 | |||
| 601 | /* Absolute time */ | ||
| 602 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | ||
| 603 | ret = print_graph_abs_time(iter->ts, s); | ||
| 604 | if (!ret) | ||
| 605 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 606 | } | ||
| 544 | 607 | ||
| 545 | /* Pid */ | 608 | /* Pid */ |
| 546 | if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE) | 609 | if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) |
| 547 | return TRACE_TYPE_PARTIAL_LINE; | 610 | return TRACE_TYPE_PARTIAL_LINE; |
| 548 | 611 | ||
| 549 | /* Cpu */ | 612 | /* Cpu */ |
| 550 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 613 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
| 551 | ret = print_graph_cpu(s, iter->cpu); | 614 | ret = print_graph_cpu(s, cpu); |
| 552 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 615 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
| 553 | return TRACE_TYPE_PARTIAL_LINE; | 616 | return TRACE_TYPE_PARTIAL_LINE; |
| 554 | } | 617 | } |
| @@ -565,17 +628,17 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s, | |||
| 565 | } | 628 | } |
| 566 | 629 | ||
| 567 | /* No overhead */ | 630 | /* No overhead */ |
| 568 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 631 | ret = print_graph_overhead(-1, s); |
| 569 | ret = trace_seq_printf(s, " "); | 632 | if (!ret) |
| 633 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 634 | |||
| 635 | /* No time */ | ||
| 636 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | ||
| 637 | ret = trace_seq_printf(s, " | "); | ||
| 570 | if (!ret) | 638 | if (!ret) |
| 571 | return TRACE_TYPE_PARTIAL_LINE; | 639 | return TRACE_TYPE_PARTIAL_LINE; |
| 572 | } | 640 | } |
| 573 | 641 | ||
| 574 | /* No time */ | ||
| 575 | ret = trace_seq_printf(s, " | "); | ||
| 576 | if (!ret) | ||
| 577 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 578 | |||
| 579 | /* Indentation */ | 642 | /* Indentation */ |
| 580 | if (trace->depth > 0) | 643 | if (trace->depth > 0) |
| 581 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { | 644 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { |
| @@ -589,8 +652,11 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s, | |||
| 589 | if (!ret) | 652 | if (!ret) |
| 590 | return TRACE_TYPE_PARTIAL_LINE; | 653 | return TRACE_TYPE_PARTIAL_LINE; |
| 591 | 654 | ||
| 592 | if (ent->flags & TRACE_FLAG_CONT) | 655 | /* Strip ending newline */ |
| 593 | trace_seq_print_cont(s, iter); | 656 | if (s->buffer[s->len - 1] == '\n') { |
| 657 | s->buffer[s->len - 1] = '\0'; | ||
| 658 | s->len--; | ||
| 659 | } | ||
| 594 | 660 | ||
| 595 | ret = trace_seq_printf(s, " */\n"); | 661 | ret = trace_seq_printf(s, " */\n"); |
| 596 | if (!ret) | 662 | if (!ret) |
| @@ -610,13 +676,12 @@ print_graph_function(struct trace_iterator *iter) | |||
| 610 | case TRACE_GRAPH_ENT: { | 676 | case TRACE_GRAPH_ENT: { |
| 611 | struct ftrace_graph_ent_entry *field; | 677 | struct ftrace_graph_ent_entry *field; |
| 612 | trace_assign_type(field, entry); | 678 | trace_assign_type(field, entry); |
| 613 | return print_graph_entry(field, s, iter, | 679 | return print_graph_entry(field, s, iter); |
| 614 | iter->cpu); | ||
| 615 | } | 680 | } |
| 616 | case TRACE_GRAPH_RET: { | 681 | case TRACE_GRAPH_RET: { |
| 617 | struct ftrace_graph_ret_entry *field; | 682 | struct ftrace_graph_ret_entry *field; |
| 618 | trace_assign_type(field, entry); | 683 | trace_assign_type(field, entry); |
| 619 | return print_graph_return(&field->ret, s, entry, iter->cpu); | 684 | return print_graph_return(&field->ret, s, entry, iter); |
| 620 | } | 685 | } |
| 621 | case TRACE_PRINT: { | 686 | case TRACE_PRINT: { |
| 622 | struct print_entry *field; | 687 | struct print_entry *field; |
| @@ -632,28 +697,55 @@ static void print_graph_headers(struct seq_file *s) | |||
| 632 | { | 697 | { |
| 633 | /* 1st line */ | 698 | /* 1st line */ |
| 634 | seq_printf(s, "# "); | 699 | seq_printf(s, "# "); |
| 700 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | ||
| 701 | seq_printf(s, " TIME "); | ||
| 635 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 702 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
| 636 | seq_printf(s, "CPU "); | 703 | seq_printf(s, "CPU"); |
| 637 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 704 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
| 638 | seq_printf(s, "TASK/PID "); | 705 | seq_printf(s, " TASK/PID "); |
| 639 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) | 706 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
| 640 | seq_printf(s, "OVERHEAD/"); | 707 | seq_printf(s, " DURATION "); |
| 641 | seq_printf(s, "DURATION FUNCTION CALLS\n"); | 708 | seq_printf(s, " FUNCTION CALLS\n"); |
| 642 | 709 | ||
| 643 | /* 2nd line */ | 710 | /* 2nd line */ |
| 644 | seq_printf(s, "# "); | 711 | seq_printf(s, "# "); |
| 712 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | ||
| 713 | seq_printf(s, " | "); | ||
| 645 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 714 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
| 646 | seq_printf(s, "| "); | 715 | seq_printf(s, "| "); |
| 647 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 716 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
| 648 | seq_printf(s, "| | "); | 717 | seq_printf(s, " | | "); |
| 649 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 718 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
| 650 | seq_printf(s, "| "); | 719 | seq_printf(s, " | | "); |
| 651 | seq_printf(s, "| | | | |\n"); | 720 | seq_printf(s, " | | | |\n"); |
| 652 | } else | 721 | } |
| 653 | seq_printf(s, " | | | | |\n"); | 722 | |
| 723 | static void graph_trace_open(struct trace_iterator *iter) | ||
| 724 | { | ||
| 725 | /* pid on the last trace processed */ | ||
| 726 | pid_t *last_pid = alloc_percpu(pid_t); | ||
| 727 | int cpu; | ||
| 728 | |||
| 729 | if (!last_pid) | ||
| 730 | pr_warning("function graph tracer: not enough memory\n"); | ||
| 731 | else | ||
| 732 | for_each_possible_cpu(cpu) { | ||
| 733 | pid_t *pid = per_cpu_ptr(last_pid, cpu); | ||
| 734 | *pid = -1; | ||
| 735 | } | ||
| 736 | |||
| 737 | iter->private = last_pid; | ||
| 654 | } | 738 | } |
| 739 | |||
| 740 | static void graph_trace_close(struct trace_iterator *iter) | ||
| 741 | { | ||
| 742 | percpu_free(iter->private); | ||
| 743 | } | ||
| 744 | |||
| 655 | static struct tracer graph_trace __read_mostly = { | 745 | static struct tracer graph_trace __read_mostly = { |
| 656 | .name = "function_graph", | 746 | .name = "function_graph", |
| 747 | .open = graph_trace_open, | ||
| 748 | .close = graph_trace_close, | ||
| 657 | .init = graph_trace_init, | 749 | .init = graph_trace_init, |
| 658 | .reset = graph_trace_reset, | 750 | .reset = graph_trace_reset, |
| 659 | .print_line = print_graph_function, | 751 | .print_line = print_graph_function, |
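print_graph_abs_time() relies on do_div(), which divides a u64 in place and returns the remainder. A worked example of the nanosecond split (values are illustrative only):

        u64 t = 5123456789ULL;                  /* 5.123456789 s, in ns */
        unsigned long usecs_rem;

        usecs_rem = do_div(t, 1000000000);      /* t becomes 5, remainder 123456789 ns */
        usecs_rem /= 1000;                      /* 123456 us */

        /* rendered as "    5.123456 | " in the new absolute-time column */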
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index 649df22d435f..fff3545fc866 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * h/w branch tracer for x86 based on bts | 2 | * h/w branch tracer for x86 based on bts |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com> | 4 | * Copyright (C) 2008-2009 Intel Corporation. |
| 5 | * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009 | ||
| 5 | * | 6 | * |
| 6 | */ | 7 | */ |
| 7 | 8 | ||
| @@ -10,21 +11,44 @@ | |||
| 10 | #include <linux/debugfs.h> | 11 | #include <linux/debugfs.h> |
| 11 | #include <linux/ftrace.h> | 12 | #include <linux/ftrace.h> |
| 12 | #include <linux/kallsyms.h> | 13 | #include <linux/kallsyms.h> |
| 14 | #include <linux/mutex.h> | ||
| 15 | #include <linux/cpu.h> | ||
| 16 | #include <linux/smp.h> | ||
| 13 | 17 | ||
| 14 | #include <asm/ds.h> | 18 | #include <asm/ds.h> |
| 15 | 19 | ||
| 16 | #include "trace.h" | 20 | #include "trace.h" |
| 21 | #include "trace_output.h" | ||
| 17 | 22 | ||
| 18 | 23 | ||
| 19 | #define SIZEOF_BTS (1 << 13) | 24 | #define SIZEOF_BTS (1 << 13) |
| 20 | 25 | ||
| 26 | /* The tracer mutex protects the below per-cpu tracer array. | ||
| 27 | It needs to be held to: | ||
| 28 | - start tracing on all cpus | ||
| 29 | - stop tracing on all cpus | ||
| 30 | - start tracing on a single hotplug cpu | ||
| 31 | - stop tracing on a single hotplug cpu | ||
| 32 | - read the trace from all cpus | ||
| 33 | - read the trace from a single cpu | ||
| 34 | */ | ||
| 35 | static DEFINE_MUTEX(bts_tracer_mutex); | ||
| 21 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); | 36 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); |
| 22 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); | 37 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); |
| 23 | 38 | ||
| 24 | #define this_tracer per_cpu(tracer, smp_processor_id()) | 39 | #define this_tracer per_cpu(tracer, smp_processor_id()) |
| 25 | #define this_buffer per_cpu(buffer, smp_processor_id()) | 40 | #define this_buffer per_cpu(buffer, smp_processor_id()) |
| 26 | 41 | ||
| 42 | static int __read_mostly trace_hw_branches_enabled; | ||
| 43 | static struct trace_array *hw_branch_trace __read_mostly; | ||
| 27 | 44 | ||
| 45 | |||
| 46 | /* | ||
| 47 | * Start tracing on the current cpu. | ||
| 48 | * The argument is ignored. | ||
| 49 | * | ||
| 50 | * pre: bts_tracer_mutex must be locked. | ||
| 51 | */ | ||
| 28 | static void bts_trace_start_cpu(void *arg) | 52 | static void bts_trace_start_cpu(void *arg) |
| 29 | { | 53 | { |
| 30 | if (this_tracer) | 54 | if (this_tracer) |
| @@ -42,14 +66,20 @@ static void bts_trace_start_cpu(void *arg) | |||
| 42 | 66 | ||
| 43 | static void bts_trace_start(struct trace_array *tr) | 67 | static void bts_trace_start(struct trace_array *tr) |
| 44 | { | 68 | { |
| 45 | int cpu; | 69 | mutex_lock(&bts_tracer_mutex); |
| 46 | 70 | ||
| 47 | tracing_reset_online_cpus(tr); | 71 | on_each_cpu(bts_trace_start_cpu, NULL, 1); |
| 72 | trace_hw_branches_enabled = 1; | ||
| 48 | 73 | ||
| 49 | for_each_cpu(cpu, cpu_possible_mask) | 74 | mutex_unlock(&bts_tracer_mutex); |
| 50 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | ||
| 51 | } | 75 | } |
| 52 | 76 | ||
| 77 | /* | ||
| 78 | * Stop tracing on the current cpu. | ||
| 79 | * The argument is ignored. | ||
| 80 | * | ||
| 81 | * pre: bts_tracer_mutex must be locked. | ||
| 82 | */ | ||
| 53 | static void bts_trace_stop_cpu(void *arg) | 83 | static void bts_trace_stop_cpu(void *arg) |
| 54 | { | 84 | { |
| 55 | if (this_tracer) { | 85 | if (this_tracer) { |
| @@ -60,26 +90,63 @@ static void bts_trace_stop_cpu(void *arg) | |||
| 60 | 90 | ||
| 61 | static void bts_trace_stop(struct trace_array *tr) | 91 | static void bts_trace_stop(struct trace_array *tr) |
| 62 | { | 92 | { |
| 63 | int cpu; | 93 | mutex_lock(&bts_tracer_mutex); |
| 94 | |||
| 95 | trace_hw_branches_enabled = 0; | ||
| 96 | on_each_cpu(bts_trace_stop_cpu, NULL, 1); | ||
| 64 | 97 | ||
| 65 | for_each_cpu(cpu, cpu_possible_mask) | 98 | mutex_unlock(&bts_tracer_mutex); |
| 99 | } | ||
| 100 | |||
| 101 | static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | ||
| 102 | unsigned long action, void *hcpu) | ||
| 103 | { | ||
| 104 | unsigned int cpu = (unsigned long)hcpu; | ||
| 105 | |||
| 106 | mutex_lock(&bts_tracer_mutex); | ||
| 107 | |||
| 108 | if (!trace_hw_branches_enabled) | ||
| 109 | goto out; | ||
| 110 | |||
| 111 | switch (action) { | ||
| 112 | case CPU_ONLINE: | ||
| 113 | case CPU_DOWN_FAILED: | ||
| 114 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | ||
| 115 | break; | ||
| 116 | case CPU_DOWN_PREPARE: | ||
| 66 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); | 117 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); |
| 118 | break; | ||
| 119 | } | ||
| 120 | |||
| 121 | out: | ||
| 122 | mutex_unlock(&bts_tracer_mutex); | ||
| 123 | return NOTIFY_DONE; | ||
| 67 | } | 124 | } |
| 68 | 125 | ||
| 126 | static struct notifier_block bts_hotcpu_notifier __cpuinitdata = { | ||
| 127 | .notifier_call = bts_hotcpu_handler | ||
| 128 | }; | ||
| 129 | |||
| 69 | static int bts_trace_init(struct trace_array *tr) | 130 | static int bts_trace_init(struct trace_array *tr) |
| 70 | { | 131 | { |
| 132 | hw_branch_trace = tr; | ||
| 133 | |||
| 134 | register_hotcpu_notifier(&bts_hotcpu_notifier); | ||
| 71 | tracing_reset_online_cpus(tr); | 135 | tracing_reset_online_cpus(tr); |
| 72 | bts_trace_start(tr); | 136 | bts_trace_start(tr); |
| 73 | 137 | ||
| 74 | return 0; | 138 | return 0; |
| 75 | } | 139 | } |
| 76 | 140 | ||
| 141 | static void bts_trace_reset(struct trace_array *tr) | ||
| 142 | { | ||
| 143 | bts_trace_stop(tr); | ||
| 144 | unregister_hotcpu_notifier(&bts_hotcpu_notifier); | ||
| 145 | } | ||
| 146 | |||
| 77 | static void bts_trace_print_header(struct seq_file *m) | 147 | static void bts_trace_print_header(struct seq_file *m) |
| 78 | { | 148 | { |
| 79 | seq_puts(m, | 149 | seq_puts(m, "# CPU# TO <- FROM\n"); |
| 80 | "# CPU# FROM TO FUNCTION\n"); | ||
| 81 | seq_puts(m, | ||
| 82 | "# | | | |\n"); | ||
| 83 | } | 150 | } |
| 84 | 151 | ||
| 85 | static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | 152 | static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) |
| @@ -87,15 +154,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | |||
| 87 | struct trace_entry *entry = iter->ent; | 154 | struct trace_entry *entry = iter->ent; |
| 88 | struct trace_seq *seq = &iter->seq; | 155 | struct trace_seq *seq = &iter->seq; |
| 89 | struct hw_branch_entry *it; | 156 | struct hw_branch_entry *it; |
| 157 | unsigned long symflags = TRACE_ITER_SYM_OFFSET; | ||
| 90 | 158 | ||
| 91 | trace_assign_type(it, entry); | 159 | trace_assign_type(it, entry); |
| 92 | 160 | ||
| 93 | if (entry->type == TRACE_HW_BRANCHES) { | 161 | if (entry->type == TRACE_HW_BRANCHES) { |
| 94 | if (trace_seq_printf(seq, "%4d ", entry->cpu) && | 162 | if (trace_seq_printf(seq, "%4d ", entry->cpu) && |
| 95 | trace_seq_printf(seq, "0x%016llx -> 0x%016llx ", | 163 | seq_print_ip_sym(seq, it->to, symflags) && |
| 96 | it->from, it->to) && | 164 | trace_seq_printf(seq, "\t <- ") && |
| 97 | (!it->from || | 165 | seq_print_ip_sym(seq, it->from, symflags) && |
| 98 | seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) && | ||
| 99 | trace_seq_printf(seq, "\n")) | 166 | trace_seq_printf(seq, "\n")) |
| 100 | return TRACE_TYPE_HANDLED; | 167 | return TRACE_TYPE_HANDLED; |
| 101 | return TRACE_TYPE_PARTIAL_LINE;; | 168 | return TRACE_TYPE_PARTIAL_LINE;; |
| @@ -103,26 +170,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | |||
| 103 | return TRACE_TYPE_UNHANDLED; | 170 | return TRACE_TYPE_UNHANDLED; |
| 104 | } | 171 | } |
| 105 | 172 | ||
| 106 | void trace_hw_branch(struct trace_array *tr, u64 from, u64 to) | 173 | void trace_hw_branch(u64 from, u64 to) |
| 107 | { | 174 | { |
| 175 | struct trace_array *tr = hw_branch_trace; | ||
| 108 | struct ring_buffer_event *event; | 176 | struct ring_buffer_event *event; |
| 109 | struct hw_branch_entry *entry; | 177 | struct hw_branch_entry *entry; |
| 110 | unsigned long irq; | 178 | unsigned long irq1, irq2; |
| 179 | int cpu; | ||
| 111 | 180 | ||
| 112 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); | 181 | if (unlikely(!tr)) |
| 113 | if (!event) | ||
| 114 | return; | 182 | return; |
| 183 | |||
| 184 | if (unlikely(!trace_hw_branches_enabled)) | ||
| 185 | return; | ||
| 186 | |||
| 187 | local_irq_save(irq1); | ||
| 188 | cpu = raw_smp_processor_id(); | ||
| 189 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | ||
| 190 | goto out; | ||
| 191 | |||
| 192 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2); | ||
| 193 | if (!event) | ||
| 194 | goto out; | ||
| 115 | entry = ring_buffer_event_data(event); | 195 | entry = ring_buffer_event_data(event); |
| 116 | tracing_generic_entry_update(&entry->ent, 0, from); | 196 | tracing_generic_entry_update(&entry->ent, 0, from); |
| 117 | entry->ent.type = TRACE_HW_BRANCHES; | 197 | entry->ent.type = TRACE_HW_BRANCHES; |
| 118 | entry->ent.cpu = smp_processor_id(); | 198 | entry->ent.cpu = cpu; |
| 119 | entry->from = from; | 199 | entry->from = from; |
| 120 | entry->to = to; | 200 | entry->to = to; |
| 121 | ring_buffer_unlock_commit(tr->buffer, event, irq); | 201 | ring_buffer_unlock_commit(tr->buffer, event, irq2); |
| 202 | |||
| 203 | out: | ||
| 204 | atomic_dec(&tr->data[cpu]->disabled); | ||
| 205 | local_irq_restore(irq1); | ||
| 122 | } | 206 | } |
| 123 | 207 | ||
| 124 | static void trace_bts_at(struct trace_array *tr, | 208 | static void trace_bts_at(const struct bts_trace *trace, void *at) |
| 125 | const struct bts_trace *trace, void *at) | ||
| 126 | { | 209 | { |
| 127 | struct bts_struct bts; | 210 | struct bts_struct bts; |
| 128 | int err = 0; | 211 | int err = 0; |
| @@ -137,18 +220,29 @@ static void trace_bts_at(struct trace_array *tr, | |||
| 137 | 220 | ||
| 138 | switch (bts.qualifier) { | 221 | switch (bts.qualifier) { |
| 139 | case BTS_BRANCH: | 222 | case BTS_BRANCH: |
| 140 | trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to); | 223 | trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to); |
| 141 | break; | 224 | break; |
| 142 | } | 225 | } |
| 143 | } | 226 | } |
| 144 | 227 | ||
| 228 | /* | ||
| 229 | * Collect the trace on the current cpu and write it into the ftrace buffer. | ||
| 230 | * | ||
| 231 | * pre: bts_tracer_mutex must be locked | ||
| 232 | */ | ||
| 145 | static void trace_bts_cpu(void *arg) | 233 | static void trace_bts_cpu(void *arg) |
| 146 | { | 234 | { |
| 147 | struct trace_array *tr = (struct trace_array *) arg; | 235 | struct trace_array *tr = (struct trace_array *) arg; |
| 148 | const struct bts_trace *trace; | 236 | const struct bts_trace *trace; |
| 149 | unsigned char *at; | 237 | unsigned char *at; |
| 150 | 238 | ||
| 151 | if (!this_tracer) | 239 | if (unlikely(!tr)) |
| 240 | return; | ||
| 241 | |||
| 242 | if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled))) | ||
| 243 | return; | ||
| 244 | |||
| 245 | if (unlikely(!this_tracer)) | ||
| 152 | return; | 246 | return; |
| 153 | 247 | ||
| 154 | ds_suspend_bts(this_tracer); | 248 | ds_suspend_bts(this_tracer); |
| @@ -158,11 +252,11 @@ static void trace_bts_cpu(void *arg) | |||
| 158 | 252 | ||
| 159 | for (at = trace->ds.top; (void *)at < trace->ds.end; | 253 | for (at = trace->ds.top; (void *)at < trace->ds.end; |
| 160 | at += trace->ds.size) | 254 | at += trace->ds.size) |
| 161 | trace_bts_at(tr, trace, at); | 255 | trace_bts_at(trace, at); |
| 162 | 256 | ||
| 163 | for (at = trace->ds.begin; (void *)at < trace->ds.top; | 257 | for (at = trace->ds.begin; (void *)at < trace->ds.top; |
| 164 | at += trace->ds.size) | 258 | at += trace->ds.size) |
| 165 | trace_bts_at(tr, trace, at); | 259 | trace_bts_at(trace, at); |
| 166 | 260 | ||
| 167 | out: | 261 | out: |
| 168 | ds_resume_bts(this_tracer); | 262 | ds_resume_bts(this_tracer); |
| @@ -170,22 +264,38 @@ out: | |||
| 170 | 264 | ||
| 171 | static void trace_bts_prepare(struct trace_iterator *iter) | 265 | static void trace_bts_prepare(struct trace_iterator *iter) |
| 172 | { | 266 | { |
| 173 | int cpu; | 267 | mutex_lock(&bts_tracer_mutex); |
| 268 | |||
| 269 | on_each_cpu(trace_bts_cpu, iter->tr, 1); | ||
| 270 | |||
| 271 | mutex_unlock(&bts_tracer_mutex); | ||
| 272 | } | ||
| 273 | |||
| 274 | static void trace_bts_close(struct trace_iterator *iter) | ||
| 275 | { | ||
| 276 | tracing_reset_online_cpus(iter->tr); | ||
| 277 | } | ||
| 278 | |||
| 279 | void trace_hw_branch_oops(void) | ||
| 280 | { | ||
| 281 | mutex_lock(&bts_tracer_mutex); | ||
| 282 | |||
| 283 | trace_bts_cpu(hw_branch_trace); | ||
| 174 | 284 | ||
| 175 | for_each_cpu(cpu, cpu_possible_mask) | 285 | mutex_unlock(&bts_tracer_mutex); |
| 176 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); | ||
| 177 | } | 286 | } |
| 178 | 287 | ||
| 179 | struct tracer bts_tracer __read_mostly = | 288 | struct tracer bts_tracer __read_mostly = |
| 180 | { | 289 | { |
| 181 | .name = "hw-branch-tracer", | 290 | .name = "hw-branch-tracer", |
| 182 | .init = bts_trace_init, | 291 | .init = bts_trace_init, |
| 183 | .reset = bts_trace_stop, | 292 | .reset = bts_trace_reset, |
| 184 | .print_header = bts_trace_print_header, | 293 | .print_header = bts_trace_print_header, |
| 185 | .print_line = bts_trace_print_line, | 294 | .print_line = bts_trace_print_line, |
| 186 | .start = bts_trace_start, | 295 | .start = bts_trace_start, |
| 187 | .stop = bts_trace_stop, | 296 | .stop = bts_trace_stop, |
| 188 | .open = trace_bts_prepare | 297 | .open = trace_bts_prepare, |
| 298 | .close = trace_bts_close | ||
| 189 | }; | 299 | }; |
| 190 | 300 | ||
| 191 | __init static int init_bts_trace(void) | 301 | __init static int init_bts_trace(void) |
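trace_bts_cpu() above drains the DS area in two passes because the hardware buffer is circular: assuming ds.top is the position the next record would be written to, [top, end) holds the records left over from the previous wrap and [begin, top) the most recent ones, so the two loops replay everything oldest-first. Annotated for clarity (same field names as the hunk above):

        for (at = trace->ds.top; (void *)at < trace->ds.end; at += trace->ds.size)
                trace_bts_at(trace, at);        /* older half of the ring */

        for (at = trace->ds.begin; (void *)at < trace->ds.top; at += trace->ds.size)
                trace_bts_at(trace, at);        /* newer, not yet wrapped, half */

Both passes run under bts_tracer_mutex and with BTS suspended, so the buffer cannot move underneath the walk.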
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 62a78d943534..ed344b022a14 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -353,28 +353,18 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) | |||
| 353 | } | 353 | } |
| 354 | #endif /* CONFIG_PREEMPT_TRACER */ | 354 | #endif /* CONFIG_PREEMPT_TRACER */ |
| 355 | 355 | ||
| 356 | /* | ||
| 357 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
| 358 | * variable when we disable it when we open a trace output file. | ||
| 359 | */ | ||
| 360 | static int save_tracer_enabled; | ||
| 361 | |||
| 362 | static void start_irqsoff_tracer(struct trace_array *tr) | 356 | static void start_irqsoff_tracer(struct trace_array *tr) |
| 363 | { | 357 | { |
| 364 | register_ftrace_function(&trace_ops); | 358 | register_ftrace_function(&trace_ops); |
| 365 | if (tracing_is_enabled()) { | 359 | if (tracing_is_enabled()) |
| 366 | tracer_enabled = 1; | 360 | tracer_enabled = 1; |
| 367 | save_tracer_enabled = 1; | 361 | else |
| 368 | } else { | ||
| 369 | tracer_enabled = 0; | 362 | tracer_enabled = 0; |
| 370 | save_tracer_enabled = 0; | ||
| 371 | } | ||
| 372 | } | 363 | } |
| 373 | 364 | ||
| 374 | static void stop_irqsoff_tracer(struct trace_array *tr) | 365 | static void stop_irqsoff_tracer(struct trace_array *tr) |
| 375 | { | 366 | { |
| 376 | tracer_enabled = 0; | 367 | tracer_enabled = 0; |
| 377 | save_tracer_enabled = 0; | ||
| 378 | unregister_ftrace_function(&trace_ops); | 368 | unregister_ftrace_function(&trace_ops); |
| 379 | } | 369 | } |
| 380 | 370 | ||
| @@ -395,25 +385,11 @@ static void irqsoff_tracer_reset(struct trace_array *tr) | |||
| 395 | static void irqsoff_tracer_start(struct trace_array *tr) | 385 | static void irqsoff_tracer_start(struct trace_array *tr) |
| 396 | { | 386 | { |
| 397 | tracer_enabled = 1; | 387 | tracer_enabled = 1; |
| 398 | save_tracer_enabled = 1; | ||
| 399 | } | 388 | } |
| 400 | 389 | ||
| 401 | static void irqsoff_tracer_stop(struct trace_array *tr) | 390 | static void irqsoff_tracer_stop(struct trace_array *tr) |
| 402 | { | 391 | { |
| 403 | tracer_enabled = 0; | 392 | tracer_enabled = 0; |
| 404 | save_tracer_enabled = 0; | ||
| 405 | } | ||
| 406 | |||
| 407 | static void irqsoff_tracer_open(struct trace_iterator *iter) | ||
| 408 | { | ||
| 409 | /* stop the trace while dumping */ | ||
| 410 | tracer_enabled = 0; | ||
| 411 | } | ||
| 412 | |||
| 413 | static void irqsoff_tracer_close(struct trace_iterator *iter) | ||
| 414 | { | ||
| 415 | /* restart tracing */ | ||
| 416 | tracer_enabled = save_tracer_enabled; | ||
| 417 | } | 393 | } |
| 418 | 394 | ||
| 419 | #ifdef CONFIG_IRQSOFF_TRACER | 395 | #ifdef CONFIG_IRQSOFF_TRACER |
| @@ -431,8 +407,6 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
| 431 | .reset = irqsoff_tracer_reset, | 407 | .reset = irqsoff_tracer_reset, |
| 432 | .start = irqsoff_tracer_start, | 408 | .start = irqsoff_tracer_start, |
| 433 | .stop = irqsoff_tracer_stop, | 409 | .stop = irqsoff_tracer_stop, |
| 434 | .open = irqsoff_tracer_open, | ||
| 435 | .close = irqsoff_tracer_close, | ||
| 436 | .print_max = 1, | 410 | .print_max = 1, |
| 437 | #ifdef CONFIG_FTRACE_SELFTEST | 411 | #ifdef CONFIG_FTRACE_SELFTEST |
| 438 | .selftest = trace_selftest_startup_irqsoff, | 412 | .selftest = trace_selftest_startup_irqsoff, |
| @@ -459,8 +433,6 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
| 459 | .reset = irqsoff_tracer_reset, | 433 | .reset = irqsoff_tracer_reset, |
| 460 | .start = irqsoff_tracer_start, | 434 | .start = irqsoff_tracer_start, |
| 461 | .stop = irqsoff_tracer_stop, | 435 | .stop = irqsoff_tracer_stop, |
| 462 | .open = irqsoff_tracer_open, | ||
| 463 | .close = irqsoff_tracer_close, | ||
| 464 | .print_max = 1, | 436 | .print_max = 1, |
| 465 | #ifdef CONFIG_FTRACE_SELFTEST | 437 | #ifdef CONFIG_FTRACE_SELFTEST |
| 466 | .selftest = trace_selftest_startup_preemptoff, | 438 | .selftest = trace_selftest_startup_preemptoff, |
| @@ -489,8 +461,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
| 489 | .reset = irqsoff_tracer_reset, | 461 | .reset = irqsoff_tracer_reset, |
| 490 | .start = irqsoff_tracer_start, | 462 | .start = irqsoff_tracer_start, |
| 491 | .stop = irqsoff_tracer_stop, | 463 | .stop = irqsoff_tracer_stop, |
| 492 | .open = irqsoff_tracer_open, | ||
| 493 | .close = irqsoff_tracer_close, | ||
| 494 | .print_max = 1, | 464 | .print_max = 1, |
| 495 | #ifdef CONFIG_FTRACE_SELFTEST | 465 | #ifdef CONFIG_FTRACE_SELFTEST |
| 496 | .selftest = trace_selftest_startup_preemptirqsoff, | 466 | .selftest = trace_selftest_startup_preemptirqsoff, |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index fffcb069f1dc..ec78e244242e 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
| @@ -9,8 +9,10 @@ | |||
| 9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 10 | #include <linux/mmiotrace.h> | 10 | #include <linux/mmiotrace.h> |
| 11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
| 12 | #include <asm/atomic.h> | ||
| 12 | 13 | ||
| 13 | #include "trace.h" | 14 | #include "trace.h" |
| 15 | #include "trace_output.h" | ||
| 14 | 16 | ||
| 15 | struct header_iter { | 17 | struct header_iter { |
| 16 | struct pci_dev *dev; | 18 | struct pci_dev *dev; |
| @@ -19,6 +21,7 @@ struct header_iter { | |||
| 19 | static struct trace_array *mmio_trace_array; | 21 | static struct trace_array *mmio_trace_array; |
| 20 | static bool overrun_detected; | 22 | static bool overrun_detected; |
| 21 | static unsigned long prev_overruns; | 23 | static unsigned long prev_overruns; |
| 24 | static atomic_t dropped_count; | ||
| 22 | 25 | ||
| 23 | static void mmio_reset_data(struct trace_array *tr) | 26 | static void mmio_reset_data(struct trace_array *tr) |
| 24 | { | 27 | { |
| @@ -121,11 +124,11 @@ static void mmio_close(struct trace_iterator *iter) | |||
| 121 | 124 | ||
| 122 | static unsigned long count_overruns(struct trace_iterator *iter) | 125 | static unsigned long count_overruns(struct trace_iterator *iter) |
| 123 | { | 126 | { |
| 124 | unsigned long cnt = 0; | 127 | unsigned long cnt = atomic_xchg(&dropped_count, 0); |
| 125 | unsigned long over = ring_buffer_overruns(iter->tr->buffer); | 128 | unsigned long over = ring_buffer_overruns(iter->tr->buffer); |
| 126 | 129 | ||
| 127 | if (over > prev_overruns) | 130 | if (over > prev_overruns) |
| 128 | cnt = over - prev_overruns; | 131 | cnt += over - prev_overruns; |
| 129 | prev_overruns = over; | 132 | prev_overruns = over; |
| 130 | return cnt; | 133 | return cnt; |
| 131 | } | 134 | } |
| @@ -181,21 +184,22 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
| 181 | switch (rw->opcode) { | 184 | switch (rw->opcode) { |
| 182 | case MMIO_READ: | 185 | case MMIO_READ: |
| 183 | ret = trace_seq_printf(s, | 186 | ret = trace_seq_printf(s, |
| 184 | "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 187 | "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
| 185 | rw->width, secs, usec_rem, rw->map_id, | 188 | rw->width, secs, usec_rem, rw->map_id, |
| 186 | (unsigned long long)rw->phys, | 189 | (unsigned long long)rw->phys, |
| 187 | rw->value, rw->pc, 0); | 190 | rw->value, rw->pc, 0); |
| 188 | break; | 191 | break; |
| 189 | case MMIO_WRITE: | 192 | case MMIO_WRITE: |
| 190 | ret = trace_seq_printf(s, | 193 | ret = trace_seq_printf(s, |
| 191 | "W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 194 | "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
| 192 | rw->width, secs, usec_rem, rw->map_id, | 195 | rw->width, secs, usec_rem, rw->map_id, |
| 193 | (unsigned long long)rw->phys, | 196 | (unsigned long long)rw->phys, |
| 194 | rw->value, rw->pc, 0); | 197 | rw->value, rw->pc, 0); |
| 195 | break; | 198 | break; |
| 196 | case MMIO_UNKNOWN_OP: | 199 | case MMIO_UNKNOWN_OP: |
| 197 | ret = trace_seq_printf(s, | 200 | ret = trace_seq_printf(s, |
| 198 | "UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n", | 201 | "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," |
| 202 | "%02lx 0x%lx %d\n", | ||
| 199 | secs, usec_rem, rw->map_id, | 203 | secs, usec_rem, rw->map_id, |
| 200 | (unsigned long long)rw->phys, | 204 | (unsigned long long)rw->phys, |
| 201 | (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff, | 205 | (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff, |
| @@ -227,14 +231,14 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
| 227 | switch (m->opcode) { | 231 | switch (m->opcode) { |
| 228 | case MMIO_PROBE: | 232 | case MMIO_PROBE: |
| 229 | ret = trace_seq_printf(s, | 233 | ret = trace_seq_printf(s, |
| 230 | "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", | 234 | "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", |
| 231 | secs, usec_rem, m->map_id, | 235 | secs, usec_rem, m->map_id, |
| 232 | (unsigned long long)m->phys, m->virt, m->len, | 236 | (unsigned long long)m->phys, m->virt, m->len, |
| 233 | 0UL, 0); | 237 | 0UL, 0); |
| 234 | break; | 238 | break; |
| 235 | case MMIO_UNPROBE: | 239 | case MMIO_UNPROBE: |
| 236 | ret = trace_seq_printf(s, | 240 | ret = trace_seq_printf(s, |
| 237 | "UNMAP %lu.%06lu %d 0x%lx %d\n", | 241 | "UNMAP %u.%06lu %d 0x%lx %d\n", |
| 238 | secs, usec_rem, m->map_id, 0UL, 0); | 242 | secs, usec_rem, m->map_id, 0UL, 0); |
| 239 | break; | 243 | break; |
| 240 | default: | 244 | default: |
| @@ -258,13 +262,10 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) | |||
| 258 | int ret; | 262 | int ret; |
| 259 | 263 | ||
| 260 | /* The trailing newline must be in the message. */ | 264 | /* The trailing newline must be in the message. */ |
| 261 | ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg); | 265 | ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); |
| 262 | if (!ret) | 266 | if (!ret) |
| 263 | return TRACE_TYPE_PARTIAL_LINE; | 267 | return TRACE_TYPE_PARTIAL_LINE; |
| 264 | 268 | ||
| 265 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 266 | trace_seq_print_cont(s, iter); | ||
| 267 | |||
| 268 | return TRACE_TYPE_HANDLED; | 269 | return TRACE_TYPE_HANDLED; |
| 269 | } | 270 | } |
| 270 | 271 | ||
| @@ -310,8 +311,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
| 310 | 311 | ||
| 311 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 312 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
| 312 | &irq_flags); | 313 | &irq_flags); |
| 313 | if (!event) | 314 | if (!event) { |
| 315 | atomic_inc(&dropped_count); | ||
| 314 | return; | 316 | return; |
| 317 | } | ||
| 315 | entry = ring_buffer_event_data(event); | 318 | entry = ring_buffer_event_data(event); |
| 316 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); | 319 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); |
| 317 | entry->ent.type = TRACE_MMIO_RW; | 320 | entry->ent.type = TRACE_MMIO_RW; |
| @@ -338,8 +341,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
| 338 | 341 | ||
| 339 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 342 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
| 340 | &irq_flags); | 343 | &irq_flags); |
| 341 | if (!event) | 344 | if (!event) { |
| 345 | atomic_inc(&dropped_count); | ||
| 342 | return; | 346 | return; |
| 347 | } | ||
| 343 | entry = ring_buffer_event_data(event); | 348 | entry = ring_buffer_event_data(event); |
| 344 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); | 349 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); |
| 345 | entry->ent.type = TRACE_MMIO_MAP; | 350 | entry->ent.type = TRACE_MMIO_MAP; |
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index b9767acd30ac..087b6cbf4ea5 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
| @@ -47,12 +47,8 @@ static void stop_nop_trace(struct trace_array *tr) | |||
| 47 | 47 | ||
| 48 | static int nop_trace_init(struct trace_array *tr) | 48 | static int nop_trace_init(struct trace_array *tr) |
| 49 | { | 49 | { |
| 50 | int cpu; | ||
| 51 | ctx_trace = tr; | 50 | ctx_trace = tr; |
| 52 | 51 | tracing_reset_online_cpus(tr); | |
| 53 | for_each_online_cpu(cpu) | ||
| 54 | tracing_reset(tr, cpu); | ||
| 55 | |||
| 56 | start_nop_trace(tr); | 52 | start_nop_trace(tr); |
| 57 | return 0; | 53 | return 0; |
| 58 | } | 54 | } |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c new file mode 100644 index 000000000000..b7380eee9fa1 --- /dev/null +++ b/kernel/trace/trace_output.c | |||
| @@ -0,0 +1,910 @@ | |||
| 1 | /* | ||
| 2 | * trace_output.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/mutex.h> | ||
| 10 | #include <linux/ftrace.h> | ||
| 11 | |||
| 12 | #include "trace_output.h" | ||
| 13 | |||
| 14 | /* must be a power of 2 */ | ||
| 15 | #define EVENT_HASHSIZE 128 | ||
| 16 | |||
| 17 | static DEFINE_MUTEX(trace_event_mutex); | ||
| 18 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | ||
| 19 | |||
| 20 | static int next_event_type = __TRACE_LAST_TYPE + 1; | ||
| 21 | |||
| 22 | /** | ||
| 23 | * trace_seq_printf - sequence printing of trace information | ||
| 24 | * @s: trace sequence descriptor | ||
| 25 | * @fmt: printf format string | ||
| 26 | * | ||
| 27 | * The tracer may use either sequence operations or its own | ||
| 28 | * copy to user routines. To simplify formatting of a trace, | ||
| 29 | * trace_seq_printf is used to store strings into a special | ||
| 30 | * buffer (@s). Then the output may either be used by | ||
| 31 | * the sequencer or pulled into another buffer. | ||
| 32 | */ | ||
| 33 | int | ||
| 34 | trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
| 35 | { | ||
| 36 | int len = (PAGE_SIZE - 1) - s->len; | ||
| 37 | va_list ap; | ||
| 38 | int ret; | ||
| 39 | |||
| 40 | if (!len) | ||
| 41 | return 0; | ||
| 42 | |||
| 43 | va_start(ap, fmt); | ||
| 44 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | ||
| 45 | va_end(ap); | ||
| 46 | |||
| 47 | /* If we can't write it all, don't bother writing anything */ | ||
| 48 | if (ret >= len) | ||
| 49 | return 0; | ||
| 50 | |||
| 51 | s->len += ret; | ||
| 52 | |||
| 53 | return len; | ||
| 54 | } | ||
| 55 | |||
| 56 | /** | ||
| 57 | * trace_seq_puts - trace sequence printing of simple string | ||
| 58 | * @s: trace sequence descriptor | ||
| 59 | * @str: simple string to record | ||
| 60 | * | ||
| 61 | * The tracer may use either the sequence operations or its own | ||
| 62 | * copy to user routines. This function records a simple string | ||
| 63 | * into a special buffer (@s) for later retrieval by a sequencer | ||
| 64 | * or other mechanism. | ||
| 65 | */ | ||
| 66 | int trace_seq_puts(struct trace_seq *s, const char *str) | ||
| 67 | { | ||
| 68 | int len = strlen(str); | ||
| 69 | |||
| 70 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
| 71 | return 0; | ||
| 72 | |||
| 73 | memcpy(s->buffer + s->len, str, len); | ||
| 74 | s->len += len; | ||
| 75 | |||
| 76 | return len; | ||
| 77 | } | ||
| 78 | |||
| 79 | int trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
| 80 | { | ||
| 81 | if (s->len >= (PAGE_SIZE - 1)) | ||
| 82 | return 0; | ||
| 83 | |||
| 84 | s->buffer[s->len++] = c; | ||
| 85 | |||
| 86 | return 1; | ||
| 87 | } | ||
| 88 | |||
| 89 | int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) | ||
| 90 | { | ||
| 91 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
| 92 | return 0; | ||
| 93 | |||
| 94 | memcpy(s->buffer + s->len, mem, len); | ||
| 95 | s->len += len; | ||
| 96 | |||
| 97 | return len; | ||
| 98 | } | ||
| 99 | |||
| 100 | int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) | ||
| 101 | { | ||
| 102 | unsigned char hex[HEX_CHARS]; | ||
| 103 | unsigned char *data = mem; | ||
| 104 | int i, j; | ||
| 105 | |||
| 106 | #ifdef __BIG_ENDIAN | ||
| 107 | for (i = 0, j = 0; i < len; i++) { | ||
| 108 | #else | ||
| 109 | for (i = len-1, j = 0; i >= 0; i--) { | ||
| 110 | #endif | ||
| 111 | hex[j++] = hex_asc_hi(data[i]); | ||
| 112 | hex[j++] = hex_asc_lo(data[i]); | ||
| 113 | } | ||
| 114 | hex[j++] = ' '; | ||
| 115 | |||
| 116 | return trace_seq_putmem(s, hex, j); | ||
| 117 | } | ||
| 118 | |||
| 119 | int trace_seq_path(struct trace_seq *s, struct path *path) | ||
| 120 | { | ||
| 121 | unsigned char *p; | ||
| 122 | |||
| 123 | if (s->len >= (PAGE_SIZE - 1)) | ||
| 124 | return 0; | ||
| 125 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
| 126 | if (!IS_ERR(p)) { | ||
| 127 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
| 128 | if (p) { | ||
| 129 | s->len = p - s->buffer; | ||
| 130 | return 1; | ||
| 131 | } | ||
| 132 | } else { | ||
| 133 | s->buffer[s->len++] = '?'; | ||
| 134 | return 1; | ||
| 135 | } | ||
| 136 | |||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | |||
| 140 | #ifdef CONFIG_KRETPROBES | ||
| 141 | static inline const char *kretprobed(const char *name) | ||
| 142 | { | ||
| 143 | static const char tramp_name[] = "kretprobe_trampoline"; | ||
| 144 | int size = sizeof(tramp_name); | ||
| 145 | |||
| 146 | if (strncmp(tramp_name, name, size) == 0) | ||
| 147 | return "[unknown/kretprobe'd]"; | ||
| 148 | return name; | ||
| 149 | } | ||
| 150 | #else | ||
| 151 | static inline const char *kretprobed(const char *name) | ||
| 152 | { | ||
| 153 | return name; | ||
| 154 | } | ||
| 155 | #endif /* CONFIG_KRETPROBES */ | ||
| 156 | |||
| 157 | static int | ||
| 158 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | ||
| 159 | { | ||
| 160 | #ifdef CONFIG_KALLSYMS | ||
| 161 | char str[KSYM_SYMBOL_LEN]; | ||
| 162 | const char *name; | ||
| 163 | |||
| 164 | kallsyms_lookup(address, NULL, NULL, NULL, str); | ||
| 165 | |||
| 166 | name = kretprobed(str); | ||
| 167 | |||
| 168 | return trace_seq_printf(s, fmt, name); | ||
| 169 | #endif | ||
| 170 | return 1; | ||
| 171 | } | ||
| 172 | |||
| 173 | static int | ||
| 174 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | ||
| 175 | unsigned long address) | ||
| 176 | { | ||
| 177 | #ifdef CONFIG_KALLSYMS | ||
| 178 | char str[KSYM_SYMBOL_LEN]; | ||
| 179 | const char *name; | ||
| 180 | |||
| 181 | sprint_symbol(str, address); | ||
| 182 | name = kretprobed(str); | ||
| 183 | |||
| 184 | return trace_seq_printf(s, fmt, name); | ||
| 185 | #endif | ||
| 186 | return 1; | ||
| 187 | } | ||
| 188 | |||
| 189 | #ifndef CONFIG_64BIT | ||
| 190 | # define IP_FMT "%08lx" | ||
| 191 | #else | ||
| 192 | # define IP_FMT "%016lx" | ||
| 193 | #endif | ||
| 194 | |||
| 195 | int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
| 196 | unsigned long ip, unsigned long sym_flags) | ||
| 197 | { | ||
| 198 | struct file *file = NULL; | ||
| 199 | unsigned long vmstart = 0; | ||
| 200 | int ret = 1; | ||
| 201 | |||
| 202 | if (mm) { | ||
| 203 | const struct vm_area_struct *vma; | ||
| 204 | |||
| 205 | down_read(&mm->mmap_sem); | ||
| 206 | vma = find_vma(mm, ip); | ||
| 207 | if (vma) { | ||
| 208 | file = vma->vm_file; | ||
| 209 | vmstart = vma->vm_start; | ||
| 210 | } | ||
| 211 | if (file) { | ||
| 212 | ret = trace_seq_path(s, &file->f_path); | ||
| 213 | if (ret) | ||
| 214 | ret = trace_seq_printf(s, "[+0x%lx]", | ||
| 215 | ip - vmstart); | ||
| 216 | } | ||
| 217 | up_read(&mm->mmap_sem); | ||
| 218 | } | ||
| 219 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
| 220 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
| 221 | return ret; | ||
| 222 | } | ||
| 223 | |||
| 224 | int | ||
| 225 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
| 226 | unsigned long sym_flags) | ||
| 227 | { | ||
| 228 | struct mm_struct *mm = NULL; | ||
| 229 | int ret = 1; | ||
| 230 | unsigned int i; | ||
| 231 | |||
| 232 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
| 233 | struct task_struct *task; | ||
| 234 | /* | ||
| 235 | * we do the lookup on the thread group leader, | ||
| 236 | * since individual threads might have already quit! | ||
| 237 | */ | ||
| 238 | rcu_read_lock(); | ||
| 239 | task = find_task_by_vpid(entry->ent.tgid); | ||
| 240 | if (task) | ||
| 241 | mm = get_task_mm(task); | ||
| 242 | rcu_read_unlock(); | ||
| 243 | } | ||
| 244 | |||
| 245 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
| 246 | unsigned long ip = entry->caller[i]; | ||
| 247 | |||
| 248 | if (ip == ULONG_MAX || !ret) | ||
| 249 | break; | ||
| 250 | if (i && ret) | ||
| 251 | ret = trace_seq_puts(s, " <- "); | ||
| 252 | if (!ip) { | ||
| 253 | if (ret) | ||
| 254 | ret = trace_seq_puts(s, "??"); | ||
| 255 | continue; | ||
| 256 | } | ||
| 257 | if (!ret) | ||
| 258 | break; | ||
| 259 | if (ret) | ||
| 260 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
| 261 | } | ||
| 262 | |||
| 263 | if (mm) | ||
| 264 | mmput(mm); | ||
| 265 | return ret; | ||
| 266 | } | ||
| 267 | |||
| 268 | int | ||
| 269 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | ||
| 270 | { | ||
| 271 | int ret; | ||
| 272 | |||
| 273 | if (!ip) | ||
| 274 | return trace_seq_printf(s, "0"); | ||
| 275 | |||
| 276 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | ||
| 277 | ret = seq_print_sym_offset(s, "%s", ip); | ||
| 278 | else | ||
| 279 | ret = seq_print_sym_short(s, "%s", ip); | ||
| 280 | |||
| 281 | if (!ret) | ||
| 282 | return 0; | ||
| 283 | |||
| 284 | if (sym_flags & TRACE_ITER_SYM_ADDR) | ||
| 285 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
| 286 | return ret; | ||
| 287 | } | ||
| 288 | |||
| 289 | static int | ||
| 290 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | ||
| 291 | { | ||
| 292 | int hardirq, softirq; | ||
| 293 | char *comm; | ||
| 294 | |||
| 295 | comm = trace_find_cmdline(entry->pid); | ||
| 296 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | ||
| 297 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | ||
| 298 | |||
| 299 | if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c", | ||
| 300 | comm, entry->pid, cpu, | ||
| 301 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | ||
| 302 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? | ||
| 303 | 'X' : '.', | ||
| 304 | (entry->flags & TRACE_FLAG_NEED_RESCHED) ? | ||
| 305 | 'N' : '.', | ||
| 306 | (hardirq && softirq) ? 'H' : | ||
| 307 | hardirq ? 'h' : softirq ? 's' : '.')) | ||
| 308 | return 0; | ||
| 309 | |||
| 310 | if (entry->preempt_count) | ||
| 311 | return trace_seq_printf(s, "%x", entry->preempt_count); | ||
| 312 | return trace_seq_puts(s, "."); | ||
| 313 | } | ||
| 314 | |||
| 315 | static unsigned long preempt_mark_thresh = 100; | ||
| 316 | |||
| 317 | static int | ||
| 318 | lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, | ||
| 319 | unsigned long rel_usecs) | ||
| 320 | { | ||
| 321 | return trace_seq_printf(s, " %4lldus%c: ", abs_usecs, | ||
| 322 | rel_usecs > preempt_mark_thresh ? '!' : | ||
| 323 | rel_usecs > 1 ? '+' : ' '); | ||
| 324 | } | ||
| 325 | |||
| 326 | int trace_print_context(struct trace_iterator *iter) | ||
| 327 | { | ||
| 328 | struct trace_seq *s = &iter->seq; | ||
| 329 | struct trace_entry *entry = iter->ent; | ||
| 330 | char *comm = trace_find_cmdline(entry->pid); | ||
| 331 | unsigned long long t = ns2usecs(iter->ts); | ||
| 332 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | ||
| 333 | unsigned long secs = (unsigned long)t; | ||
| 334 | |||
| 335 | return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ", | ||
| 336 | comm, entry->pid, entry->cpu, secs, usec_rem); | ||
| 337 | } | ||
| 338 | |||
| 339 | int trace_print_lat_context(struct trace_iterator *iter) | ||
| 340 | { | ||
| 341 | u64 next_ts; | ||
| 342 | int ret; | ||
| 343 | struct trace_seq *s = &iter->seq; | ||
| 344 | struct trace_entry *entry = iter->ent, | ||
| 345 | *next_entry = trace_find_next_entry(iter, NULL, | ||
| 346 | &next_ts); | ||
| 347 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); | ||
| 348 | unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); | ||
| 349 | unsigned long rel_usecs; | ||
| 350 | |||
| 351 | if (!next_entry) | ||
| 352 | next_ts = iter->ts; | ||
| 353 | rel_usecs = ns2usecs(next_ts - iter->ts); | ||
| 354 | |||
| 355 | if (verbose) { | ||
| 356 | char *comm = trace_find_cmdline(entry->pid); | ||
| 357 | ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]" | ||
| 358 | " %ld.%03ldms (+%ld.%03ldms): ", comm, | ||
| 359 | entry->pid, entry->cpu, entry->flags, | ||
| 360 | entry->preempt_count, iter->idx, | ||
| 361 | ns2usecs(iter->ts), | ||
| 362 | abs_usecs / USEC_PER_MSEC, | ||
| 363 | abs_usecs % USEC_PER_MSEC, | ||
| 364 | rel_usecs / USEC_PER_MSEC, | ||
| 365 | rel_usecs % USEC_PER_MSEC); | ||
| 366 | } else { | ||
| 367 | ret = lat_print_generic(s, entry, entry->cpu); | ||
| 368 | if (ret) | ||
| 369 | ret = lat_print_timestamp(s, abs_usecs, rel_usecs); | ||
| 370 | } | ||
| 371 | |||
| 372 | return ret; | ||
| 373 | } | ||
| 374 | |||
| 375 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | ||
| 376 | |||
| 377 | static int task_state_char(unsigned long state) | ||
| 378 | { | ||
| 379 | int bit = state ? __ffs(state) + 1 : 0; | ||
| 380 | |||
| 381 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
| 382 | } | ||
| 383 | |||
| 384 | /** | ||
| 385 | * ftrace_find_event - find a registered event | ||
| 386 | * @type: the type of event to look for | ||
| 387 | * | ||
| 388 | * Returns an event of type @type, otherwise NULL. | ||
| 389 | */ | ||
| 390 | struct trace_event *ftrace_find_event(int type) | ||
| 391 | { | ||
| 392 | struct trace_event *event; | ||
| 393 | struct hlist_node *n; | ||
| 394 | unsigned key; | ||
| 395 | |||
| 396 | key = type & (EVENT_HASHSIZE - 1); | ||
| 397 | |||
| 398 | hlist_for_each_entry_rcu(event, n, &event_hash[key], node) { | ||
| 399 | if (event->type == type) | ||
| 400 | return event; | ||
| 401 | } | ||
| 402 | |||
| 403 | return NULL; | ||
| 404 | } | ||
| 405 | |||
| 406 | /** | ||
| 407 | * register_ftrace_event - register output for an event type | ||
| 408 | * @event: the event type to register | ||
| 409 | * | ||
| 410 | * Event types are stored in a hash and this hash is used to | ||
| 411 | * find a way to print an event. If the @event->type is set | ||
| 412 | * then it will use that type, otherwise it will assign a | ||
| 413 | * type to use. | ||
| 414 | * | ||
| 415 | * If you assign your own type, please make sure it is added | ||
| 416 | * to the trace_type enum in trace.h, to avoid collisions | ||
| 417 | * with the dynamic types. | ||
| 418 | * | ||
| 419 | * Returns the event type number or zero on error. | ||
| 420 | */ | ||
| 421 | int register_ftrace_event(struct trace_event *event) | ||
| 422 | { | ||
| 423 | unsigned key; | ||
| 424 | int ret = 0; | ||
| 425 | |||
| 426 | mutex_lock(&trace_event_mutex); | ||
| 427 | |||
| 428 | if (!event->type) | ||
| 429 | event->type = next_event_type++; | ||
| 430 | else if (event->type > __TRACE_LAST_TYPE) { | ||
| 431 | printk(KERN_WARNING "Need to add type to trace.h\n"); | ||
| 432 | WARN_ON(1); | ||
| 433 | } | ||
| 434 | |||
| 435 | if (ftrace_find_event(event->type)) | ||
| 436 | goto out; | ||
| 437 | |||
| 438 | key = event->type & (EVENT_HASHSIZE - 1); | ||
| 439 | |||
| 440 | hlist_add_head_rcu(&event->node, &event_hash[key]); | ||
| 441 | |||
| 442 | ret = event->type; | ||
| 443 | out: | ||
| 444 | mutex_unlock(&trace_event_mutex); | ||
| 445 | |||
| 446 | return ret; | ||
| 447 | } | ||
| 448 | |||
| 449 | /** | ||
| 450 | * unregister_ftrace_event - remove a no longer used event | ||
| 451 | * @event: the event to remove | ||
| 452 | */ | ||
| 453 | int unregister_ftrace_event(struct trace_event *event) | ||
| 454 | { | ||
| 455 | mutex_lock(&trace_event_mutex); | ||
| 456 | hlist_del(&event->node); | ||
| 457 | mutex_unlock(&trace_event_mutex); | ||
| 458 | |||
| 459 | return 0; | ||
| 460 | } | ||
| 461 | |||
| 462 | /* | ||
| 463 | * Standard events | ||
| 464 | */ | ||
| 465 | |||
| 466 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags) | ||
| 467 | { | ||
| 468 | return TRACE_TYPE_HANDLED; | ||
| 469 | } | ||
| 470 | |||
| 471 | /* TRACE_FN */ | ||
| 472 | static enum print_line_t trace_fn_latency(struct trace_iterator *iter, | ||
| 473 | int flags) | ||
| 474 | { | ||
| 475 | struct ftrace_entry *field; | ||
| 476 | struct trace_seq *s = &iter->seq; | ||
| 477 | |||
| 478 | trace_assign_type(field, iter->ent); | ||
| 479 | |||
| 480 | if (!seq_print_ip_sym(s, field->ip, flags)) | ||
| 481 | goto partial; | ||
| 482 | if (!trace_seq_puts(s, " (")) | ||
| 483 | goto partial; | ||
| 484 | if (!seq_print_ip_sym(s, field->parent_ip, flags)) | ||
| 485 | goto partial; | ||
| 486 | if (!trace_seq_puts(s, ")\n")) | ||
| 487 | goto partial; | ||
| 488 | |||
| 489 | return TRACE_TYPE_HANDLED; | ||
| 490 | |||
| 491 | partial: | ||
| 492 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 493 | } | ||
| 494 | |||
| 495 | static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags) | ||
| 496 | { | ||
| 497 | struct ftrace_entry *field; | ||
| 498 | struct trace_seq *s = &iter->seq; | ||
| 499 | |||
| 500 | trace_assign_type(field, iter->ent); | ||
| 501 | |||
| 502 | if (!seq_print_ip_sym(s, field->ip, flags)) | ||
| 503 | goto partial; | ||
| 504 | |||
| 505 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { | ||
| 506 | if (!trace_seq_printf(s, " <-")) | ||
| 507 | goto partial; | ||
| 508 | if (!seq_print_ip_sym(s, | ||
| 509 | field->parent_ip, | ||
| 510 | flags)) | ||
| 511 | goto partial; | ||
| 512 | } | ||
| 513 | if (!trace_seq_printf(s, "\n")) | ||
| 514 | goto partial; | ||
| 515 | |||
| 516 | return TRACE_TYPE_HANDLED; | ||
| 517 | |||
| 518 | partial: | ||
| 519 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 520 | } | ||
| 521 | |||
| 522 | static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags) | ||
| 523 | { | ||
| 524 | struct ftrace_entry *field; | ||
| 525 | |||
| 526 | trace_assign_type(field, iter->ent); | ||
| 527 | |||
| 528 | if (!trace_seq_printf(&iter->seq, "%lx %lx\n", | ||
| 529 | field->ip, | ||
| 530 | field->parent_ip)) | ||
| 531 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 532 | |||
| 533 | return TRACE_TYPE_HANDLED; | ||
| 534 | } | ||
| 535 | |||
| 536 | static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags) | ||
| 537 | { | ||
| 538 | struct ftrace_entry *field; | ||
| 539 | struct trace_seq *s = &iter->seq; | ||
| 540 | |||
| 541 | trace_assign_type(field, iter->ent); | ||
| 542 | |||
| 543 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | ||
| 544 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | ||
| 545 | |||
| 546 | return TRACE_TYPE_HANDLED; | ||
| 547 | } | ||
| 548 | |||
| 549 | static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags) | ||
| 550 | { | ||
| 551 | struct ftrace_entry *field; | ||
| 552 | struct trace_seq *s = &iter->seq; | ||
| 553 | |||
| 554 | trace_assign_type(field, iter->ent); | ||
| 555 | |||
| 556 | SEQ_PUT_FIELD_RET(s, field->ip); | ||
| 557 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | ||
| 558 | |||
| 559 | return TRACE_TYPE_HANDLED; | ||
| 560 | } | ||
| 561 | |||
| 562 | static struct trace_event trace_fn_event = { | ||
| 563 | .type = TRACE_FN, | ||
| 564 | .trace = trace_fn_trace, | ||
| 565 | .latency_trace = trace_fn_latency, | ||
| 566 | .raw = trace_fn_raw, | ||
| 567 | .hex = trace_fn_hex, | ||
| 568 | .binary = trace_fn_bin, | ||
| 569 | }; | ||
| 570 | |||
| 571 | /* TRACE_CTX and TRACE_WAKE */ | ||
| 572 | static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, | ||
| 573 | char *delim) | ||
| 574 | { | ||
| 575 | struct ctx_switch_entry *field; | ||
| 576 | char *comm; | ||
| 577 | int S, T; | ||
| 578 | |||
| 579 | trace_assign_type(field, iter->ent); | ||
| 580 | |||
| 581 | T = task_state_char(field->next_state); | ||
| 582 | S = task_state_char(field->prev_state); | ||
| 583 | comm = trace_find_cmdline(field->next_pid); | ||
| 584 | if (!trace_seq_printf(&iter->seq, | ||
| 585 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | ||
| 586 | field->prev_pid, | ||
| 587 | field->prev_prio, | ||
| 588 | S, delim, | ||
| 589 | field->next_cpu, | ||
| 590 | field->next_pid, | ||
| 591 | field->next_prio, | ||
| 592 | T, comm)) | ||
| 593 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 594 | |||
| 595 | return TRACE_TYPE_HANDLED; | ||
| 596 | } | ||
| 597 | |||
| 598 | static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags) | ||
| 599 | { | ||
| 600 | return trace_ctxwake_print(iter, "==>"); | ||
| 601 | } | ||
| 602 | |||
| 603 | static enum print_line_t trace_wake_print(struct trace_iterator *iter, | ||
| 604 | int flags) | ||
| 605 | { | ||
| 606 | return trace_ctxwake_print(iter, " +"); | ||
| 607 | } | ||
| 608 | |||
| 609 | static int trace_ctxwake_raw(struct trace_iterator *iter, char S) | ||
| 610 | { | ||
| 611 | struct ctx_switch_entry *field; | ||
| 612 | int T; | ||
| 613 | |||
| 614 | trace_assign_type(field, iter->ent); | ||
| 615 | |||
| 616 | if (!S) | ||
| 617 | S = task_state_char(field->prev_state); | ||
| 618 | T = task_state_char(field->next_state); | ||
| 619 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", | ||
| 620 | field->prev_pid, | ||
| 621 | field->prev_prio, | ||
| 622 | S, | ||
| 623 | field->next_cpu, | ||
| 624 | field->next_pid, | ||
| 625 | field->next_prio, | ||
| 626 | T)) | ||
| 627 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 628 | |||
| 629 | return TRACE_TYPE_HANDLED; | ||
| 630 | } | ||
| 631 | |||
| 632 | static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags) | ||
| 633 | { | ||
| 634 | return trace_ctxwake_raw(iter, 0); | ||
| 635 | } | ||
| 636 | |||
| 637 | static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags) | ||
| 638 | { | ||
| 639 | return trace_ctxwake_raw(iter, '+'); | ||
| 640 | } | ||
| 641 | |||
| 642 | |||
| 643 | static int trace_ctxwake_hex(struct trace_iterator *iter, char S) | ||
| 644 | { | ||
| 645 | struct ctx_switch_entry *field; | ||
| 646 | struct trace_seq *s = &iter->seq; | ||
| 647 | int T; | ||
| 648 | |||
| 649 | trace_assign_type(field, iter->ent); | ||
| 650 | |||
| 651 | if (!S) | ||
| 652 | S = task_state_char(field->prev_state); | ||
| 653 | T = task_state_char(field->next_state); | ||
| 654 | |||
| 655 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | ||
| 656 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | ||
| 657 | SEQ_PUT_HEX_FIELD_RET(s, S); | ||
| 658 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); | ||
| 659 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | ||
| 660 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | ||
| 661 | SEQ_PUT_HEX_FIELD_RET(s, T); | ||
| 662 | |||
| 663 | return TRACE_TYPE_HANDLED; | ||
| 664 | } | ||
| 665 | |||
| 666 | static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags) | ||
| 667 | { | ||
| 668 | return trace_ctxwake_hex(iter, 0); | ||
| 669 | } | ||
| 670 | |||
| 671 | static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags) | ||
| 672 | { | ||
| 673 | return trace_ctxwake_hex(iter, '+'); | ||
| 674 | } | ||
| 675 | |||
| 676 | static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, | ||
| 677 | int flags) | ||
| 678 | { | ||
| 679 | struct ctx_switch_entry *field; | ||
| 680 | struct trace_seq *s = &iter->seq; | ||
| 681 | |||
| 682 | trace_assign_type(field, iter->ent); | ||
| 683 | |||
| 684 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | ||
| 685 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | ||
| 686 | SEQ_PUT_FIELD_RET(s, field->prev_state); | ||
| 687 | SEQ_PUT_FIELD_RET(s, field->next_pid); | ||
| 688 | SEQ_PUT_FIELD_RET(s, field->next_prio); | ||
| 689 | SEQ_PUT_FIELD_RET(s, field->next_state); | ||
| 690 | |||
| 691 | return TRACE_TYPE_HANDLED; | ||
| 692 | } | ||
| 693 | |||
| 694 | static struct trace_event trace_ctx_event = { | ||
| 695 | .type = TRACE_CTX, | ||
| 696 | .trace = trace_ctx_print, | ||
| 697 | .latency_trace = trace_ctx_print, | ||
| 698 | .raw = trace_ctx_raw, | ||
| 699 | .hex = trace_ctx_hex, | ||
| 700 | .binary = trace_ctxwake_bin, | ||
| 701 | }; | ||
| 702 | |||
| 703 | static struct trace_event trace_wake_event = { | ||
| 704 | .type = TRACE_WAKE, | ||
| 705 | .trace = trace_wake_print, | ||
| 706 | .latency_trace = trace_wake_print, | ||
| 707 | .raw = trace_wake_raw, | ||
| 708 | .hex = trace_wake_hex, | ||
| 709 | .binary = trace_ctxwake_bin, | ||
| 710 | }; | ||
| 711 | |||
| 712 | /* TRACE_SPECIAL */ | ||
| 713 | static enum print_line_t trace_special_print(struct trace_iterator *iter, | ||
| 714 | int flags) | ||
| 715 | { | ||
| 716 | struct special_entry *field; | ||
| 717 | |||
| 718 | trace_assign_type(field, iter->ent); | ||
| 719 | |||
| 720 | if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n", | ||
| 721 | field->arg1, | ||
| 722 | field->arg2, | ||
| 723 | field->arg3)) | ||
| 724 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 725 | |||
| 726 | return TRACE_TYPE_HANDLED; | ||
| 727 | } | ||
| 728 | |||
| 729 | static enum print_line_t trace_special_hex(struct trace_iterator *iter, | ||
| 730 | int flags) | ||
| 731 | { | ||
| 732 | struct special_entry *field; | ||
| 733 | struct trace_seq *s = &iter->seq; | ||
| 734 | |||
| 735 | trace_assign_type(field, iter->ent); | ||
| 736 | |||
| 737 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | ||
| 738 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | ||
| 739 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | ||
| 740 | |||
| 741 | return TRACE_TYPE_HANDLED; | ||
| 742 | } | ||
| 743 | |||
| 744 | static enum print_line_t trace_special_bin(struct trace_iterator *iter, | ||
| 745 | int flags) | ||
| 746 | { | ||
| 747 | struct special_entry *field; | ||
| 748 | struct trace_seq *s = &iter->seq; | ||
| 749 | |||
| 750 | trace_assign_type(field, iter->ent); | ||
| 751 | |||
| 752 | SEQ_PUT_FIELD_RET(s, field->arg1); | ||
| 753 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
| 754 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
| 755 | |||
| 756 | return TRACE_TYPE_HANDLED; | ||
| 757 | } | ||
| 758 | |||
| 759 | static struct trace_event trace_special_event = { | ||
| 760 | .type = TRACE_SPECIAL, | ||
| 761 | .trace = trace_special_print, | ||
| 762 | .latency_trace = trace_special_print, | ||
| 763 | .raw = trace_special_print, | ||
| 764 | .hex = trace_special_hex, | ||
| 765 | .binary = trace_special_bin, | ||
| 766 | }; | ||
| 767 | |||
| 768 | /* TRACE_STACK */ | ||
| 769 | |||
| 770 | static enum print_line_t trace_stack_print(struct trace_iterator *iter, | ||
| 771 | int flags) | ||
| 772 | { | ||
| 773 | struct stack_entry *field; | ||
| 774 | struct trace_seq *s = &iter->seq; | ||
| 775 | int i; | ||
| 776 | |||
| 777 | trace_assign_type(field, iter->ent); | ||
| 778 | |||
| 779 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
| 780 | if (i) { | ||
| 781 | if (!trace_seq_puts(s, " <= ")) | ||
| 782 | goto partial; | ||
| 783 | |||
| 784 | if (!seq_print_ip_sym(s, field->caller[i], flags)) | ||
| 785 | goto partial; | ||
| 786 | } | ||
| 787 | if (!trace_seq_puts(s, "\n")) | ||
| 788 | goto partial; | ||
| 789 | } | ||
| 790 | |||
| 791 | return TRACE_TYPE_HANDLED; | ||
| 792 | |||
| 793 | partial: | ||
| 794 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 795 | } | ||
| 796 | |||
| 797 | static struct trace_event trace_stack_event = { | ||
| 798 | .type = TRACE_STACK, | ||
| 799 | .trace = trace_stack_print, | ||
| 800 | .latency_trace = trace_stack_print, | ||
| 801 | .raw = trace_special_print, | ||
| 802 | .hex = trace_special_hex, | ||
| 803 | .binary = trace_special_bin, | ||
| 804 | }; | ||
| 805 | |||
| 806 | /* TRACE_USER_STACK */ | ||
| 807 | static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, | ||
| 808 | int flags) | ||
| 809 | { | ||
| 810 | struct userstack_entry *field; | ||
| 811 | struct trace_seq *s = &iter->seq; | ||
| 812 | |||
| 813 | trace_assign_type(field, iter->ent); | ||
| 814 | |||
| 815 | if (!seq_print_userip_objs(field, s, flags)) | ||
| 816 | goto partial; | ||
| 817 | |||
| 818 | if (!trace_seq_putc(s, '\n')) | ||
| 819 | goto partial; | ||
| 820 | |||
| 821 | return TRACE_TYPE_HANDLED; | ||
| 822 | |||
| 823 | partial: | ||
| 824 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 825 | } | ||
| 826 | |||
| 827 | static struct trace_event trace_user_stack_event = { | ||
| 828 | .type = TRACE_USER_STACK, | ||
| 829 | .trace = trace_user_stack_print, | ||
| 830 | .latency_trace = trace_user_stack_print, | ||
| 831 | .raw = trace_special_print, | ||
| 832 | .hex = trace_special_hex, | ||
| 833 | .binary = trace_special_bin, | ||
| 834 | }; | ||
| 835 | |||
| 836 | /* TRACE_PRINT */ | ||
| 837 | static enum print_line_t trace_print_print(struct trace_iterator *iter, | ||
| 838 | int flags) | ||
| 839 | { | ||
| 840 | struct print_entry *field; | ||
| 841 | struct trace_seq *s = &iter->seq; | ||
| 842 | |||
| 843 | trace_assign_type(field, iter->ent); | ||
| 844 | |||
| 845 | if (!seq_print_ip_sym(s, field->ip, flags)) | ||
| 846 | goto partial; | ||
| 847 | |||
| 848 | if (!trace_seq_printf(s, ": %s", field->buf)) | ||
| 849 | goto partial; | ||
| 850 | |||
| 851 | return TRACE_TYPE_HANDLED; | ||
| 852 | |||
| 853 | partial: | ||
| 854 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 855 | } | ||
| 856 | |||
| 857 | static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) | ||
| 858 | { | ||
| 859 | struct print_entry *field; | ||
| 860 | |||
| 861 | trace_assign_type(field, iter->ent); | ||
| 862 | |||
| 863 | if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf)) | ||
| 864 | goto partial; | ||
| 865 | |||
| 866 | return TRACE_TYPE_HANDLED; | ||
| 867 | |||
| 868 | partial: | ||
| 869 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 870 | } | ||
| 871 | |||
| 872 | static struct trace_event trace_print_event = { | ||
| 873 | .type = TRACE_PRINT, | ||
| 874 | .trace = trace_print_print, | ||
| 875 | .latency_trace = trace_print_print, | ||
| 876 | .raw = trace_print_raw, | ||
| 877 | .hex = trace_nop_print, | ||
| 878 | .binary = trace_nop_print, | ||
| 879 | }; | ||
| 880 | |||
| 881 | static struct trace_event *events[] __initdata = { | ||
| 882 | &trace_fn_event, | ||
| 883 | &trace_ctx_event, | ||
| 884 | &trace_wake_event, | ||
| 885 | &trace_special_event, | ||
| 886 | &trace_stack_event, | ||
| 887 | &trace_user_stack_event, | ||
| 888 | &trace_print_event, | ||
| 889 | NULL | ||
| 890 | }; | ||
| 891 | |||
| 892 | __init static int init_events(void) | ||
| 893 | { | ||
| 894 | struct trace_event *event; | ||
| 895 | int i, ret; | ||
| 896 | |||
| 897 | for (i = 0; events[i]; i++) { | ||
| 898 | event = events[i]; | ||
| 899 | |||
| 900 | ret = register_ftrace_event(event); | ||
| 901 | if (!ret) { | ||
| 902 | printk(KERN_WARNING "event %d failed to register\n", | ||
| 903 | event->type); | ||
| 904 | WARN_ON_ONCE(1); | ||
| 905 | } | ||
| 906 | } | ||
| 907 | |||
| 908 | return 0; | ||
| 909 | } | ||
| 910 | device_initcall(init_events); | ||
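
For orientation, here is a minimal sketch of how a tracer could hook into the event registry introduced above. It is not part of the patch: struct my_entry, its ->value member and all the my_* names are invented for illustration, and the handler simply follows the same partial-line convention used by the handlers in trace_output.c.

/* Hypothetical consumer of the new trace_output.c API (illustration only). */
#include <linux/module.h>
#include "trace.h"
#include "trace_output.h"

struct my_entry {			/* invented entry layout */
	struct trace_entry	ent;
	unsigned long		value;
};

static enum print_line_t my_event_print(struct trace_iterator *iter, int flags)
{
	struct my_entry *field = (struct my_entry *)iter->ent;
	struct trace_seq *s = &iter->seq;

	/* trace_seq_printf() returns 0 once the one-page seq buffer is full */
	if (!trace_seq_printf(s, "myevent: %lu\n", field->value))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event my_trace_event = {
	.type		= 0,		/* 0 lets register_ftrace_event() assign one */
	.trace		= my_event_print,
	.latency_trace	= my_event_print,
	.raw		= trace_nop_print,
	.hex		= trace_nop_print,
	.binary		= trace_nop_print,
};

__init static int my_event_init(void)
{
	/* register_ftrace_event() returns the event type, or zero on error */
	if (!register_ftrace_event(&my_trace_event))
		WARN_ON_ONCE(1);
	return 0;
}
device_initcall(my_event_init);

Note that the zero-on-error convention is the same one init_events() above relies on when registering the standard events.
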
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h new file mode 100644 index 000000000000..551a25a72217 --- /dev/null +++ b/kernel/trace/trace_output.h | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | #ifndef __TRACE_EVENTS_H | ||
| 2 | #define __TRACE_EVENTS_H | ||
| 3 | |||
| 4 | #include "trace.h" | ||
| 5 | |||
| 6 | typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, | ||
| 7 | int flags); | ||
| 8 | |||
| 9 | struct trace_event { | ||
| 10 | struct hlist_node node; | ||
| 11 | int type; | ||
| 12 | trace_print_func trace; | ||
| 13 | trace_print_func latency_trace; | ||
| 14 | trace_print_func raw; | ||
| 15 | trace_print_func hex; | ||
| 16 | trace_print_func binary; | ||
| 17 | }; | ||
| 18 | |||
| 19 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
| 20 | __attribute__ ((format (printf, 2, 3))); | ||
| 21 | extern int | ||
| 22 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | ||
| 23 | unsigned long sym_flags); | ||
| 24 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
| 25 | size_t cnt); | ||
| 26 | int trace_seq_puts(struct trace_seq *s, const char *str); | ||
| 27 | int trace_seq_putc(struct trace_seq *s, unsigned char c); | ||
| 28 | int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len); | ||
| 29 | int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len); | ||
| 30 | int trace_seq_path(struct trace_seq *s, struct path *path); | ||
| 31 | int seq_print_userip_objs(const struct userstack_entry *entry, | ||
| 32 | struct trace_seq *s, unsigned long sym_flags); | ||
| 33 | int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
| 34 | unsigned long ip, unsigned long sym_flags); | ||
| 35 | |||
| 36 | int trace_print_context(struct trace_iterator *iter); | ||
| 37 | int trace_print_lat_context(struct trace_iterator *iter); | ||
| 38 | |||
| 39 | struct trace_event *ftrace_find_event(int type); | ||
| 40 | int register_ftrace_event(struct trace_event *event); | ||
| 41 | int unregister_ftrace_event(struct trace_event *event); | ||
| 42 | |||
| 43 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags); | ||
| 44 | |||
| 45 | #define MAX_MEMHEX_BYTES 8 | ||
| 46 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
| 47 | |||
| 48 | #define SEQ_PUT_FIELD_RET(s, x) \ | ||
| 49 | do { \ | ||
| 50 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | ||
| 51 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
| 52 | } while (0) | ||
| 53 | |||
| 54 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | ||
| 55 | do { \ | ||
| 56 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ | ||
| 57 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | ||
| 58 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
| 59 | } while (0) | ||
| 60 | |||
| 61 | #endif | ||
| 62 | |||
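
As a small usage sketch of the two macros above (again hypothetical; struct my_entry is the same invented type as in the sketch after trace_output.c): each *_RET macro returns TRACE_TYPE_PARTIAL_LINE from the calling function when the field no longer fits in the seq buffer, so per-field error handling disappears from the handler body.

static enum print_line_t my_event_hex(struct trace_iterator *iter, int flags)
{
	struct my_entry *field = (struct my_entry *)iter->ent;
	struct trace_seq *s = &iter->seq;

	/* the BUILD_BUG_ON in the macro rejects fields wider than 8 bytes */
	SEQ_PUT_HEX_FIELD_RET(s, field->value);

	return TRACE_TYPE_HANDLED;
}
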
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index 7bda248daf55..faa6ab7a1f5c 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | 17 | ||
| 18 | #include "trace.h" | 18 | #include "trace.h" |
| 19 | #include "trace_output.h" | ||
| 19 | 20 | ||
| 20 | static struct trace_array *power_trace; | 21 | static struct trace_array *power_trace; |
| 21 | static int __read_mostly trace_power_enabled; | 22 | static int __read_mostly trace_power_enabled; |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 42ae1e77b6b3..a48c9b4b0c85 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -25,6 +25,7 @@ static int __read_mostly tracer_enabled; | |||
| 25 | static struct task_struct *wakeup_task; | 25 | static struct task_struct *wakeup_task; |
| 26 | static int wakeup_cpu; | 26 | static int wakeup_cpu; |
| 27 | static unsigned wakeup_prio = -1; | 27 | static unsigned wakeup_prio = -1; |
| 28 | static int wakeup_rt; | ||
| 28 | 29 | ||
| 29 | static raw_spinlock_t wakeup_lock = | 30 | static raw_spinlock_t wakeup_lock = |
| 30 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 31 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
| @@ -152,6 +153,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | |||
| 152 | goto out_unlock; | 153 | goto out_unlock; |
| 153 | 154 | ||
| 154 | trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); | 155 | trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); |
| 156 | tracing_sched_switch_trace(wakeup_trace, data, prev, next, flags, pc); | ||
| 155 | 157 | ||
| 156 | /* | 158 | /* |
| 157 | * usecs conversion is slow so we try to delay the conversion | 159 | * usecs conversion is slow so we try to delay the conversion |
| @@ -182,13 +184,10 @@ out: | |||
| 182 | 184 | ||
| 183 | static void __wakeup_reset(struct trace_array *tr) | 185 | static void __wakeup_reset(struct trace_array *tr) |
| 184 | { | 186 | { |
| 185 | struct trace_array_cpu *data; | ||
| 186 | int cpu; | 187 | int cpu; |
| 187 | 188 | ||
| 188 | for_each_possible_cpu(cpu) { | 189 | for_each_possible_cpu(cpu) |
| 189 | data = tr->data[cpu]; | ||
| 190 | tracing_reset(tr, cpu); | 190 | tracing_reset(tr, cpu); |
| 191 | } | ||
| 192 | 191 | ||
| 193 | wakeup_cpu = -1; | 192 | wakeup_cpu = -1; |
| 194 | wakeup_prio = -1; | 193 | wakeup_prio = -1; |
| @@ -213,6 +212,7 @@ static void wakeup_reset(struct trace_array *tr) | |||
| 213 | static void | 212 | static void |
| 214 | probe_wakeup(struct rq *rq, struct task_struct *p, int success) | 213 | probe_wakeup(struct rq *rq, struct task_struct *p, int success) |
| 215 | { | 214 | { |
| 215 | struct trace_array_cpu *data; | ||
| 216 | int cpu = smp_processor_id(); | 216 | int cpu = smp_processor_id(); |
| 217 | unsigned long flags; | 217 | unsigned long flags; |
| 218 | long disabled; | 218 | long disabled; |
| @@ -224,7 +224,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) | |||
| 224 | tracing_record_cmdline(p); | 224 | tracing_record_cmdline(p); |
| 225 | tracing_record_cmdline(current); | 225 | tracing_record_cmdline(current); |
| 226 | 226 | ||
| 227 | if (likely(!rt_task(p)) || | 227 | if ((wakeup_rt && !rt_task(p)) || |
| 228 | p->prio >= wakeup_prio || | 228 | p->prio >= wakeup_prio || |
| 229 | p->prio >= current->prio) | 229 | p->prio >= current->prio) |
| 230 | return; | 230 | return; |
| @@ -252,9 +252,12 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) | |||
| 252 | 252 | ||
| 253 | local_save_flags(flags); | 253 | local_save_flags(flags); |
| 254 | 254 | ||
| 255 | wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); | 255 | data = wakeup_trace->data[wakeup_cpu]; |
| 256 | trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], | 256 | data->preempt_timestamp = ftrace_now(cpu); |
| 257 | CALLER_ADDR1, CALLER_ADDR2, flags, pc); | 257 | tracing_sched_wakeup_trace(wakeup_trace, data, p, current, |
| 258 | flags, pc); | ||
| 259 | trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, | ||
| 260 | flags, pc); | ||
| 258 | 261 | ||
| 259 | out_locked: | 262 | out_locked: |
| 260 | __raw_spin_unlock(&wakeup_lock); | 263 | __raw_spin_unlock(&wakeup_lock); |
| @@ -262,12 +265,6 @@ out: | |||
| 262 | atomic_dec(&wakeup_trace->data[cpu]->disabled); | 265 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
| 263 | } | 266 | } |
| 264 | 267 | ||
| 265 | /* | ||
| 266 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
| 267 | * variable when we disable it when we open a trace output file. | ||
| 268 | */ | ||
| 269 | static int save_tracer_enabled; | ||
| 270 | |||
| 271 | static void start_wakeup_tracer(struct trace_array *tr) | 268 | static void start_wakeup_tracer(struct trace_array *tr) |
| 272 | { | 269 | { |
| 273 | int ret; | 270 | int ret; |
| @@ -306,13 +303,10 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
| 306 | 303 | ||
| 307 | register_ftrace_function(&trace_ops); | 304 | register_ftrace_function(&trace_ops); |
| 308 | 305 | ||
| 309 | if (tracing_is_enabled()) { | 306 | if (tracing_is_enabled()) |
| 310 | tracer_enabled = 1; | 307 | tracer_enabled = 1; |
| 311 | save_tracer_enabled = 1; | 308 | else |
| 312 | } else { | ||
| 313 | tracer_enabled = 0; | 309 | tracer_enabled = 0; |
| 314 | save_tracer_enabled = 0; | ||
| 315 | } | ||
| 316 | 310 | ||
| 317 | return; | 311 | return; |
| 318 | fail_deprobe_wake_new: | 312 | fail_deprobe_wake_new: |
| @@ -324,14 +318,13 @@ fail_deprobe: | |||
| 324 | static void stop_wakeup_tracer(struct trace_array *tr) | 318 | static void stop_wakeup_tracer(struct trace_array *tr) |
| 325 | { | 319 | { |
| 326 | tracer_enabled = 0; | 320 | tracer_enabled = 0; |
| 327 | save_tracer_enabled = 0; | ||
| 328 | unregister_ftrace_function(&trace_ops); | 321 | unregister_ftrace_function(&trace_ops); |
| 329 | unregister_trace_sched_switch(probe_wakeup_sched_switch); | 322 | unregister_trace_sched_switch(probe_wakeup_sched_switch); |
| 330 | unregister_trace_sched_wakeup_new(probe_wakeup); | 323 | unregister_trace_sched_wakeup_new(probe_wakeup); |
| 331 | unregister_trace_sched_wakeup(probe_wakeup); | 324 | unregister_trace_sched_wakeup(probe_wakeup); |
| 332 | } | 325 | } |
| 333 | 326 | ||
| 334 | static int wakeup_tracer_init(struct trace_array *tr) | 327 | static int __wakeup_tracer_init(struct trace_array *tr) |
| 335 | { | 328 | { |
| 336 | tracing_max_latency = 0; | 329 | tracing_max_latency = 0; |
| 337 | wakeup_trace = tr; | 330 | wakeup_trace = tr; |
| @@ -339,6 +332,18 @@ static int wakeup_tracer_init(struct trace_array *tr) | |||
| 339 | return 0; | 332 | return 0; |
| 340 | } | 333 | } |
| 341 | 334 | ||
| 335 | static int wakeup_tracer_init(struct trace_array *tr) | ||
| 336 | { | ||
| 337 | wakeup_rt = 0; | ||
| 338 | return __wakeup_tracer_init(tr); | ||
| 339 | } | ||
| 340 | |||
| 341 | static int wakeup_rt_tracer_init(struct trace_array *tr) | ||
| 342 | { | ||
| 343 | wakeup_rt = 1; | ||
| 344 | return __wakeup_tracer_init(tr); | ||
| 345 | } | ||
| 346 | |||
| 342 | static void wakeup_tracer_reset(struct trace_array *tr) | 347 | static void wakeup_tracer_reset(struct trace_array *tr) |
| 343 | { | 348 | { |
| 344 | stop_wakeup_tracer(tr); | 349 | stop_wakeup_tracer(tr); |
| @@ -350,28 +355,11 @@ static void wakeup_tracer_start(struct trace_array *tr) | |||
| 350 | { | 355 | { |
| 351 | wakeup_reset(tr); | 356 | wakeup_reset(tr); |
| 352 | tracer_enabled = 1; | 357 | tracer_enabled = 1; |
| 353 | save_tracer_enabled = 1; | ||
| 354 | } | 358 | } |
| 355 | 359 | ||
| 356 | static void wakeup_tracer_stop(struct trace_array *tr) | 360 | static void wakeup_tracer_stop(struct trace_array *tr) |
| 357 | { | 361 | { |
| 358 | tracer_enabled = 0; | 362 | tracer_enabled = 0; |
| 359 | save_tracer_enabled = 0; | ||
| 360 | } | ||
| 361 | |||
| 362 | static void wakeup_tracer_open(struct trace_iterator *iter) | ||
| 363 | { | ||
| 364 | /* stop the trace while dumping */ | ||
| 365 | tracer_enabled = 0; | ||
| 366 | } | ||
| 367 | |||
| 368 | static void wakeup_tracer_close(struct trace_iterator *iter) | ||
| 369 | { | ||
| 370 | /* forget about any processes we were recording */ | ||
| 371 | if (save_tracer_enabled) { | ||
| 372 | wakeup_reset(iter->tr); | ||
| 373 | tracer_enabled = 1; | ||
| 374 | } | ||
| 375 | } | 363 | } |
| 376 | 364 | ||
| 377 | static struct tracer wakeup_tracer __read_mostly = | 365 | static struct tracer wakeup_tracer __read_mostly = |
| @@ -381,8 +369,19 @@ static struct tracer wakeup_tracer __read_mostly = | |||
| 381 | .reset = wakeup_tracer_reset, | 369 | .reset = wakeup_tracer_reset, |
| 382 | .start = wakeup_tracer_start, | 370 | .start = wakeup_tracer_start, |
| 383 | .stop = wakeup_tracer_stop, | 371 | .stop = wakeup_tracer_stop, |
| 384 | .open = wakeup_tracer_open, | 372 | .print_max = 1, |
| 385 | .close = wakeup_tracer_close, | 373 | #ifdef CONFIG_FTRACE_SELFTEST |
| 374 | .selftest = trace_selftest_startup_wakeup, | ||
| 375 | #endif | ||
| 376 | }; | ||
| 377 | |||
| 378 | static struct tracer wakeup_rt_tracer __read_mostly = | ||
| 379 | { | ||
| 380 | .name = "wakeup_rt", | ||
| 381 | .init = wakeup_rt_tracer_init, | ||
| 382 | .reset = wakeup_tracer_reset, | ||
| 383 | .start = wakeup_tracer_start, | ||
| 384 | .stop = wakeup_tracer_stop, | ||
| 386 | .print_max = 1, | 385 | .print_max = 1, |
| 387 | #ifdef CONFIG_FTRACE_SELFTEST | 386 | #ifdef CONFIG_FTRACE_SELFTEST |
| 388 | .selftest = trace_selftest_startup_wakeup, | 387 | .selftest = trace_selftest_startup_wakeup, |
| @@ -397,6 +396,10 @@ __init static int init_wakeup_tracer(void) | |||
| 397 | if (ret) | 396 | if (ret) |
| 398 | return ret; | 397 | return ret; |
| 399 | 398 | ||
| 399 | ret = register_tracer(&wakeup_rt_tracer); | ||
| 400 | if (ret) | ||
| 401 | return ret; | ||
| 402 | |||
| 400 | return 0; | 403 | return 0; |
| 401 | } | 404 | } |
| 402 | device_initcall(init_wakeup_tracer); | 405 | device_initcall(init_wakeup_tracer); |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 88c8eb70f54a..5013812578b1 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -9,7 +9,6 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
| 9 | case TRACE_FN: | 9 | case TRACE_FN: |
| 10 | case TRACE_CTX: | 10 | case TRACE_CTX: |
| 11 | case TRACE_WAKE: | 11 | case TRACE_WAKE: |
| 12 | case TRACE_CONT: | ||
| 13 | case TRACE_STACK: | 12 | case TRACE_STACK: |
| 14 | case TRACE_PRINT: | 13 | case TRACE_PRINT: |
| 15 | case TRACE_SPECIAL: | 14 | case TRACE_SPECIAL: |
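
The new trace_stat.c added below provides a generic "register a stats provider, get a debugfs file" facility. As a rough client sketch (every my_* name is invented; the tracer_stat member names are inferred from their use in trace_stat.c below, and the real struct definition lives in trace_stat.h, which is not part of this hunk):

#include <linux/module.h>
#include <linux/seq_file.h>
#include "trace_stat.h"

#define MY_SLOTS 16

struct my_stat {
	unsigned long	hits;		/* invented per-slot counter */
};

static struct my_stat my_stats[MY_SLOTS];

static void *my_stat_start(void)
{
	return &my_stats[0];
}

/* called with the previous entry and a 1-based index; NULL ends iteration */
static void *my_stat_next(void *prev, int idx)
{
	return idx < MY_SLOTS ? &my_stats[idx] : NULL;
}

static int my_stat_show(struct seq_file *s, void *p)
{
	struct my_stat *st = p;

	seq_printf(s, "%lu\n", st->hits);
	return 0;
}

static struct tracer_stat my_tracer_stat = {
	.name		= "my_stats",	/* file name under trace_stat/ in debugfs */
	.stat_start	= my_stat_start,
	.stat_next	= my_stat_next,
	.stat_show	= my_stat_show,
	/* .stat_cmp and .stat_headers are optional; dummy_cmp is used if absent */
};

__init static int my_stat_init(void)
{
	return register_stat_tracer(&my_tracer_stat);
}
device_initcall(my_stat_init);
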
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c new file mode 100644 index 000000000000..eae9cef39291 --- /dev/null +++ b/kernel/trace/trace_stat.c | |||
| @@ -0,0 +1,319 @@ | |||
| 1 | /* | ||
| 2 | * Infrastructure for statistic tracing (histogram output). | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
| 5 | * | ||
| 6 | * Based on the code from trace_branch.c which is | ||
| 7 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | ||
| 8 | * | ||
| 9 | */ | ||
| 10 | |||
| 11 | |||
| 12 | #include <linux/list.h> | ||
| 13 | #include <linux/debugfs.h> | ||
| 14 | #include "trace_stat.h" | ||
| 15 | #include "trace.h" | ||
| 16 | |||
| 17 | |||
| 18 | /* List of stat entries from a tracer */ | ||
| 19 | struct trace_stat_list { | ||
| 20 | struct list_head list; | ||
| 21 | void *stat; | ||
| 22 | }; | ||
| 23 | |||
| 24 | /* A stat session is the stats output in one file */ | ||
| 25 | struct tracer_stat_session { | ||
| 26 | struct list_head session_list; | ||
| 27 | struct tracer_stat *ts; | ||
| 28 | struct list_head stat_list; | ||
| 29 | struct mutex stat_mutex; | ||
| 30 | struct dentry *file; | ||
| 31 | }; | ||
| 32 | |||
| 33 | /* All of the sessions currently in use. Each stat file embeds one session */ | ||
| 34 | static LIST_HEAD(all_stat_sessions); | ||
| 35 | static DEFINE_MUTEX(all_stat_sessions_mutex); | ||
| 36 | |||
| 37 | /* The root directory for all stat files */ | ||
| 38 | static struct dentry *stat_dir; | ||
| 39 | |||
| 40 | |||
| 41 | static void reset_stat_session(struct tracer_stat_session *session) | ||
| 42 | { | ||
| 43 | struct trace_stat_list *node, *next; | ||
| 44 | |||
| 45 | list_for_each_entry_safe(node, next, &session->stat_list, list) | ||
| 46 | kfree(node); | ||
| 47 | |||
| 48 | INIT_LIST_HEAD(&session->stat_list); | ||
| 49 | } | ||
| 50 | |||
| 51 | static void destroy_session(struct tracer_stat_session *session) | ||
| 52 | { | ||
| 53 | debugfs_remove(session->file); | ||
| 54 | reset_stat_session(session); | ||
| 55 | mutex_destroy(&session->stat_mutex); | ||
| 56 | kfree(session); | ||
| 57 | } | ||
| 58 | |||
| 59 | /* | ||
| 60 | * For tracers that don't provide a stat_cmp callback. | ||
| 61 | * This one will force an immediate insertion at the tail of | ||
| 62 | * the list. | ||
| 63 | */ | ||
| 64 | static int dummy_cmp(void *p1, void *p2) | ||
| 65 | { | ||
| 66 | return 1; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Initialize the stat list at each trace_stat file opening. | ||
| 71 | * All of these copies and sorting are required on all opening | ||
| 72 | * since the stats could have changed between two file sessions. | ||
| 73 | */ | ||
| 74 | static int stat_seq_init(struct tracer_stat_session *session) | ||
| 75 | { | ||
| 76 | struct trace_stat_list *iter_entry, *new_entry; | ||
| 77 | struct tracer_stat *ts = session->ts; | ||
| 78 | void *prev_stat; | ||
| 79 | int ret = 0; | ||
| 80 | int i; | ||
| 81 | |||
| 82 | mutex_lock(&session->stat_mutex); | ||
| 83 | reset_stat_session(session); | ||
| 84 | |||
| 85 | if (!ts->stat_cmp) | ||
| 86 | ts->stat_cmp = dummy_cmp; | ||
| 87 | |||
| 88 | /* | ||
| 89 | * The first entry. Actually this is the second, but the first | ||
| 90 | * one (the stat_list head) is pointless. | ||
| 91 | */ | ||
| 92 | new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); | ||
| 93 | if (!new_entry) { | ||
| 94 | ret = -ENOMEM; | ||
| 95 | goto exit; | ||
| 96 | } | ||
| 97 | |||
| 98 | INIT_LIST_HEAD(&new_entry->list); | ||
| 99 | |||
| 100 | list_add(&new_entry->list, &session->stat_list); | ||
| 101 | |||
| 102 | new_entry->stat = ts->stat_start(); | ||
| 103 | prev_stat = new_entry->stat; | ||
| 104 | |||
| 105 | /* | ||
| 106 | * Iterate over the tracer stat entries and store them in a sorted | ||
| 107 | * list. | ||
| 108 | */ | ||
| 109 | for (i = 1; ; i++) { | ||
| 110 | new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); | ||
| 111 | if (!new_entry) { | ||
| 112 | ret = -ENOMEM; | ||
| 113 | goto exit_free_list; | ||
| 114 | } | ||
| 115 | |||
| 116 | INIT_LIST_HEAD(&new_entry->list); | ||
| 117 | new_entry->stat = ts->stat_next(prev_stat, i); | ||
| 118 | |||
| 119 | /* End of insertion */ | ||
| 120 | if (!new_entry->stat) | ||
| 121 | break; | ||
| 122 | |||
| 123 | list_for_each_entry(iter_entry, &session->stat_list, list) { | ||
| 124 | |||
| 125 | /* Insert in descending sort order */ | ||
| 126 | if (ts->stat_cmp(new_entry->stat, | ||
| 127 | iter_entry->stat) > 0) { | ||
| 128 | |||
| 129 | list_add_tail(&new_entry->list, | ||
| 130 | &iter_entry->list); | ||
| 131 | break; | ||
| 132 | |||
| 133 | /* The new value is smaller than all others: put it at the tail */ | ||
| 134 | } else if (list_is_last(&iter_entry->list, | ||
| 135 | &session->stat_list)) { | ||
| 136 | list_add(&new_entry->list, &iter_entry->list); | ||
| 137 | break; | ||
| 138 | } | ||
| 139 | } | ||
| 140 | |||
| 141 | prev_stat = new_entry->stat; | ||
| 142 | } | ||
| 143 | exit: | ||
| 144 | mutex_unlock(&session->stat_mutex); | ||
| 145 | return ret; | ||
| 146 | |||
| 147 | exit_free_list: | ||
| 148 | reset_stat_session(session); | ||
| 149 | mutex_unlock(&session->stat_mutex); | ||
| 150 | return ret; | ||
| 151 | } | ||
| 152 | |||
| 153 | |||
| 154 | static void *stat_seq_start(struct seq_file *s, loff_t *pos) | ||
| 155 | { | ||
| 156 | struct tracer_stat_session *session = s->private; | ||
| 157 | |||
| 158 | /* Prevent a tracer switch or a stat_list modification */ | ||
| 159 | mutex_lock(&session->stat_mutex); | ||
| 160 | |||
| 161 | /* If we are at the beginning of the file, print the headers */ | ||
| 162 | if (!*pos && session->ts->stat_headers) | ||
| 163 | session->ts->stat_headers(s); | ||
| 164 | |||
| 165 | return seq_list_start(&session->stat_list, *pos); | ||
| 166 | } | ||
| 167 | |||
| 168 | static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos) | ||
| 169 | { | ||
| 170 | struct tracer_stat_session *session = s->private; | ||
| 171 | |||
| 172 | return seq_list_next(p, &session->stat_list, pos); | ||
| 173 | } | ||
| 174 | |||
| 175 | static void stat_seq_stop(struct seq_file *s, void *p) | ||
| 176 | { | ||
| 177 | struct tracer_stat_session *session = s->private; | ||
| 178 | mutex_unlock(&session->stat_mutex); | ||
| 179 | } | ||
| 180 | |||
| 181 | static int stat_seq_show(struct seq_file *s, void *v) | ||
| 182 | { | ||
| 183 | struct tracer_stat_session *session = s->private; | ||
| 184 | struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list); | ||
| 185 | |||
| 186 | return session->ts->stat_show(s, l->stat); | ||
| 187 | } | ||
| 188 | |||
| 189 | static const struct seq_operations trace_stat_seq_ops = { | ||
| 190 | .start = stat_seq_start, | ||
| 191 | .next = stat_seq_next, | ||
| 192 | .stop = stat_seq_stop, | ||
| 193 | .show = stat_seq_show | ||
| 194 | }; | ||
| 195 | |||
| 196 | /* The session stat list is refilled and re-sorted at each stat file opening */ | ||
| 197 | static int tracing_stat_open(struct inode *inode, struct file *file) | ||
| 198 | { | ||
| 199 | int ret; | ||
| 200 | |||
| 201 | struct tracer_stat_session *session = inode->i_private; | ||
| 202 | |||
| 203 | ret = seq_open(file, &trace_stat_seq_ops); | ||
| 204 | if (!ret) { | ||
| 205 | struct seq_file *m = file->private_data; | ||
| 206 | m->private = session; | ||
| 207 | ret = stat_seq_init(session); | ||
| 208 | } | ||
| 209 | |||
| 210 | return ret; | ||
| 211 | } | ||
| 212 | |||
| 213 | /* | ||
| 214 | * Avoid consuming memory with our now useless list. | ||
| 215 | */ | ||
| 216 | static int tracing_stat_release(struct inode *i, struct file *f) | ||
| 217 | { | ||
| 218 | struct tracer_stat_session *session = i->i_private; | ||
| 219 | |||
| 220 | mutex_lock(&session->stat_mutex); | ||
| 221 | reset_stat_session(session); | ||
| 222 | mutex_unlock(&session->stat_mutex); | ||
| 223 | |||
| 224 | return 0; | ||
| 225 | } | ||
| 226 | |||
| 227 | static const struct file_operations tracing_stat_fops = { | ||
| 228 | .open = tracing_stat_open, | ||
| 229 | .read = seq_read, | ||
| 230 | .llseek = seq_lseek, | ||
| 231 | .release = tracing_stat_release | ||
| 232 | }; | ||
| 233 | |||
| 234 | static int tracing_stat_init(void) | ||
| 235 | { | ||
| 236 | struct dentry *d_tracing; | ||
| 237 | |||
| 238 | d_tracing = tracing_init_dentry(); | ||
| 239 | |||
| 240 | stat_dir = debugfs_create_dir("trace_stat", d_tracing); | ||
| 241 | if (!stat_dir) | ||
| 242 | pr_warning("Could not create debugfs " | ||
| 243 | "'trace_stat' entry\n"); | ||
| 244 | return 0; | ||
| 245 | } | ||
| 246 | |||
| 247 | static int init_stat_file(struct tracer_stat_session *session) | ||
| 248 | { | ||
| 249 | if (!stat_dir && tracing_stat_init()) | ||
| 250 | return -ENODEV; | ||
| 251 | |||
| 252 | session->file = debugfs_create_file(session->ts->name, 0644, | ||
| 253 | stat_dir, | ||
| 254 | session, &tracing_stat_fops); | ||
| 255 | if (!session->file) | ||
| 256 | return -ENOMEM; | ||
| 257 | return 0; | ||
| 258 | } | ||
| 259 | |||
| 260 | int register_stat_tracer(struct tracer_stat *trace) | ||
| 261 | { | ||
| 262 | struct tracer_stat_session *session, *node, *tmp; | ||
| 263 | int ret; | ||
| 264 | |||
| 265 | if (!trace) | ||
| 266 | return -EINVAL; | ||
| 267 | |||
| 268 | if (!trace->stat_start || !trace->stat_next || !trace->stat_show) | ||
| 269 | return -EINVAL; | ||
| 270 | |||
| 271 | /* Already registered? */ | ||
| 272 | mutex_lock(&all_stat_sessions_mutex); | ||
| 273 | list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) { | ||
| 274 | if (node->ts == trace) { | ||
| 275 | mutex_unlock(&all_stat_sessions_mutex); | ||
| 276 | return -EINVAL; | ||
| 277 | } | ||
| 278 | } | ||
| 279 | mutex_unlock(&all_stat_sessions_mutex); | ||
| 280 | |||
| 281 | /* Init the session */ | ||
| 282 | session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL); | ||
| 283 | if (!session) | ||
| 284 | return -ENOMEM; | ||
| 285 | |||
| 286 | session->ts = trace; | ||
| 287 | INIT_LIST_HEAD(&session->session_list); | ||
| 288 | INIT_LIST_HEAD(&session->stat_list); | ||
| 289 | mutex_init(&session->stat_mutex); | ||
| 290 | session->file = NULL; | ||
| 291 | |||
| 292 | ret = init_stat_file(session); | ||
| 293 | if (ret) { | ||
| 294 | destroy_session(session); | ||
| 295 | return ret; | ||
| 296 | } | ||
| 297 | |||
| 298 | /* Register */ | ||
| 299 | mutex_lock(&all_stat_sessions_mutex); | ||
| 300 | list_add_tail(&session->session_list, &all_stat_sessions); | ||
| 301 | mutex_unlock(&all_stat_sessions_mutex); | ||
| 302 | |||
| 303 | return 0; | ||
| 304 | } | ||
| 305 | |||
| 306 | void unregister_stat_tracer(struct tracer_stat *trace) | ||
| 307 | { | ||
| 308 | struct tracer_stat_session *node, *tmp; | ||
| 309 | |||
| 310 | mutex_lock(&all_stat_sessions_mutex); | ||
| 311 | list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) { | ||
| 312 | if (node->ts == trace) { | ||
| 313 | list_del(&node->session_list); | ||
| 314 | destroy_session(node); | ||
| 315 | break; | ||
| 316 | } | ||
| 317 | } | ||
| 318 | mutex_unlock(&all_stat_sessions_mutex); | ||
| 319 | } | ||
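A note on the ordering used by stat_seq_init() above: with the kernel list helpers, list_add_tail(&new->list, &iter->list) links the new node just before the iteration cursor and list_add() links it just after, which is how larger entries end up toward the head of the list. The following is a minimal, self-contained sketch of that descending insertion; the demo_* names are hypothetical and a plain integer compare stands in for the tracer's stat_cmp() callback:

/* Hypothetical sketch of the descending insertion performed by stat_seq_init() */
#include <linux/list.h>

struct demo_stat {
	struct list_head list;
	long value;
};

static void demo_insert_sorted(struct list_head *head, struct demo_stat *new_entry)
{
	struct demo_stat *iter;

	if (list_empty(head)) {
		list_add(&new_entry->list, head);
		return;
	}

	list_for_each_entry(iter, head, list) {
		if (new_entry->value > iter->value) {
			/* Larger than the cursor: link just before it */
			list_add_tail(&new_entry->list, &iter->list);
			return;
		}
		if (list_is_last(&iter->list, head)) {
			/* Smallest so far: link after the last entry */
			list_add(&new_entry->list, &iter->list);
			return;
		}
	}
}

Keeping the list sorted on insertion is what lets the seq_file handlers above simply walk it with seq_list_start()/seq_list_next() and print the entries in order.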
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h new file mode 100644 index 000000000000..202274cf7f3d --- /dev/null +++ b/kernel/trace/trace_stat.h | |||
| @@ -0,0 +1,31 @@ | |||
| 1 | #ifndef __TRACE_STAT_H | ||
| 2 | #define __TRACE_STAT_H | ||
| 3 | |||
| 4 | #include <linux/seq_file.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * If you want to provide a stat file (one-shot statistics), fill | ||
| 8 | * in the stat_start/stat_next iterator and the stat_show callback. | ||
| 9 | * The other callbacks are optional. | ||
| 10 | */ | ||
| 11 | struct tracer_stat { | ||
| 12 | /* The name of your stat file */ | ||
| 13 | const char *name; | ||
| 14 | /* Iteration over statistic entries */ | ||
| 15 | void *(*stat_start)(void); | ||
| 16 | void *(*stat_next)(void *prev, int idx); | ||
| 17 | /* Compare two entries for stats sorting */ | ||
| 18 | int (*stat_cmp)(void *p1, void *p2); | ||
| 19 | /* Print a stat entry */ | ||
| 20 | int (*stat_show)(struct seq_file *s, void *p); | ||
| 21 | /* Print the headers of your stat entries */ | ||
| 22 | int (*stat_headers)(struct seq_file *s); | ||
| 23 | }; | ||
| 24 | |||
| 25 | /* | ||
| 26 | * Create or destroy a stat file | ||
| 27 | */ | ||
| 28 | extern int register_stat_tracer(struct tracer_stat *trace); | ||
| 29 | extern void unregister_stat_tracer(struct tracer_stat *trace); | ||
| 30 | |||
| 31 | #endif /* __TRACE_STAT_H */ | ||
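To make the interface in trace_stat.h concrete, here is a rough sketch of a minimal tracer_stat provider. All of the my_* names and the static sample array are hypothetical and only illustrate the callback contract; this is not part of the patch:

/* Hypothetical example provider for the trace_stat interface */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include "trace_stat.h"

struct my_stat {
	const char	*name;
	unsigned long	hits;
};

static struct my_stat my_entries[] = {
	{ "foo", 42 },
	{ "bar", 7 },
};

/* First entry of the iteration */
static void *my_stat_start(void)
{
	return &my_entries[0];
}

/* idx-th entry, or NULL once the table is exhausted */
static void *my_stat_next(void *prev, int idx)
{
	if (idx >= ARRAY_SIZE(my_entries))
		return NULL;
	return &my_entries[idx];
}

/* A positive return value sorts p1 before p2 (descending by hits) */
static int my_stat_cmp(void *p1, void *p2)
{
	struct my_stat *a = p1, *b = p2;

	if (a->hits > b->hits)
		return 1;
	if (a->hits < b->hits)
		return -1;
	return 0;
}

static int my_stat_show(struct seq_file *s, void *p)
{
	struct my_stat *st = p;

	seq_printf(s, "%-10s %lu\n", st->name, st->hits);
	return 0;
}

static int my_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# NAME       HITS\n");
	return 0;
}

static struct tracer_stat my_stats = {
	.name		= "my_stats",
	.stat_start	= my_stat_start,
	.stat_next	= my_stat_next,
	.stat_cmp	= my_stat_cmp,
	.stat_show	= my_stat_show,
	.stat_headers	= my_stat_headers,
};

Calling register_stat_tracer(&my_stats) from an initcall would then create a sorted, read-only my_stats file under the trace_stat debugfs directory, and unregister_stat_tracer(&my_stats) would remove it again.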
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c new file mode 100644 index 000000000000..4664990fe9c5 --- /dev/null +++ b/kernel/trace/trace_workqueue.c | |||
| @@ -0,0 +1,281 @@ | |||
| 1 | /* | ||
| 2 | * Workqueue statistical tracer. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | |||
| 8 | |||
| 9 | #include <trace/workqueue.h> | ||
| 10 | #include <linux/list.h> | ||
| 11 | #include <linux/percpu.h> | ||
| 12 | #include "trace_stat.h" | ||
| 13 | #include "trace.h" | ||
| 14 | |||
| 15 | |||
| 16 | /* A cpu workqueue thread */ | ||
| 17 | struct cpu_workqueue_stats { | ||
| 18 | struct list_head list; | ||
| 19 | /* Useful to know if we print the cpu headers */ | ||
| 20 | bool first_entry; | ||
| 21 | int cpu; | ||
| 22 | pid_t pid; | ||
| 23 | /* Can be inserted from interrupt or user context, needs to be atomic */ | ||
| 24 | atomic_t inserted; | ||
| 25 | /* | ||
| 26 | * Doesn't need to be atomic: work items are serialized by a single workqueue thread | ||
| 27 | * on a single CPU. | ||
| 28 | */ | ||
| 29 | unsigned int executed; | ||
| 30 | }; | ||
| 31 | |||
| 32 | /* List of workqueue threads on one cpu */ | ||
| 33 | struct workqueue_global_stats { | ||
| 34 | struct list_head list; | ||
| 35 | spinlock_t lock; | ||
| 36 | }; | ||
| 37 | |||
| 38 | /* No global lock is needed: these are allocated before the workqueues, and | ||
| 39 | * never freed. | ||
| 40 | */ | ||
| 41 | static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat); | ||
| 42 | #define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu)) | ||
| 43 | |||
| 44 | /* Insertion of a work */ | ||
| 45 | static void | ||
| 46 | probe_workqueue_insertion(struct task_struct *wq_thread, | ||
| 47 | struct work_struct *work) | ||
| 48 | { | ||
| 49 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | ||
| 50 | struct cpu_workqueue_stats *node, *next; | ||
| 51 | unsigned long flags; | ||
| 52 | |||
| 53 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 54 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | ||
| 55 | list) { | ||
| 56 | if (node->pid == wq_thread->pid) { | ||
| 57 | atomic_inc(&node->inserted); | ||
| 58 | goto found; | ||
| 59 | } | ||
| 60 | } | ||
| 61 | pr_debug("trace_workqueue: entry not found\n"); | ||
| 62 | found: | ||
| 63 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 64 | } | ||
| 65 | |||
| 66 | /* Execution of a work */ | ||
| 67 | static void | ||
| 68 | probe_workqueue_execution(struct task_struct *wq_thread, | ||
| 69 | struct work_struct *work) | ||
| 70 | { | ||
| 71 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | ||
| 72 | struct cpu_workqueue_stats *node, *next; | ||
| 73 | unsigned long flags; | ||
| 74 | |||
| 75 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 76 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | ||
| 77 | list) { | ||
| 78 | if (node->pid == wq_thread->pid) { | ||
| 79 | node->executed++; | ||
| 80 | goto found; | ||
| 81 | } | ||
| 82 | } | ||
| 83 | pr_debug("trace_workqueue: entry not found\n"); | ||
| 84 | found: | ||
| 85 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 86 | } | ||
| 87 | |||
| 88 | /* Creation of a cpu workqueue thread */ | ||
| 89 | static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) | ||
| 90 | { | ||
| 91 | struct cpu_workqueue_stats *cws; | ||
| 92 | unsigned long flags; | ||
| 93 | |||
| 94 | WARN_ON(cpu < 0 || cpu >= num_possible_cpus()); | ||
| 95 | |||
| 96 | /* Workqueues are sometimes created in atomic context */ | ||
| 97 | cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC); | ||
| 98 | if (!cws) { | ||
| 99 | pr_warning("trace_workqueue: not enough memory\n"); | ||
| 100 | return; | ||
| 101 | } | ||
| 102 | tracing_record_cmdline(wq_thread); | ||
| 103 | |||
| 104 | INIT_LIST_HEAD(&cws->list); | ||
| 105 | cws->cpu = cpu; | ||
| 106 | |||
| 107 | cws->pid = wq_thread->pid; | ||
| 108 | |||
| 109 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 110 | if (list_empty(&workqueue_cpu_stat(cpu)->list)) | ||
| 111 | cws->first_entry = true; | ||
| 112 | list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list); | ||
| 113 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 114 | } | ||
| 115 | |||
| 116 | /* Destruction of a cpu workqueue thread */ | ||
| 117 | static void probe_workqueue_destruction(struct task_struct *wq_thread) | ||
| 118 | { | ||
| 119 | /* A workqueue thread only executes on one cpu */ | ||
| 120 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | ||
| 121 | struct cpu_workqueue_stats *node, *next; | ||
| 122 | unsigned long flags; | ||
| 123 | |||
| 124 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 125 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | ||
| 126 | list) { | ||
| 127 | if (node->pid == wq_thread->pid) { | ||
| 128 | list_del(&node->list); | ||
| 129 | kfree(node); | ||
| 130 | goto found; | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | pr_debug("trace_workqueue: could not find workqueue to destroy\n"); | ||
| 135 | found: | ||
| 136 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 137 | |||
| 138 | } | ||
| 139 | |||
| 140 | static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu) | ||
| 141 | { | ||
| 142 | unsigned long flags; | ||
| 143 | struct cpu_workqueue_stats *ret = NULL; | ||
| 144 | |||
| 145 | |||
| 146 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 147 | |||
| 148 | if (!list_empty(&workqueue_cpu_stat(cpu)->list)) | ||
| 149 | ret = list_entry(workqueue_cpu_stat(cpu)->list.next, | ||
| 150 | struct cpu_workqueue_stats, list); | ||
| 151 | |||
| 152 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 153 | |||
| 154 | return ret; | ||
| 155 | } | ||
| 156 | |||
| 157 | static void *workqueue_stat_start(void) | ||
| 158 | { | ||
| 159 | int cpu; | ||
| 160 | void *ret = NULL; | ||
| 161 | |||
| 162 | for_each_possible_cpu(cpu) { | ||
| 163 | ret = workqueue_stat_start_cpu(cpu); | ||
| 164 | if (ret) | ||
| 165 | return ret; | ||
| 166 | } | ||
| 167 | return NULL; | ||
| 168 | } | ||
| 169 | |||
| 170 | static void *workqueue_stat_next(void *prev, int idx) | ||
| 171 | { | ||
| 172 | struct cpu_workqueue_stats *prev_cws = prev; | ||
| 173 | int cpu = prev_cws->cpu; | ||
| 174 | unsigned long flags; | ||
| 175 | void *ret = NULL; | ||
| 176 | |||
| 177 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 178 | if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) { | ||
| 179 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 180 | for (++cpu ; cpu < num_possible_cpus(); cpu++) { | ||
| 181 | ret = workqueue_stat_start_cpu(cpu); | ||
| 182 | if (ret) | ||
| 183 | return ret; | ||
| 184 | } | ||
| 185 | return NULL; | ||
| 186 | } | ||
| 187 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 188 | |||
| 189 | return list_entry(prev_cws->list.next, struct cpu_workqueue_stats, | ||
| 190 | list); | ||
| 191 | } | ||
| 192 | |||
| 193 | static int workqueue_stat_show(struct seq_file *s, void *p) | ||
| 194 | { | ||
| 195 | struct cpu_workqueue_stats *cws = p; | ||
| 196 | unsigned long flags; | ||
| 197 | int cpu = cws->cpu; | ||
| 198 | |||
| 199 | seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, | ||
| 200 | atomic_read(&cws->inserted), | ||
| 201 | cws->executed, | ||
| 202 | trace_find_cmdline(cws->pid)); | ||
| 203 | |||
| 204 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 205 | if (&cws->list == workqueue_cpu_stat(cpu)->list.next) | ||
| 206 | seq_printf(s, "\n"); | ||
| 207 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 208 | |||
| 209 | return 0; | ||
| 210 | } | ||
| 211 | |||
| 212 | static int workqueue_stat_headers(struct seq_file *s) | ||
| 213 | { | ||
| 214 | seq_printf(s, "# CPU INSERTED EXECUTED NAME\n"); | ||
| 215 | seq_printf(s, "# | | | |\n\n"); | ||
| 216 | return 0; | ||
| 217 | } | ||
| 218 | |||
| 219 | struct tracer_stat workqueue_stats __read_mostly = { | ||
| 220 | .name = "workqueues", | ||
| 221 | .stat_start = workqueue_stat_start, | ||
| 222 | .stat_next = workqueue_stat_next, | ||
| 223 | .stat_show = workqueue_stat_show, | ||
| 224 | .stat_headers = workqueue_stat_headers | ||
| 225 | }; | ||
| 226 | |||
| 227 | |||
| 228 | int __init stat_workqueue_init(void) | ||
| 229 | { | ||
| 230 | if (register_stat_tracer(&workqueue_stats)) { | ||
| 231 | pr_warning("Unable to register workqueue stat tracer\n"); | ||
| 232 | return 1; | ||
| 233 | } | ||
| 234 | |||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | fs_initcall(stat_workqueue_init); | ||
| 238 | |||
| 239 | /* | ||
| 240 | * Workqueues are created very early, just after pre-smp initcalls. | ||
| 241 | * So we must register our tracepoints at this stage. | ||
| 242 | */ | ||
| 243 | int __init trace_workqueue_early_init(void) | ||
| 244 | { | ||
| 245 | int ret, cpu; | ||
| 246 | |||
| 247 | ret = register_trace_workqueue_insertion(probe_workqueue_insertion); | ||
| 248 | if (ret) | ||
| 249 | goto out; | ||
| 250 | |||
| 251 | ret = register_trace_workqueue_execution(probe_workqueue_execution); | ||
| 252 | if (ret) | ||
| 253 | goto no_insertion; | ||
| 254 | |||
| 255 | ret = register_trace_workqueue_creation(probe_workqueue_creation); | ||
| 256 | if (ret) | ||
| 257 | goto no_execution; | ||
| 258 | |||
| 259 | ret = register_trace_workqueue_destruction(probe_workqueue_destruction); | ||
| 260 | if (ret) | ||
| 261 | goto no_creation; | ||
| 262 | |||
| 263 | for_each_possible_cpu(cpu) { | ||
| 264 | spin_lock_init(&workqueue_cpu_stat(cpu)->lock); | ||
| 265 | INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); | ||
| 266 | } | ||
| 267 | |||
| 268 | return 0; | ||
| 269 | |||
| 270 | no_creation: | ||
| 271 | unregister_trace_workqueue_creation(probe_workqueue_creation); | ||
| 272 | no_execution: | ||
| 273 | unregister_trace_workqueue_execution(probe_workqueue_execution); | ||
| 274 | no_insertion: | ||
| 275 | unregister_trace_workqueue_insertion(probe_workqueue_insertion); | ||
| 276 | out: | ||
| 277 | pr_warning("trace_workqueue: unable to trace workqueues\n"); | ||
| 278 | |||
| 279 | return 1; | ||
| 280 | } | ||
| 281 | early_initcall(trace_workqueue_early_init); | ||
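Once the workqueue_stats tracer above is registered through the trace_stat machinery, the per-workqueue counters are exposed as an ordinary debugfs file. Assuming debugfs is mounted at the conventional /sys/kernel/debug location, they can be read with cat /sys/kernel/debug/tracing/trace_stat/workqueues; the output begins with the two header lines printed by workqueue_stat_headers() (# CPU INSERTED EXECUTED NAME) and continues with one line per workqueue thread, formatted by workqueue_stat_show().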
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1f0c509b40d3..e53ee18ef431 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/kallsyms.h> | 33 | #include <linux/kallsyms.h> |
| 34 | #include <linux/debug_locks.h> | 34 | #include <linux/debug_locks.h> |
| 35 | #include <linux/lockdep.h> | 35 | #include <linux/lockdep.h> |
| 36 | #include <trace/workqueue.h> | ||
| 36 | 37 | ||
| 37 | /* | 38 | /* |
| 38 | * The per-CPU workqueue (if single thread, we always use the first | 39 | * The per-CPU workqueue (if single thread, we always use the first |
| @@ -125,9 +126,13 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) | |||
| 125 | return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); | 126 | return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); |
| 126 | } | 127 | } |
| 127 | 128 | ||
| 129 | DEFINE_TRACE(workqueue_insertion); | ||
| 130 | |||
| 128 | static void insert_work(struct cpu_workqueue_struct *cwq, | 131 | static void insert_work(struct cpu_workqueue_struct *cwq, |
| 129 | struct work_struct *work, struct list_head *head) | 132 | struct work_struct *work, struct list_head *head) |
| 130 | { | 133 | { |
| 134 | trace_workqueue_insertion(cwq->thread, work); | ||
| 135 | |||
| 131 | set_wq_data(work, cwq); | 136 | set_wq_data(work, cwq); |
| 132 | /* | 137 | /* |
| 133 | * Ensure that we get the right work->data if we see the | 138 | * Ensure that we get the right work->data if we see the |
| @@ -259,6 +264,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
| 259 | } | 264 | } |
| 260 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | 265 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); |
| 261 | 266 | ||
| 267 | DEFINE_TRACE(workqueue_execution); | ||
| 268 | |||
| 262 | static void run_workqueue(struct cpu_workqueue_struct *cwq) | 269 | static void run_workqueue(struct cpu_workqueue_struct *cwq) |
| 263 | { | 270 | { |
| 264 | spin_lock_irq(&cwq->lock); | 271 | spin_lock_irq(&cwq->lock); |
| @@ -284,7 +291,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
| 284 | */ | 291 | */ |
| 285 | struct lockdep_map lockdep_map = work->lockdep_map; | 292 | struct lockdep_map lockdep_map = work->lockdep_map; |
| 286 | #endif | 293 | #endif |
| 287 | 294 | trace_workqueue_execution(cwq->thread, work); | |
| 288 | cwq->current_work = work; | 295 | cwq->current_work = work; |
| 289 | list_del_init(cwq->worklist.next); | 296 | list_del_init(cwq->worklist.next); |
| 290 | spin_unlock_irq(&cwq->lock); | 297 | spin_unlock_irq(&cwq->lock); |
| @@ -765,6 +772,8 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu) | |||
| 765 | return cwq; | 772 | return cwq; |
| 766 | } | 773 | } |
| 767 | 774 | ||
| 775 | DEFINE_TRACE(workqueue_creation); | ||
| 776 | |||
| 768 | static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | 777 | static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) |
| 769 | { | 778 | { |
| 770 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | 779 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; |
| @@ -787,6 +796,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | |||
| 787 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); | 796 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); |
| 788 | cwq->thread = p; | 797 | cwq->thread = p; |
| 789 | 798 | ||
| 799 | trace_workqueue_creation(cwq->thread, cpu); | ||
| 800 | |||
| 790 | return 0; | 801 | return 0; |
| 791 | } | 802 | } |
| 792 | 803 | ||
| @@ -868,6 +879,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, | |||
| 868 | } | 879 | } |
| 869 | EXPORT_SYMBOL_GPL(__create_workqueue_key); | 880 | EXPORT_SYMBOL_GPL(__create_workqueue_key); |
| 870 | 881 | ||
| 882 | DEFINE_TRACE(workqueue_destruction); | ||
| 883 | |||
| 871 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | 884 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) |
| 872 | { | 885 | { |
| 873 | /* | 886 | /* |
| @@ -891,6 +904,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | |||
| 891 | * checks list_empty(), and a "normal" queue_work() can't use | 904 | * checks list_empty(), and a "normal" queue_work() can't use |
| 892 | * a dead CPU. | 905 | * a dead CPU. |
| 893 | */ | 906 | */ |
| 907 | trace_workqueue_destruction(cwq->thread); | ||
| 894 | kthread_stop(cwq->thread); | 908 | kthread_stop(cwq->thread); |
| 895 | cwq->thread = NULL; | 909 | cwq->thread = NULL; |
| 896 | } | 910 | } |
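The DEFINE_TRACE() statements added to kernel/workqueue.c above need matching declarations in <trace/workqueue.h>, which this part of the diff does not show. The probe signatures follow from kernel/trace/trace_workqueue.c, but the declaration macro spelling (TPPROTO/TPARGS here) changed across kernel releases, so the sketch below is an assumption about the header's shape rather than a copy of it:

/* Hypothetical sketch of include/trace/workqueue.h; not part of this diff */
#ifndef __TRACE_WORKQUEUE_H
#define __TRACE_WORKQUEUE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>
#include <linux/sched.h>

DECLARE_TRACE(workqueue_insertion,
	TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
	TPARGS(wq_thread, work));

DECLARE_TRACE(workqueue_execution,
	TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
	TPARGS(wq_thread, work));

DECLARE_TRACE(workqueue_creation,
	TPPROTO(struct task_struct *wq_thread, int cpu),
	TPARGS(wq_thread, cpu));

DECLARE_TRACE(workqueue_destruction,
	TPPROTO(struct task_struct *wq_thread),
	TPARGS(wq_thread));

#endif /* __TRACE_WORKQUEUE_H */

Each DECLARE_TRACE() provides the trace_workqueue_*() call sites used in kernel/workqueue.c and the register/unregister_trace_workqueue_*() helpers used by the tracer; with no probe attached, the call sites reduce to a cheap conditional check.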
