diff options
| author | Ingo Molnar <mingo@kernel.org> | 2013-02-03 05:14:06 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-02-03 05:14:06 -0500 |
| commit | f7355a5e7c722452e2cd80d6b83acb2f4423c4a2 (patch) | |
| tree | 2ee172a5607de2baf25b8e9b99acd181a34676a6 | |
| parent | 9c4c5fd9e6207f04dbf59c5a9699fded144542e6 (diff) | |
| parent | d840f718d28715a9833c1a8f46c2493ff3fd219b (diff) | |
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Pull tracing updates from Steve Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| -rw-r--r-- | Documentation/trace/ftrace.txt | 83 | ||||
| -rw-r--r-- | include/linux/ftrace_event.h | 3 | ||||
| -rw-r--r-- | include/linux/ring_buffer.h | 1 | ||||
| -rw-r--r-- | kernel/trace/Kconfig | 10 | ||||
| -rw-r--r-- | kernel/trace/ring_buffer.c | 18 | ||||
| -rw-r--r-- | kernel/trace/trace.c | 222 | ||||
| -rw-r--r-- | kernel/trace/trace.h | 1 | ||||
| -rw-r--r-- | kernel/trace/trace_clock.c | 2 | ||||
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 8 | ||||
| -rw-r--r-- | samples/Kconfig | 6 | ||||
| -rw-r--r-- | samples/Makefile | 2 | ||||
| -rw-r--r-- | samples/tracepoints/Makefile | 6 | ||||
| -rw-r--r-- | samples/tracepoints/tp-samples-trace.h | 11 | ||||
| -rw-r--r-- | samples/tracepoints/tracepoint-probe-sample.c | 57 | ||||
| -rw-r--r-- | samples/tracepoints/tracepoint-probe-sample2.c | 44 | ||||
| -rw-r--r-- | samples/tracepoints/tracepoint-sample.c | 57 |
16 files changed, 288 insertions, 243 deletions
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index 6f51fed45f2d..53d6a3c51d87 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt | |||
| @@ -1842,6 +1842,89 @@ an error. | |||
| 1842 | # cat buffer_size_kb | 1842 | # cat buffer_size_kb |
| 1843 | 85 | 1843 | 85 |
| 1844 | 1844 | ||
| 1845 | Snapshot | ||
| 1846 | -------- | ||
| 1847 | CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature | ||
| 1848 | available to all non latency tracers. (Latency tracers which | ||
| 1849 | record max latency, such as "irqsoff" or "wakeup", can't use | ||
| 1850 | this feature, since those are already using the snapshot | ||
| 1851 | mechanism internally.) | ||
| 1852 | |||
| 1853 | Snapshot preserves a current trace buffer at a particular point | ||
| 1854 | in time without stopping tracing. Ftrace swaps the current | ||
| 1855 | buffer with a spare buffer, and tracing continues in the new | ||
| 1856 | current (=previous spare) buffer. | ||
| 1857 | |||
| 1858 | The following debugfs files in "tracing" are related to this | ||
| 1859 | feature: | ||
| 1860 | |||
| 1861 | snapshot: | ||
| 1862 | |||
| 1863 | This is used to take a snapshot and to read the output | ||
| 1864 | of the snapshot. Echo 1 into this file to allocate a | ||
| 1865 | spare buffer and to take a snapshot (swap), then read | ||
| 1866 | the snapshot from this file in the same format as | ||
| 1867 | "trace" (described above in the section "The File | ||
| 1868 | System"). Both reads snapshot and tracing are executable | ||
| 1869 | in parallel. When the spare buffer is allocated, echoing | ||
| 1870 | 0 frees it, and echoing else (positive) values clear the | ||
| 1871 | snapshot contents. | ||
| 1872 | More details are shown in the table below. | ||
| 1873 | |||
| 1874 | status\input | 0 | 1 | else | | ||
| 1875 | --------------+------------+------------+------------+ | ||
| 1876 | not allocated |(do nothing)| alloc+swap | EINVAL | | ||
| 1877 | --------------+------------+------------+------------+ | ||
| 1878 | allocated | free | swap | clear | | ||
| 1879 | --------------+------------+------------+------------+ | ||
| 1880 | |||
| 1881 | Here is an example of using the snapshot feature. | ||
| 1882 | |||
| 1883 | # echo 1 > events/sched/enable | ||
| 1884 | # echo 1 > snapshot | ||
| 1885 | # cat snapshot | ||
| 1886 | # tracer: nop | ||
| 1887 | # | ||
| 1888 | # entries-in-buffer/entries-written: 71/71 #P:8 | ||
| 1889 | # | ||
| 1890 | # _-----=> irqs-off | ||
| 1891 | # / _----=> need-resched | ||
| 1892 | # | / _---=> hardirq/softirq | ||
| 1893 | # || / _--=> preempt-depth | ||
| 1894 | # ||| / delay | ||
| 1895 | # TASK-PID CPU# |||| TIMESTAMP FUNCTION | ||
| 1896 | # | | | |||| | | | ||
| 1897 | <idle>-0 [005] d... 2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120 | ||
| 1898 | sleep-2242 [005] d... 2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120 | ||
| 1899 | [...] | ||
| 1900 | <idle>-0 [002] d... 2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120 | ||
| 1901 | |||
| 1902 | # cat trace | ||
| 1903 | # tracer: nop | ||
| 1904 | # | ||
| 1905 | # entries-in-buffer/entries-written: 77/77 #P:8 | ||
| 1906 | # | ||
| 1907 | # _-----=> irqs-off | ||
| 1908 | # / _----=> need-resched | ||
| 1909 | # | / _---=> hardirq/softirq | ||
| 1910 | # || / _--=> preempt-depth | ||
| 1911 | # ||| / delay | ||
| 1912 | # TASK-PID CPU# |||| TIMESTAMP FUNCTION | ||
| 1913 | # | | | |||| | | | ||
| 1914 | <idle>-0 [007] d... 2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120 | ||
| 1915 | snapshot-test-2-2229 [002] d... 2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120 | ||
| 1916 | [...] | ||
| 1917 | |||
| 1918 | |||
| 1919 | If you try to use this snapshot feature when current tracer is | ||
| 1920 | one of the latency tracers, you will get the following results. | ||
| 1921 | |||
| 1922 | # echo wakeup > current_tracer | ||
| 1923 | # echo 1 > snapshot | ||
| 1924 | bash: echo: write error: Device or resource busy | ||
| 1925 | # cat snapshot | ||
| 1926 | cat: snapshot: Device or resource busy | ||
| 1927 | |||
| 1845 | ----------- | 1928 | ----------- |
| 1846 | 1929 | ||
| 1847 | More details can be found in the source code, in the | 1930 | More details can be found in the source code, in the |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 6f8d0b77006b..13a54d0bdfa8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
| @@ -83,6 +83,9 @@ struct trace_iterator { | |||
| 83 | long idx; | 83 | long idx; |
| 84 | 84 | ||
| 85 | cpumask_var_t started; | 85 | cpumask_var_t started; |
| 86 | |||
| 87 | /* it's true when current open file is snapshot */ | ||
| 88 | bool snapshot; | ||
| 86 | }; | 89 | }; |
| 87 | 90 | ||
| 88 | enum trace_iter_flags { | 91 | enum trace_iter_flags { |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 519777e3fa01..1342e69542f3 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
| @@ -167,6 +167,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | |||
| 167 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | 167 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); |
| 168 | unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); | 168 | unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); |
| 169 | unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); | 169 | unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); |
| 170 | unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu); | ||
| 170 | 171 | ||
| 171 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); | 172 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); |
| 172 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, | 173 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index cdc9d284d24e..36567564e221 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -253,6 +253,16 @@ config FTRACE_SYSCALLS | |||
| 253 | help | 253 | help |
| 254 | Basic tracer to catch the syscall entry and exit events. | 254 | Basic tracer to catch the syscall entry and exit events. |
| 255 | 255 | ||
| 256 | config TRACER_SNAPSHOT | ||
| 257 | bool "Create a snapshot trace buffer" | ||
| 258 | select TRACER_MAX_TRACE | ||
| 259 | help | ||
| 260 | Allow tracing users to take snapshot of the current buffer using the | ||
| 261 | ftrace interface, e.g.: | ||
| 262 | |||
| 263 | echo 1 > /sys/kernel/debug/tracing/snapshot | ||
| 264 | cat snapshot | ||
| 265 | |||
| 256 | config TRACE_BRANCH_PROFILING | 266 | config TRACE_BRANCH_PROFILING |
| 257 | bool | 267 | bool |
| 258 | select GENERIC_TRACER | 268 | select GENERIC_TRACER |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 13950d9027cb..7244acde77b0 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -3103,6 +3103,24 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) | |||
| 3103 | EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); | 3103 | EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); |
| 3104 | 3104 | ||
| 3105 | /** | 3105 | /** |
| 3106 | * ring_buffer_read_events_cpu - get the number of events successfully read | ||
| 3107 | * @buffer: The ring buffer | ||
| 3108 | * @cpu: The per CPU buffer to get the number of events read | ||
| 3109 | */ | ||
| 3110 | unsigned long | ||
| 3111 | ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) | ||
| 3112 | { | ||
| 3113 | struct ring_buffer_per_cpu *cpu_buffer; | ||
| 3114 | |||
| 3115 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | ||
| 3116 | return 0; | ||
| 3117 | |||
| 3118 | cpu_buffer = buffer->buffers[cpu]; | ||
| 3119 | return cpu_buffer->read; | ||
| 3120 | } | ||
| 3121 | EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); | ||
| 3122 | |||
| 3123 | /** | ||
| 3106 | * ring_buffer_entries - get the number of entries in a buffer | 3124 | * ring_buffer_entries - get the number of entries in a buffer |
| 3107 | * @buffer: The ring buffer | 3125 | * @buffer: The ring buffer |
| 3108 | * | 3126 | * |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d2a658349ca1..5d520b7bb4c5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -249,7 +249,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; | |||
| 249 | static struct tracer *trace_types __read_mostly; | 249 | static struct tracer *trace_types __read_mostly; |
| 250 | 250 | ||
| 251 | /* current_trace points to the tracer that is currently active */ | 251 | /* current_trace points to the tracer that is currently active */ |
| 252 | static struct tracer *current_trace __read_mostly; | 252 | static struct tracer *current_trace __read_mostly = &nop_trace; |
| 253 | 253 | ||
| 254 | /* | 254 | /* |
| 255 | * trace_types_lock is used to protect the trace_types list. | 255 | * trace_types_lock is used to protect the trace_types list. |
| @@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 710 | 710 | ||
| 711 | WARN_ON_ONCE(!irqs_disabled()); | 711 | WARN_ON_ONCE(!irqs_disabled()); |
| 712 | 712 | ||
| 713 | /* If we disabled the tracer, stop now */ | 713 | if (!current_trace->allocated_snapshot) { |
| 714 | if (current_trace == &nop_trace) | 714 | /* Only the nop tracer should hit this when disabling */ |
| 715 | return; | 715 | WARN_ON_ONCE(current_trace != &nop_trace); |
| 716 | |||
| 717 | if (WARN_ON_ONCE(!current_trace->use_max_tr)) | ||
| 718 | return; | 716 | return; |
| 717 | } | ||
| 719 | 718 | ||
| 720 | arch_spin_lock(&ftrace_max_lock); | 719 | arch_spin_lock(&ftrace_max_lock); |
| 721 | 720 | ||
| @@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 743 | return; | 742 | return; |
| 744 | 743 | ||
| 745 | WARN_ON_ONCE(!irqs_disabled()); | 744 | WARN_ON_ONCE(!irqs_disabled()); |
| 746 | if (!current_trace->use_max_tr) { | 745 | if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) |
| 747 | WARN_ON_ONCE(1); | ||
| 748 | return; | 746 | return; |
| 749 | } | ||
| 750 | 747 | ||
| 751 | arch_spin_lock(&ftrace_max_lock); | 748 | arch_spin_lock(&ftrace_max_lock); |
| 752 | 749 | ||
| @@ -866,10 +863,13 @@ int register_tracer(struct tracer *type) | |||
| 866 | 863 | ||
| 867 | current_trace = type; | 864 | current_trace = type; |
| 868 | 865 | ||
| 869 | /* If we expanded the buffers, make sure the max is expanded too */ | 866 | if (type->use_max_tr) { |
| 870 | if (ring_buffer_expanded && type->use_max_tr) | 867 | /* If we expanded the buffers, make sure the max is expanded too */ |
| 871 | ring_buffer_resize(max_tr.buffer, trace_buf_size, | 868 | if (ring_buffer_expanded) |
| 872 | RING_BUFFER_ALL_CPUS); | 869 | ring_buffer_resize(max_tr.buffer, trace_buf_size, |
| 870 | RING_BUFFER_ALL_CPUS); | ||
| 871 | type->allocated_snapshot = true; | ||
| 872 | } | ||
| 873 | 873 | ||
| 874 | /* the test is responsible for initializing and enabling */ | 874 | /* the test is responsible for initializing and enabling */ |
| 875 | pr_info("Testing tracer %s: ", type->name); | 875 | pr_info("Testing tracer %s: ", type->name); |
| @@ -885,10 +885,14 @@ int register_tracer(struct tracer *type) | |||
| 885 | /* Only reset on passing, to avoid touching corrupted buffers */ | 885 | /* Only reset on passing, to avoid touching corrupted buffers */ |
| 886 | tracing_reset_online_cpus(tr); | 886 | tracing_reset_online_cpus(tr); |
| 887 | 887 | ||
| 888 | /* Shrink the max buffer again */ | 888 | if (type->use_max_tr) { |
| 889 | if (ring_buffer_expanded && type->use_max_tr) | 889 | type->allocated_snapshot = false; |
| 890 | ring_buffer_resize(max_tr.buffer, 1, | 890 | |
| 891 | RING_BUFFER_ALL_CPUS); | 891 | /* Shrink the max buffer again */ |
| 892 | if (ring_buffer_expanded) | ||
| 893 | ring_buffer_resize(max_tr.buffer, 1, | ||
| 894 | RING_BUFFER_ALL_CPUS); | ||
| 895 | } | ||
| 892 | 896 | ||
| 893 | printk(KERN_CONT "PASSED\n"); | 897 | printk(KERN_CONT "PASSED\n"); |
| 894 | } | 898 | } |
| @@ -1344,7 +1348,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, | |||
| 1344 | */ | 1348 | */ |
| 1345 | preempt_disable_notrace(); | 1349 | preempt_disable_notrace(); |
| 1346 | 1350 | ||
| 1347 | use_stack = ++__get_cpu_var(ftrace_stack_reserve); | 1351 | use_stack = __this_cpu_inc_return(ftrace_stack_reserve); |
| 1348 | /* | 1352 | /* |
| 1349 | * We don't need any atomic variables, just a barrier. | 1353 | * We don't need any atomic variables, just a barrier. |
| 1350 | * If an interrupt comes in, we don't care, because it would | 1354 | * If an interrupt comes in, we don't care, because it would |
| @@ -1398,7 +1402,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, | |||
| 1398 | out: | 1402 | out: |
| 1399 | /* Again, don't let gcc optimize things here */ | 1403 | /* Again, don't let gcc optimize things here */ |
| 1400 | barrier(); | 1404 | barrier(); |
| 1401 | __get_cpu_var(ftrace_stack_reserve)--; | 1405 | __this_cpu_dec(ftrace_stack_reserve); |
| 1402 | preempt_enable_notrace(); | 1406 | preempt_enable_notrace(); |
| 1403 | 1407 | ||
| 1404 | } | 1408 | } |
| @@ -1948,21 +1952,27 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) | |||
| 1948 | static void *s_start(struct seq_file *m, loff_t *pos) | 1952 | static void *s_start(struct seq_file *m, loff_t *pos) |
| 1949 | { | 1953 | { |
| 1950 | struct trace_iterator *iter = m->private; | 1954 | struct trace_iterator *iter = m->private; |
| 1951 | static struct tracer *old_tracer; | ||
| 1952 | int cpu_file = iter->cpu_file; | 1955 | int cpu_file = iter->cpu_file; |
| 1953 | void *p = NULL; | 1956 | void *p = NULL; |
| 1954 | loff_t l = 0; | 1957 | loff_t l = 0; |
| 1955 | int cpu; | 1958 | int cpu; |
| 1956 | 1959 | ||
| 1957 | /* copy the tracer to avoid using a global lock all around */ | 1960 | /* |
| 1961 | * copy the tracer to avoid using a global lock all around. | ||
| 1962 | * iter->trace is a copy of current_trace, the pointer to the | ||
| 1963 | * name may be used instead of a strcmp(), as iter->trace->name | ||
| 1964 | * will point to the same string as current_trace->name. | ||
| 1965 | */ | ||
| 1958 | mutex_lock(&trace_types_lock); | 1966 | mutex_lock(&trace_types_lock); |
| 1959 | if (unlikely(old_tracer != current_trace && current_trace)) { | 1967 | if (unlikely(current_trace && iter->trace->name != current_trace->name)) |
| 1960 | old_tracer = current_trace; | ||
| 1961 | *iter->trace = *current_trace; | 1968 | *iter->trace = *current_trace; |
| 1962 | } | ||
| 1963 | mutex_unlock(&trace_types_lock); | 1969 | mutex_unlock(&trace_types_lock); |
| 1964 | 1970 | ||
| 1965 | atomic_inc(&trace_record_cmdline_disabled); | 1971 | if (iter->snapshot && iter->trace->use_max_tr) |
| 1972 | return ERR_PTR(-EBUSY); | ||
| 1973 | |||
| 1974 | if (!iter->snapshot) | ||
| 1975 | atomic_inc(&trace_record_cmdline_disabled); | ||
| 1966 | 1976 | ||
| 1967 | if (*pos != iter->pos) { | 1977 | if (*pos != iter->pos) { |
| 1968 | iter->ent = NULL; | 1978 | iter->ent = NULL; |
| @@ -2001,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p) | |||
| 2001 | { | 2011 | { |
| 2002 | struct trace_iterator *iter = m->private; | 2012 | struct trace_iterator *iter = m->private; |
| 2003 | 2013 | ||
| 2004 | atomic_dec(&trace_record_cmdline_disabled); | 2014 | if (iter->snapshot && iter->trace->use_max_tr) |
| 2015 | return; | ||
| 2016 | |||
| 2017 | if (!iter->snapshot) | ||
| 2018 | atomic_dec(&trace_record_cmdline_disabled); | ||
| 2005 | trace_access_unlock(iter->cpu_file); | 2019 | trace_access_unlock(iter->cpu_file); |
| 2006 | trace_event_read_unlock(); | 2020 | trace_event_read_unlock(); |
| 2007 | } | 2021 | } |
| @@ -2086,8 +2100,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
| 2086 | unsigned long total; | 2100 | unsigned long total; |
| 2087 | const char *name = "preemption"; | 2101 | const char *name = "preemption"; |
| 2088 | 2102 | ||
| 2089 | if (type) | 2103 | name = type->name; |
| 2090 | name = type->name; | ||
| 2091 | 2104 | ||
| 2092 | get_total_entries(tr, &total, &entries); | 2105 | get_total_entries(tr, &total, &entries); |
| 2093 | 2106 | ||
| @@ -2436,7 +2449,7 @@ static const struct seq_operations tracer_seq_ops = { | |||
| 2436 | }; | 2449 | }; |
| 2437 | 2450 | ||
| 2438 | static struct trace_iterator * | 2451 | static struct trace_iterator * |
| 2439 | __tracing_open(struct inode *inode, struct file *file) | 2452 | __tracing_open(struct inode *inode, struct file *file, bool snapshot) |
| 2440 | { | 2453 | { |
| 2441 | long cpu_file = (long) inode->i_private; | 2454 | long cpu_file = (long) inode->i_private; |
| 2442 | struct trace_iterator *iter; | 2455 | struct trace_iterator *iter; |
| @@ -2463,16 +2476,16 @@ __tracing_open(struct inode *inode, struct file *file) | |||
| 2463 | if (!iter->trace) | 2476 | if (!iter->trace) |
| 2464 | goto fail; | 2477 | goto fail; |
| 2465 | 2478 | ||
| 2466 | if (current_trace) | 2479 | *iter->trace = *current_trace; |
| 2467 | *iter->trace = *current_trace; | ||
| 2468 | 2480 | ||
| 2469 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) | 2481 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) |
| 2470 | goto fail; | 2482 | goto fail; |
| 2471 | 2483 | ||
| 2472 | if (current_trace && current_trace->print_max) | 2484 | if (current_trace->print_max || snapshot) |
| 2473 | iter->tr = &max_tr; | 2485 | iter->tr = &max_tr; |
| 2474 | else | 2486 | else |
| 2475 | iter->tr = &global_trace; | 2487 | iter->tr = &global_trace; |
| 2488 | iter->snapshot = snapshot; | ||
| 2476 | iter->pos = -1; | 2489 | iter->pos = -1; |
| 2477 | mutex_init(&iter->mutex); | 2490 | mutex_init(&iter->mutex); |
| 2478 | iter->cpu_file = cpu_file; | 2491 | iter->cpu_file = cpu_file; |
| @@ -2489,8 +2502,9 @@ __tracing_open(struct inode *inode, struct file *file) | |||
| 2489 | if (trace_clocks[trace_clock_id].in_ns) | 2502 | if (trace_clocks[trace_clock_id].in_ns) |
| 2490 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | 2503 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
| 2491 | 2504 | ||
| 2492 | /* stop the trace while dumping */ | 2505 | /* stop the trace while dumping if we are not opening "snapshot" */ |
| 2493 | tracing_stop(); | 2506 | if (!iter->snapshot) |
| 2507 | tracing_stop(); | ||
| 2494 | 2508 | ||
| 2495 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 2509 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { |
| 2496 | for_each_tracing_cpu(cpu) { | 2510 | for_each_tracing_cpu(cpu) { |
| @@ -2553,8 +2567,9 @@ static int tracing_release(struct inode *inode, struct file *file) | |||
| 2553 | if (iter->trace && iter->trace->close) | 2567 | if (iter->trace && iter->trace->close) |
| 2554 | iter->trace->close(iter); | 2568 | iter->trace->close(iter); |
| 2555 | 2569 | ||
| 2556 | /* reenable tracing if it was previously enabled */ | 2570 | if (!iter->snapshot) |
| 2557 | tracing_start(); | 2571 | /* reenable tracing if it was previously enabled */ |
| 2572 | tracing_start(); | ||
| 2558 | mutex_unlock(&trace_types_lock); | 2573 | mutex_unlock(&trace_types_lock); |
| 2559 | 2574 | ||
| 2560 | mutex_destroy(&iter->mutex); | 2575 | mutex_destroy(&iter->mutex); |
| @@ -2582,7 +2597,7 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
| 2582 | } | 2597 | } |
| 2583 | 2598 | ||
| 2584 | if (file->f_mode & FMODE_READ) { | 2599 | if (file->f_mode & FMODE_READ) { |
| 2585 | iter = __tracing_open(inode, file); | 2600 | iter = __tracing_open(inode, file, false); |
| 2586 | if (IS_ERR(iter)) | 2601 | if (IS_ERR(iter)) |
| 2587 | ret = PTR_ERR(iter); | 2602 | ret = PTR_ERR(iter); |
| 2588 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) | 2603 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) |
| @@ -3020,10 +3035,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
| 3020 | int r; | 3035 | int r; |
| 3021 | 3036 | ||
| 3022 | mutex_lock(&trace_types_lock); | 3037 | mutex_lock(&trace_types_lock); |
| 3023 | if (current_trace) | 3038 | r = sprintf(buf, "%s\n", current_trace->name); |
| 3024 | r = sprintf(buf, "%s\n", current_trace->name); | ||
| 3025 | else | ||
| 3026 | r = sprintf(buf, "\n"); | ||
| 3027 | mutex_unlock(&trace_types_lock); | 3039 | mutex_unlock(&trace_types_lock); |
| 3028 | 3040 | ||
| 3029 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3041 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
| @@ -3214,10 +3226,10 @@ static int tracing_set_tracer(const char *buf) | |||
| 3214 | goto out; | 3226 | goto out; |
| 3215 | 3227 | ||
| 3216 | trace_branch_disable(); | 3228 | trace_branch_disable(); |
| 3217 | if (current_trace && current_trace->reset) | 3229 | if (current_trace->reset) |
| 3218 | current_trace->reset(tr); | 3230 | current_trace->reset(tr); |
| 3219 | 3231 | ||
| 3220 | had_max_tr = current_trace && current_trace->use_max_tr; | 3232 | had_max_tr = current_trace->allocated_snapshot; |
| 3221 | current_trace = &nop_trace; | 3233 | current_trace = &nop_trace; |
| 3222 | 3234 | ||
| 3223 | if (had_max_tr && !t->use_max_tr) { | 3235 | if (had_max_tr && !t->use_max_tr) { |
| @@ -3236,6 +3248,8 @@ static int tracing_set_tracer(const char *buf) | |||
| 3236 | */ | 3248 | */ |
| 3237 | ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); | 3249 | ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); |
| 3238 | set_buffer_entries(&max_tr, 1); | 3250 | set_buffer_entries(&max_tr, 1); |
| 3251 | tracing_reset_online_cpus(&max_tr); | ||
| 3252 | current_trace->allocated_snapshot = false; | ||
| 3239 | } | 3253 | } |
| 3240 | destroy_trace_option_files(topts); | 3254 | destroy_trace_option_files(topts); |
| 3241 | 3255 | ||
| @@ -3246,6 +3260,7 @@ static int tracing_set_tracer(const char *buf) | |||
| 3246 | RING_BUFFER_ALL_CPUS); | 3260 | RING_BUFFER_ALL_CPUS); |
| 3247 | if (ret < 0) | 3261 | if (ret < 0) |
| 3248 | goto out; | 3262 | goto out; |
| 3263 | t->allocated_snapshot = true; | ||
| 3249 | } | 3264 | } |
| 3250 | 3265 | ||
| 3251 | if (t->init) { | 3266 | if (t->init) { |
| @@ -3353,8 +3368,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
| 3353 | ret = -ENOMEM; | 3368 | ret = -ENOMEM; |
| 3354 | goto fail; | 3369 | goto fail; |
| 3355 | } | 3370 | } |
| 3356 | if (current_trace) | 3371 | *iter->trace = *current_trace; |
| 3357 | *iter->trace = *current_trace; | ||
| 3358 | 3372 | ||
| 3359 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 3373 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { |
| 3360 | ret = -ENOMEM; | 3374 | ret = -ENOMEM; |
| @@ -3494,7 +3508,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, | |||
| 3494 | size_t cnt, loff_t *ppos) | 3508 | size_t cnt, loff_t *ppos) |
| 3495 | { | 3509 | { |
| 3496 | struct trace_iterator *iter = filp->private_data; | 3510 | struct trace_iterator *iter = filp->private_data; |
| 3497 | static struct tracer *old_tracer; | ||
| 3498 | ssize_t sret; | 3511 | ssize_t sret; |
| 3499 | 3512 | ||
| 3500 | /* return any leftover data */ | 3513 | /* return any leftover data */ |
| @@ -3506,10 +3519,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, | |||
| 3506 | 3519 | ||
| 3507 | /* copy the tracer to avoid using a global lock all around */ | 3520 | /* copy the tracer to avoid using a global lock all around */ |
| 3508 | mutex_lock(&trace_types_lock); | 3521 | mutex_lock(&trace_types_lock); |
| 3509 | if (unlikely(old_tracer != current_trace && current_trace)) { | 3522 | if (unlikely(iter->trace->name != current_trace->name)) |
| 3510 | old_tracer = current_trace; | ||
| 3511 | *iter->trace = *current_trace; | 3523 | *iter->trace = *current_trace; |
| 3512 | } | ||
| 3513 | mutex_unlock(&trace_types_lock); | 3524 | mutex_unlock(&trace_types_lock); |
| 3514 | 3525 | ||
| 3515 | /* | 3526 | /* |
| @@ -3665,7 +3676,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
| 3665 | .ops = &tracing_pipe_buf_ops, | 3676 | .ops = &tracing_pipe_buf_ops, |
| 3666 | .spd_release = tracing_spd_release_pipe, | 3677 | .spd_release = tracing_spd_release_pipe, |
| 3667 | }; | 3678 | }; |
| 3668 | static struct tracer *old_tracer; | ||
| 3669 | ssize_t ret; | 3679 | ssize_t ret; |
| 3670 | size_t rem; | 3680 | size_t rem; |
| 3671 | unsigned int i; | 3681 | unsigned int i; |
| @@ -3675,10 +3685,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
| 3675 | 3685 | ||
| 3676 | /* copy the tracer to avoid using a global lock all around */ | 3686 | /* copy the tracer to avoid using a global lock all around */ |
| 3677 | mutex_lock(&trace_types_lock); | 3687 | mutex_lock(&trace_types_lock); |
| 3678 | if (unlikely(old_tracer != current_trace && current_trace)) { | 3688 | if (unlikely(iter->trace->name != current_trace->name)) |
| 3679 | old_tracer = current_trace; | ||
| 3680 | *iter->trace = *current_trace; | 3689 | *iter->trace = *current_trace; |
| 3681 | } | ||
| 3682 | mutex_unlock(&trace_types_lock); | 3690 | mutex_unlock(&trace_types_lock); |
| 3683 | 3691 | ||
| 3684 | mutex_lock(&iter->mutex); | 3692 | mutex_lock(&iter->mutex); |
| @@ -4070,6 +4078,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file) | |||
| 4070 | return single_open(file, tracing_clock_show, NULL); | 4078 | return single_open(file, tracing_clock_show, NULL); |
| 4071 | } | 4079 | } |
| 4072 | 4080 | ||
| 4081 | #ifdef CONFIG_TRACER_SNAPSHOT | ||
| 4082 | static int tracing_snapshot_open(struct inode *inode, struct file *file) | ||
| 4083 | { | ||
| 4084 | struct trace_iterator *iter; | ||
| 4085 | int ret = 0; | ||
| 4086 | |||
| 4087 | if (file->f_mode & FMODE_READ) { | ||
| 4088 | iter = __tracing_open(inode, file, true); | ||
| 4089 | if (IS_ERR(iter)) | ||
| 4090 | ret = PTR_ERR(iter); | ||
| 4091 | } | ||
| 4092 | return ret; | ||
| 4093 | } | ||
| 4094 | |||
| 4095 | static ssize_t | ||
| 4096 | tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
| 4097 | loff_t *ppos) | ||
| 4098 | { | ||
| 4099 | unsigned long val; | ||
| 4100 | int ret; | ||
| 4101 | |||
| 4102 | ret = tracing_update_buffers(); | ||
| 4103 | if (ret < 0) | ||
| 4104 | return ret; | ||
| 4105 | |||
| 4106 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | ||
| 4107 | if (ret) | ||
| 4108 | return ret; | ||
| 4109 | |||
| 4110 | mutex_lock(&trace_types_lock); | ||
| 4111 | |||
| 4112 | if (current_trace->use_max_tr) { | ||
| 4113 | ret = -EBUSY; | ||
| 4114 | goto out; | ||
| 4115 | } | ||
| 4116 | |||
| 4117 | switch (val) { | ||
| 4118 | case 0: | ||
| 4119 | if (current_trace->allocated_snapshot) { | ||
| 4120 | /* free spare buffer */ | ||
| 4121 | ring_buffer_resize(max_tr.buffer, 1, | ||
| 4122 | RING_BUFFER_ALL_CPUS); | ||
| 4123 | set_buffer_entries(&max_tr, 1); | ||
| 4124 | tracing_reset_online_cpus(&max_tr); | ||
| 4125 | current_trace->allocated_snapshot = false; | ||
| 4126 | } | ||
| 4127 | break; | ||
| 4128 | case 1: | ||
| 4129 | if (!current_trace->allocated_snapshot) { | ||
| 4130 | /* allocate spare buffer */ | ||
| 4131 | ret = resize_buffer_duplicate_size(&max_tr, | ||
| 4132 | &global_trace, RING_BUFFER_ALL_CPUS); | ||
| 4133 | if (ret < 0) | ||
| 4134 | break; | ||
| 4135 | current_trace->allocated_snapshot = true; | ||
| 4136 | } | ||
| 4137 | |||
| 4138 | local_irq_disable(); | ||
| 4139 | /* Now, we're going to swap */ | ||
| 4140 | update_max_tr(&global_trace, current, smp_processor_id()); | ||
| 4141 | local_irq_enable(); | ||
| 4142 | break; | ||
| 4143 | default: | ||
| 4144 | if (current_trace->allocated_snapshot) | ||
| 4145 | tracing_reset_online_cpus(&max_tr); | ||
| 4146 | else | ||
| 4147 | ret = -EINVAL; | ||
| 4148 | break; | ||
| 4149 | } | ||
| 4150 | |||
| 4151 | if (ret >= 0) { | ||
| 4152 | *ppos += cnt; | ||
| 4153 | ret = cnt; | ||
| 4154 | } | ||
| 4155 | out: | ||
| 4156 | mutex_unlock(&trace_types_lock); | ||
| 4157 | return ret; | ||
| 4158 | } | ||
| 4159 | #endif /* CONFIG_TRACER_SNAPSHOT */ | ||
| 4160 | |||
| 4161 | |||
| 4073 | static const struct file_operations tracing_max_lat_fops = { | 4162 | static const struct file_operations tracing_max_lat_fops = { |
| 4074 | .open = tracing_open_generic, | 4163 | .open = tracing_open_generic, |
| 4075 | .read = tracing_max_lat_read, | 4164 | .read = tracing_max_lat_read, |
| @@ -4126,6 +4215,16 @@ static const struct file_operations trace_clock_fops = { | |||
| 4126 | .write = tracing_clock_write, | 4215 | .write = tracing_clock_write, |
| 4127 | }; | 4216 | }; |
| 4128 | 4217 | ||
| 4218 | #ifdef CONFIG_TRACER_SNAPSHOT | ||
| 4219 | static const struct file_operations snapshot_fops = { | ||
| 4220 | .open = tracing_snapshot_open, | ||
| 4221 | .read = seq_read, | ||
| 4222 | .write = tracing_snapshot_write, | ||
| 4223 | .llseek = tracing_seek, | ||
| 4224 | .release = tracing_release, | ||
| 4225 | }; | ||
| 4226 | #endif /* CONFIG_TRACER_SNAPSHOT */ | ||
| 4227 | |||
| 4129 | struct ftrace_buffer_info { | 4228 | struct ftrace_buffer_info { |
| 4130 | struct trace_array *tr; | 4229 | struct trace_array *tr; |
| 4131 | void *spare; | 4230 | void *spare; |
| @@ -4430,6 +4529,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
| 4430 | cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu); | 4529 | cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu); |
| 4431 | trace_seq_printf(s, "dropped events: %ld\n", cnt); | 4530 | trace_seq_printf(s, "dropped events: %ld\n", cnt); |
| 4432 | 4531 | ||
| 4532 | cnt = ring_buffer_read_events_cpu(tr->buffer, cpu); | ||
| 4533 | trace_seq_printf(s, "read events: %ld\n", cnt); | ||
| 4534 | |||
| 4433 | count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | 4535 | count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); |
| 4434 | 4536 | ||
| 4435 | kfree(s); | 4537 | kfree(s); |
| @@ -4506,7 +4608,7 @@ struct dentry *tracing_init_dentry(void) | |||
| 4506 | 4608 | ||
| 4507 | static struct dentry *d_percpu; | 4609 | static struct dentry *d_percpu; |
| 4508 | 4610 | ||
| 4509 | struct dentry *tracing_dentry_percpu(void) | 4611 | static struct dentry *tracing_dentry_percpu(void) |
| 4510 | { | 4612 | { |
| 4511 | static int once; | 4613 | static int once; |
| 4512 | struct dentry *d_tracer; | 4614 | struct dentry *d_tracer; |
| @@ -4922,6 +5024,11 @@ static __init int tracer_init_debugfs(void) | |||
| 4922 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 5024 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
| 4923 | #endif | 5025 | #endif |
| 4924 | 5026 | ||
| 5027 | #ifdef CONFIG_TRACER_SNAPSHOT | ||
| 5028 | trace_create_file("snapshot", 0644, d_tracer, | ||
| 5029 | (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops); | ||
| 5030 | #endif | ||
| 5031 | |||
| 4925 | create_trace_options_dir(); | 5032 | create_trace_options_dir(); |
| 4926 | 5033 | ||
| 4927 | for_each_tracing_cpu(cpu) | 5034 | for_each_tracing_cpu(cpu) |
| @@ -5030,6 +5137,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | |||
| 5030 | if (disable_tracing) | 5137 | if (disable_tracing) |
| 5031 | ftrace_kill(); | 5138 | ftrace_kill(); |
| 5032 | 5139 | ||
| 5140 | /* Simulate the iterator */ | ||
| 5033 | trace_init_global_iter(&iter); | 5141 | trace_init_global_iter(&iter); |
| 5034 | 5142 | ||
| 5035 | for_each_tracing_cpu(cpu) { | 5143 | for_each_tracing_cpu(cpu) { |
| @@ -5041,10 +5149,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | |||
| 5041 | /* don't look at user memory in panic mode */ | 5149 | /* don't look at user memory in panic mode */ |
| 5042 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 5150 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; |
| 5043 | 5151 | ||
| 5044 | /* Simulate the iterator */ | ||
| 5045 | iter.tr = &global_trace; | ||
| 5046 | iter.trace = current_trace; | ||
| 5047 | |||
| 5048 | switch (oops_dump_mode) { | 5152 | switch (oops_dump_mode) { |
| 5049 | case DUMP_ALL: | 5153 | case DUMP_ALL: |
| 5050 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | 5154 | iter.cpu_file = TRACE_PIPE_ALL_CPU; |
| @@ -5189,7 +5293,7 @@ __init static int tracer_alloc_buffers(void) | |||
| 5189 | init_irq_work(&trace_work_wakeup, trace_wake_up); | 5293 | init_irq_work(&trace_work_wakeup, trace_wake_up); |
| 5190 | 5294 | ||
| 5191 | register_tracer(&nop_trace); | 5295 | register_tracer(&nop_trace); |
| 5192 | current_trace = &nop_trace; | 5296 | |
| 5193 | /* All seems OK, enable tracing */ | 5297 | /* All seems OK, enable tracing */ |
| 5194 | tracing_disabled = 0; | 5298 | tracing_disabled = 0; |
| 5195 | 5299 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 04a2c7ab1735..57d7e5397d56 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -287,6 +287,7 @@ struct tracer { | |||
| 287 | struct tracer_flags *flags; | 287 | struct tracer_flags *flags; |
| 288 | bool print_max; | 288 | bool print_max; |
| 289 | bool use_max_tr; | 289 | bool use_max_tr; |
| 290 | bool allocated_snapshot; | ||
| 290 | }; | 291 | }; |
| 291 | 292 | ||
| 292 | 293 | ||
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 22b638b28e48..24bf48eabfcc 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c | |||
| @@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void) | |||
| 84 | local_irq_save(flags); | 84 | local_irq_save(flags); |
| 85 | 85 | ||
| 86 | this_cpu = raw_smp_processor_id(); | 86 | this_cpu = raw_smp_processor_id(); |
| 87 | now = cpu_clock(this_cpu); | 87 | now = sched_clock_cpu(this_cpu); |
| 88 | /* | 88 | /* |
| 89 | * If in an NMI context then dont risk lockups and return the | 89 | * If in an NMI context then dont risk lockups and return the |
| 90 | * cpu_clock() time: | 90 | * cpu_clock() time: |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 7008d2e13cf2..39ada66389cc 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -191,10 +191,16 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer) | |||
| 191 | 191 | ||
| 192 | ftrace_pop_return_trace(&trace, &ret, frame_pointer); | 192 | ftrace_pop_return_trace(&trace, &ret, frame_pointer); |
| 193 | trace.rettime = trace_clock_local(); | 193 | trace.rettime = trace_clock_local(); |
| 194 | ftrace_graph_return(&trace); | ||
| 195 | barrier(); | 194 | barrier(); |
| 196 | current->curr_ret_stack--; | 195 | current->curr_ret_stack--; |
| 197 | 196 | ||
| 197 | /* | ||
| 198 | * The trace should run after decrementing the ret counter | ||
| 199 | * in case an interrupt were to come in. We don't want to | ||
| 200 | * lose the interrupt if max_depth is set. | ||
| 201 | */ | ||
| 202 | ftrace_graph_return(&trace); | ||
| 203 | |||
| 198 | if (unlikely(!ret)) { | 204 | if (unlikely(!ret)) { |
| 199 | ftrace_graph_stop(); | 205 | ftrace_graph_stop(); |
| 200 | WARN_ON(1); | 206 | WARN_ON(1); |
diff --git a/samples/Kconfig b/samples/Kconfig index 7b6792a18c05..6181c2cc9ca0 100644 --- a/samples/Kconfig +++ b/samples/Kconfig | |||
| @@ -5,12 +5,6 @@ menuconfig SAMPLES | |||
| 5 | 5 | ||
| 6 | if SAMPLES | 6 | if SAMPLES |
| 7 | 7 | ||
| 8 | config SAMPLE_TRACEPOINTS | ||
| 9 | tristate "Build tracepoints examples -- loadable modules only" | ||
| 10 | depends on TRACEPOINTS && m | ||
| 11 | help | ||
| 12 | This build tracepoints example modules. | ||
| 13 | |||
| 14 | config SAMPLE_TRACE_EVENTS | 8 | config SAMPLE_TRACE_EVENTS |
| 15 | tristate "Build trace_events examples -- loadable modules only" | 9 | tristate "Build trace_events examples -- loadable modules only" |
| 16 | depends on EVENT_TRACING && m | 10 | depends on EVENT_TRACING && m |
diff --git a/samples/Makefile b/samples/Makefile index 5ef08bba96ce..1a60c62e2045 100644 --- a/samples/Makefile +++ b/samples/Makefile | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | # Makefile for Linux samples code | 1 | # Makefile for Linux samples code |
| 2 | 2 | ||
| 3 | obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ \ | 3 | obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ \ |
| 4 | hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ | 4 | hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ |
diff --git a/samples/tracepoints/Makefile b/samples/tracepoints/Makefile deleted file mode 100644 index 36479ad9ae14..000000000000 --- a/samples/tracepoints/Makefile +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | # builds the tracepoint example kernel modules; | ||
| 2 | # then to use one (as root): insmod <module_name.ko> | ||
| 3 | |||
| 4 | obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o | ||
| 5 | obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o | ||
| 6 | obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o | ||
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h deleted file mode 100644 index 4d46be965961..000000000000 --- a/samples/tracepoints/tp-samples-trace.h +++ /dev/null | |||
| @@ -1,11 +0,0 @@ | |||
| 1 | #ifndef _TP_SAMPLES_TRACE_H | ||
| 2 | #define _TP_SAMPLES_TRACE_H | ||
| 3 | |||
| 4 | #include <linux/proc_fs.h> /* for struct inode and struct file */ | ||
| 5 | #include <linux/tracepoint.h> | ||
| 6 | |||
| 7 | DECLARE_TRACE(subsys_event, | ||
| 8 | TP_PROTO(struct inode *inode, struct file *file), | ||
| 9 | TP_ARGS(inode, file)); | ||
| 10 | DECLARE_TRACE_NOARGS(subsys_eventb); | ||
| 11 | #endif | ||
diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c deleted file mode 100644 index 744c0b9652a7..000000000000 --- a/samples/tracepoints/tracepoint-probe-sample.c +++ /dev/null | |||
| @@ -1,57 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * tracepoint-probe-sample.c | ||
| 3 | * | ||
| 4 | * sample tracepoint probes. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/module.h> | ||
| 8 | #include <linux/file.h> | ||
| 9 | #include <linux/dcache.h> | ||
| 10 | #include "tp-samples-trace.h" | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Here the caller only guarantees locking for struct file and struct inode. | ||
| 14 | * Locking must therefore be done in the probe to use the dentry. | ||
| 15 | */ | ||
| 16 | static void probe_subsys_event(void *ignore, | ||
| 17 | struct inode *inode, struct file *file) | ||
| 18 | { | ||
| 19 | path_get(&file->f_path); | ||
| 20 | dget(file->f_path.dentry); | ||
| 21 | printk(KERN_INFO "Event is encountered with filename %s\n", | ||
| 22 | file->f_path.dentry->d_name.name); | ||
| 23 | dput(file->f_path.dentry); | ||
| 24 | path_put(&file->f_path); | ||
| 25 | } | ||
| 26 | |||
| 27 | static void probe_subsys_eventb(void *ignore) | ||
| 28 | { | ||
| 29 | printk(KERN_INFO "Event B is encountered\n"); | ||
| 30 | } | ||
| 31 | |||
| 32 | static int __init tp_sample_trace_init(void) | ||
| 33 | { | ||
| 34 | int ret; | ||
| 35 | |||
| 36 | ret = register_trace_subsys_event(probe_subsys_event, NULL); | ||
| 37 | WARN_ON(ret); | ||
| 38 | ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL); | ||
| 39 | WARN_ON(ret); | ||
| 40 | |||
| 41 | return 0; | ||
| 42 | } | ||
| 43 | |||
| 44 | module_init(tp_sample_trace_init); | ||
| 45 | |||
| 46 | static void __exit tp_sample_trace_exit(void) | ||
| 47 | { | ||
| 48 | unregister_trace_subsys_eventb(probe_subsys_eventb, NULL); | ||
| 49 | unregister_trace_subsys_event(probe_subsys_event, NULL); | ||
| 50 | tracepoint_synchronize_unregister(); | ||
| 51 | } | ||
| 52 | |||
| 53 | module_exit(tp_sample_trace_exit); | ||
| 54 | |||
| 55 | MODULE_LICENSE("GPL"); | ||
| 56 | MODULE_AUTHOR("Mathieu Desnoyers"); | ||
| 57 | MODULE_DESCRIPTION("Tracepoint Probes Samples"); | ||
diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c deleted file mode 100644 index 9fcf990e5d4b..000000000000 --- a/samples/tracepoints/tracepoint-probe-sample2.c +++ /dev/null | |||
| @@ -1,44 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * tracepoint-probe-sample2.c | ||
| 3 | * | ||
| 4 | * 2nd sample tracepoint probes. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/module.h> | ||
| 8 | #include <linux/fs.h> | ||
| 9 | #include "tp-samples-trace.h" | ||
| 10 | |||
| 11 | /* | ||
| 12 | * Here the caller only guarantees locking for struct file and struct inode. | ||
| 13 | * Locking must therefore be done in the probe to use the dentry. | ||
| 14 | */ | ||
| 15 | static void probe_subsys_event(void *ignore, | ||
| 16 | struct inode *inode, struct file *file) | ||
| 17 | { | ||
| 18 | printk(KERN_INFO "Event is encountered with inode number %lu\n", | ||
| 19 | inode->i_ino); | ||
| 20 | } | ||
| 21 | |||
| 22 | static int __init tp_sample_trace_init(void) | ||
| 23 | { | ||
| 24 | int ret; | ||
| 25 | |||
| 26 | ret = register_trace_subsys_event(probe_subsys_event, NULL); | ||
| 27 | WARN_ON(ret); | ||
| 28 | |||
| 29 | return 0; | ||
| 30 | } | ||
| 31 | |||
| 32 | module_init(tp_sample_trace_init); | ||
| 33 | |||
| 34 | static void __exit tp_sample_trace_exit(void) | ||
| 35 | { | ||
| 36 | unregister_trace_subsys_event(probe_subsys_event, NULL); | ||
| 37 | tracepoint_synchronize_unregister(); | ||
| 38 | } | ||
| 39 | |||
| 40 | module_exit(tp_sample_trace_exit); | ||
| 41 | |||
| 42 | MODULE_LICENSE("GPL"); | ||
| 43 | MODULE_AUTHOR("Mathieu Desnoyers"); | ||
| 44 | MODULE_DESCRIPTION("Tracepoint Probes Samples"); | ||
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c deleted file mode 100644 index f4d89e008c32..000000000000 --- a/samples/tracepoints/tracepoint-sample.c +++ /dev/null | |||
| @@ -1,57 +0,0 @@ | |||
| 1 | /* tracepoint-sample.c | ||
| 2 | * | ||
| 3 | * Executes a tracepoint when /proc/tracepoint-sample is opened. | ||
| 4 | * | ||
| 5 | * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> | ||
| 6 | * | ||
| 7 | * This file is released under the GPLv2. | ||
| 8 | * See the file COPYING for more details. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/sched.h> | ||
| 13 | #include <linux/proc_fs.h> | ||
| 14 | #include "tp-samples-trace.h" | ||
| 15 | |||
| 16 | DEFINE_TRACE(subsys_event); | ||
| 17 | DEFINE_TRACE(subsys_eventb); | ||
| 18 | |||
| 19 | struct proc_dir_entry *pentry_sample; | ||
| 20 | |||
| 21 | static int my_open(struct inode *inode, struct file *file) | ||
| 22 | { | ||
| 23 | int i; | ||
| 24 | |||
| 25 | trace_subsys_event(inode, file); | ||
| 26 | for (i = 0; i < 10; i++) | ||
| 27 | trace_subsys_eventb(); | ||
| 28 | return -EPERM; | ||
| 29 | } | ||
| 30 | |||
| 31 | static const struct file_operations mark_ops = { | ||
| 32 | .open = my_open, | ||
| 33 | .llseek = noop_llseek, | ||
| 34 | }; | ||
| 35 | |||
| 36 | static int __init sample_init(void) | ||
| 37 | { | ||
| 38 | printk(KERN_ALERT "sample init\n"); | ||
| 39 | pentry_sample = proc_create("tracepoint-sample", 0444, NULL, | ||
| 40 | &mark_ops); | ||
| 41 | if (!pentry_sample) | ||
| 42 | return -EPERM; | ||
| 43 | return 0; | ||
| 44 | } | ||
| 45 | |||
| 46 | static void __exit sample_exit(void) | ||
| 47 | { | ||
| 48 | printk(KERN_ALERT "sample exit\n"); | ||
| 49 | remove_proc_entry("tracepoint-sample", NULL); | ||
| 50 | } | ||
| 51 | |||
| 52 | module_init(sample_init) | ||
| 53 | module_exit(sample_exit) | ||
| 54 | |||
| 55 | MODULE_LICENSE("GPL"); | ||
| 56 | MODULE_AUTHOR("Mathieu Desnoyers"); | ||
| 57 | MODULE_DESCRIPTION("Tracepoint sample"); | ||
