-rw-r--r--  Documentation/ABI/testing/debugfs-kmemtrace | 71
-rw-r--r--  Documentation/ftrace.txt | 1134
-rw-r--r--  Documentation/kernel-parameters.txt | 12
-rw-r--r--  Documentation/sysrq.txt | 2
-rw-r--r--  Documentation/tracepoints.txt | 21
-rw-r--r--  Documentation/vm/kmemtrace.txt | 126
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  arch/Kconfig | 1
-rw-r--r--  arch/alpha/include/asm/ftrace.h | 1
-rw-r--r--  arch/alpha/include/asm/hardirq.h | 13
-rw-r--r--  arch/avr32/include/asm/ftrace.h | 1
-rw-r--r--  arch/avr32/include/asm/hardirq.h | 11
-rw-r--r--  arch/blackfin/include/asm/ftrace.h | 1
-rw-r--r--  arch/cris/include/asm/ftrace.h | 1
-rw-r--r--  arch/h8300/include/asm/ftrace.h | 1
-rw-r--r--  arch/ia64/Kconfig | 3
-rw-r--r--  arch/ia64/include/asm/ftrace.h | 28
-rw-r--r--  arch/ia64/include/asm/hardirq.h | 10
-rw-r--r--  arch/ia64/kernel/Makefile | 5
-rw-r--r--  arch/ia64/kernel/entry.S | 100
-rw-r--r--  arch/ia64/kernel/ftrace.c | 206
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 6
-rw-r--r--  arch/m68k/include/asm/ftrace.h | 1
-rw-r--r--  arch/mips/include/asm/ftrace.h | 1
-rw-r--r--  arch/parisc/include/asm/ftrace.h | 1
-rw-r--r--  arch/um/include/asm/ftrace.h | 1
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 5
-rw-r--r--  arch/x86/include/asm/fixmap.h | 2
-rw-r--r--  arch/x86/include/asm/ftrace.h | 7
-rw-r--r--  arch/x86/include/asm/ptrace-abi.h | 3
-rw-r--r--  arch/x86/include/asm/thread_info.h | 9
-rw-r--r--  arch/x86/kernel/Makefile | 3
-rw-r--r--  arch/x86/kernel/alternative.c | 29
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  arch/x86/kernel/dumpstack.c | 6
-rw-r--r--  arch/x86/kernel/ftrace.c | 192
-rw-r--r--  arch/x86/kernel/kprobes.c | 17
-rw-r--r--  arch/x86/kernel/process.c | 5
-rw-r--r--  arch/x86/kernel/ptrace.c | 7
-rw-r--r--  arch/x86/kvm/Kconfig | 3
-rw-r--r--  arch/x86/mm/init_32.c | 35
-rw-r--r--  arch/x86/mm/init_64.c | 37
-rw-r--r--  arch/xtensa/include/asm/ftrace.h | 1
-rw-r--r--  block/Kconfig | 16
-rw-r--r--  block/Makefile | 1
-rw-r--r--  drivers/char/sysrq.c | 2
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 5
-rw-r--r--  fs/debugfs/inode.c | 16
-rw-r--r--  fs/partitions/check.c | 4
-rw-r--r--  include/asm-frv/ftrace.h | 1
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 29
-rw-r--r--  include/asm-m32r/ftrace.h | 1
-rw-r--r--  include/asm-mn10300/ftrace.h | 1
-rw-r--r--  include/linux/blktrace_api.h | 5
-rw-r--r--  include/linux/compiler.h | 6
-rw-r--r--  include/linux/debugfs.h | 8
-rw-r--r--  include/linux/ftrace.h | 244
-rw-r--r--  include/linux/ftrace_irq.h | 2
-rw-r--r--  include/linux/hardirq.h | 73
-rw-r--r--  include/linux/interrupt.h | 5
-rw-r--r--  include/linux/irqflags.h | 8
-rw-r--r--  include/linux/kernel.h | 150
-rw-r--r--  include/linux/memory.h | 6
-rw-r--r--  include/linux/module.h | 5
-rw-r--r--  include/linux/ring_buffer.h | 38
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/slab_def.h | 68
-rw-r--r--  include/linux/slob_def.h | 9
-rw-r--r--  include/linux/slub_def.h | 53
-rw-r--r--  include/linux/string.h | 7
-rw-r--r--  include/linux/syscalls.h | 60
-rw-r--r--  include/linux/trace_clock.h | 19
-rw-r--r--  include/linux/tracepoint.h | 116
-rw-r--r--  include/trace/block.h | 70
-rw-r--r--  include/trace/irq.h | 9
-rw-r--r--  include/trace/irq_event_types.h | 55
-rw-r--r--  include/trace/kmemtrace.h | 75
-rw-r--r--  include/trace/lockdep.h | 9
-rw-r--r--  include/trace/lockdep_event_types.h | 44
-rw-r--r--  include/trace/power.h | 32
-rw-r--r--  include/trace/sched.h | 49
-rw-r--r--  include/trace/sched_event_types.h | 337
-rw-r--r--  include/trace/trace_event_types.h | 5
-rw-r--r--  include/trace/trace_events.h | 5
-rw-r--r--  include/trace/workqueue.h | 25
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  init/main.c | 5
-rw-r--r--  kernel/extable.c | 19
-rw-r--r--  kernel/irq/handle.c | 6
-rw-r--r--  kernel/kprobes.c | 19
-rw-r--r--  kernel/lockdep.c | 33
-rw-r--r--  kernel/module.c | 2
-rw-r--r--  kernel/relay.c | 4
-rw-r--r--  kernel/sched.c | 8
-rw-r--r--  kernel/sched_clock.c | 12
-rw-r--r--  kernel/softirq.c | 32
-rw-r--r--  kernel/trace/Kconfig | 123
-rw-r--r--  kernel/trace/Makefile | 13
-rw-r--r--  kernel/trace/blktrace.c (renamed from block/blktrace.c) | 805
-rw-r--r--  kernel/trace/events.c | 14
-rw-r--r--  kernel/trace/ftrace.c | 1133
-rw-r--r--  kernel/trace/kmemtrace.c | 339
-rw-r--r--  kernel/trace/ring_buffer.c | 693
-rw-r--r--  kernel/trace/trace.c | 3034
-rw-r--r--  kernel/trace/trace.h | 315
-rw-r--r--  kernel/trace/trace_boot.c | 36
-rw-r--r--  kernel/trace/trace_branch.c | 278
-rw-r--r--  kernel/trace/trace_clock.c | 109
-rw-r--r--  kernel/trace/trace_event_profile.c | 31
-rw-r--r--  kernel/trace/trace_event_types.h | 173
-rw-r--r--  kernel/trace/trace_events.c | 824
-rw-r--r--  kernel/trace/trace_events_filter.c | 427
-rw-r--r--  kernel/trace/trace_events_stage_1.h | 39
-rw-r--r--  kernel/trace/trace_events_stage_2.h | 176
-rw-r--r--  kernel/trace/trace_events_stage_3.h | 281
-rw-r--r--  kernel/trace/trace_export.c | 102
-rw-r--r--  kernel/trace/trace_functions.c | 369
-rw-r--r--  kernel/trace/trace_functions_graph.c | 570
-rw-r--r--  kernel/trace/trace_hw_branches.c | 185
-rw-r--r--  kernel/trace/trace_irqsoff.c | 54
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 45
-rw-r--r--  kernel/trace/trace_nop.c | 6
-rw-r--r--  kernel/trace/trace_output.c | 1017
-rw-r--r--  kernel/trace/trace_output.h | 71
-rw-r--r--  kernel/trace/trace_power.c | 194
-rw-r--r--  kernel/trace/trace_printk.c | 270
-rw-r--r--  kernel/trace/trace_sched_switch.c | 24
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 96
-rw-r--r--  kernel/trace/trace_selftest.c | 169
-rw-r--r--  kernel/trace/trace_stack.c | 19
-rw-r--r--  kernel/trace/trace_stat.c | 326
-rw-r--r--  kernel/trace/trace_stat.h | 31
-rw-r--r--  kernel/trace/trace_syscalls.c | 250
-rw-r--r--  kernel/trace/trace_sysprof.c | 23
-rw-r--r--  kernel/trace/trace_workqueue.c | 288
-rw-r--r--  kernel/tracepoint.c | 7
-rw-r--r--  kernel/workqueue.c | 16
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/locking-selftest.c | 4
-rw-r--r--  lib/vsprintf.c | 1005
-rw-r--r--  mm/slab.c | 71
-rw-r--r--  mm/slob.c | 35
-rw-r--r--  mm/slub.c | 83
-rw-r--r--  samples/tracepoints/tp-samples-trace.h | 8
-rw-r--r--  samples/tracepoints/tracepoint-sample.c | 24
-rw-r--r--  scripts/Makefile.build | 13
-rw-r--r--  scripts/kallsyms.c | 57
-rwxr-xr-x  scripts/recordmcount.pl | 37
149 files changed, 14599 insertions, 3666 deletions
diff --git a/Documentation/ABI/testing/debugfs-kmemtrace b/Documentation/ABI/testing/debugfs-kmemtrace
new file mode 100644
index 000000000000..5e6a92a02d85
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-kmemtrace
@@ -0,0 +1,71 @@
What:		/sys/kernel/debug/kmemtrace/
Date:		July 2008
Contact:	Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Description:

In kmemtrace-enabled kernels, the following files are created:

/sys/kernel/debug/kmemtrace/
	cpu<n>		(0400)	Per-CPU tracing data, see below. (binary)
	total_overruns	(0400)	Total number of bytes which were dropped from
				cpu<n> files because of full buffer condition,
				non-binary. (text)
	abi_version	(0400)	Kernel's kmemtrace ABI version. (text)

Each per-CPU file should be read according to the relay interface. That is,
the reader should set affinity to that specific CPU and, as currently done by
the userspace application (though there are other methods), use poll() with
an infinite timeout before every read(). Otherwise, erroneous data may be
read. The binary data has the following _core_ format:

	Event ID	(1 byte)	Unsigned integer, one of:
		0 - represents an allocation (KMEMTRACE_EVENT_ALLOC)
		1 - represents a freeing of previously allocated memory
		    (KMEMTRACE_EVENT_FREE)
	Type ID		(1 byte)	Unsigned integer, one of:
		0 - this is a kmalloc() / kfree()
		1 - this is a kmem_cache_alloc() / kmem_cache_free()
		2 - this is a __get_free_pages() et al.
	Event size	(2 bytes)	Unsigned integer representing the
					size of this event. Used to extend
					kmemtrace. Discard the bytes you
					don't know about.
	Sequence number	(4 bytes)	Signed integer used to reorder data
					logged on SMP machines. Wraparound
					must be taken into account, although
					it is unlikely.
	Caller address	(8 bytes)	Return address to the caller.
	Pointer to mem	(8 bytes)	Pointer to target memory area. Can be
					NULL, but not all such calls might be
					recorded.

In case of KMEMTRACE_EVENT_ALLOC events, the next fields follow:

	Requested bytes	(8 bytes)	Total number of requested bytes,
					unsigned, must not be zero.
	Allocated bytes	(8 bytes)	Total number of actually allocated
					bytes, unsigned, must not be lower
					than requested bytes.
	Requested flags	(4 bytes)	GFP flags supplied by the caller.
	Target CPU	(4 bytes)	Signed integer, valid for event id 1.
					If equal to -1, target CPU is the same
					as origin CPU, but the reverse might
					not be true.

The data is made available in the same endianness the machine has.

Other event ids and type ids may be defined and added. Other fields may be
added by increasing event size, but see below for details.
Every modification to the ABI, including new id definitions, is followed
by bumping the ABI version by one.

Adding new data to the packet (features) is done at the end of the mandatory
data:
	Feature size	(2 bytes)
	Feature ID	(1 byte)
	Feature data	(Feature size - 3 bytes)


Users:
	kmemtrace-user - git://repo.or.cz/kmemtrace-user.git

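The record layout described above can be written down as packed C
structures. The sketch below is illustrative only; the struct and
field names are not part of the ABI document, and it assumes the
reader runs on the traced machine (same endianness, as noted above)
and honours "Event size" so that unknown events and future fields
can be skipped.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Core header present in every event, sizes as listed above. */
struct kmemtrace_event_hdr {
        uint8_t  event_id;      /* 0 = ALLOC, 1 = FREE */
        uint8_t  type_id;       /* 0 = kmalloc/kfree, 1 = kmem_cache_*, 2 = pages */
        uint16_t event_size;    /* total size of this event */
        int32_t  seq;           /* sequence number, may wrap */
        uint64_t caller;        /* return address of the caller */
        uint64_t ptr;           /* pointer to the memory area, may be 0 */
} __attribute__((packed));

/* Extra fields that follow the header for KMEMTRACE_EVENT_ALLOC. */
struct kmemtrace_alloc_body {
        uint64_t bytes_req;     /* requested bytes, non-zero */
        uint64_t bytes_alloc;   /* allocated bytes, >= bytes_req */
        uint32_t gfp_flags;     /* GFP flags supplied by the caller */
        int32_t  cpu;           /* target CPU, -1 = same as origin CPU */
} __attribute__((packed));

/* Walk a buffer read from one cpu<n> relay file. */
static void parse_buffer(const uint8_t *buf, size_t len)
{
        size_t off = 0;

        while (off + sizeof(struct kmemtrace_event_hdr) <= len) {
                struct kmemtrace_event_hdr hdr;

                memcpy(&hdr, buf + off, sizeof(hdr));
                if (hdr.event_size < sizeof(hdr) || off + hdr.event_size > len)
                        break;  /* truncated data, stop here */

                if (hdr.event_id == 0 &&
                    hdr.event_size >= sizeof(hdr) + sizeof(struct kmemtrace_alloc_body)) {
                        struct kmemtrace_alloc_body alloc;

                        memcpy(&alloc, buf + off + sizeof(hdr), sizeof(alloc));
                        printf("alloc type %u: %llu/%llu bytes at 0x%llx\n",
                               hdr.type_id,
                               (unsigned long long)alloc.bytes_req,
                               (unsigned long long)alloc.bytes_alloc,
                               (unsigned long long)hdr.ptr);
                }

                /* event_size lets unknown events and added fields be skipped */
                off += hdr.event_size;
        }
}

As described above, the buffer handed to such a parser should come from
a reader that is pinned to the CPU in question and that calls poll()
with an infinite timeout before every read().
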
diff --git a/Documentation/ftrace.txt b/Documentation/ftrace.txt
index 803b1318b13d..fd9a3e693813 100644
--- a/Documentation/ftrace.txt
+++ b/Documentation/ftrace.txt
@@ -15,31 +15,31 @@ Introduction
15 15
16Ftrace is an internal tracer designed to help out developers and 16Ftrace is an internal tracer designed to help out developers and
17designers of systems to find what is going on inside the kernel. 17designers of systems to find what is going on inside the kernel.
18It can be used for debugging or analyzing latencies and performance 18It can be used for debugging or analyzing latencies and
19issues that take place outside of user-space. 19performance issues that take place outside of user-space.
20 20
21Although ftrace is the function tracer, it also includes an 21Although ftrace is the function tracer, it also includes an
22infrastructure that allows for other types of tracing. Some of the 22infrastructure that allows for other types of tracing. Some of
23tracers that are currently in ftrace include a tracer to trace 23the tracers that are currently in ftrace include a tracer to
24context switches, the time it takes for a high priority task to 24trace context switches, the time it takes for a high priority
25run after it was woken up, the time interrupts are disabled, and 25task to run after it was woken up, the time interrupts are
26more (ftrace allows for tracer plugins, which means that the list of 26disabled, and more (ftrace allows for tracer plugins, which
27tracers can always grow). 27means that the list of tracers can always grow).
28 28
29 29
30The File System 30The File System
31--------------- 31---------------
32 32
33Ftrace uses the debugfs file system to hold the control files as well 33Ftrace uses the debugfs file system to hold the control files as
34as the files to display output. 34well as the files to display output.
35 35
36To mount the debugfs system: 36To mount the debugfs system:
37 37
38 # mkdir /debug 38 # mkdir /debug
39 # mount -t debugfs nodev /debug 39 # mount -t debugfs nodev /debug
40 40
41(Note: it is more common to mount at /sys/kernel/debug, but for simplicity 41( Note: it is more common to mount at /sys/kernel/debug, but for
42 this document will use /debug) 42 simplicity this document will use /debug)
43 43
44That's it! (assuming that you have ftrace configured into your kernel) 44That's it! (assuming that you have ftrace configured into your kernel)
45 45
@@ -50,90 +50,124 @@ of ftrace. Here is a list of some of the key files:
50 50
51 Note: all time values are in microseconds. 51 Note: all time values are in microseconds.
52 52
53 current_tracer: This is used to set or display the current tracer 53 current_tracer:
54 that is configured. 54
55 55 This is used to set or display the current tracer
56 available_tracers: This holds the different types of tracers that 56 that is configured.
57 have been compiled into the kernel. The tracers 57
58 listed here can be configured by echoing their name 58 available_tracers:
59 into current_tracer. 59
60 60 This holds the different types of tracers that
61 tracing_enabled: This sets or displays whether the current_tracer 61 have been compiled into the kernel. The
62 is activated and tracing or not. Echo 0 into this 62 tracers listed here can be configured by
63 file to disable the tracer or 1 to enable it. 63 echoing their name into current_tracer.
64 64
65 trace: This file holds the output of the trace in a human readable 65 tracing_enabled:
66 format (described below). 66
67 67 This sets or displays whether the current_tracer
68 latency_trace: This file shows the same trace but the information 68 is activated and tracing or not. Echo 0 into this
69 is organized more to display possible latencies 69 file to disable the tracer or 1 to enable it.
70 in the system (described below). 70
71 71 trace:
72 trace_pipe: The output is the same as the "trace" file but this 72
73 file is meant to be streamed with live tracing. 73 This file holds the output of the trace in a human
74 Reads from this file will block until new data 74 readable format (described below).
75 is retrieved. Unlike the "trace" and "latency_trace" 75
76 files, this file is a consumer. This means reading 76 latency_trace:
77 from this file causes sequential reads to display 77
78 more current data. Once data is read from this 78 This file shows the same trace but the information
79 file, it is consumed, and will not be read 79 is organized more to display possible latencies
80 again with a sequential read. The "trace" and 80 in the system (described below).
81 "latency_trace" files are static, and if the 81
82 tracer is not adding more data, they will display 82 trace_pipe:
83 the same information every time they are read. 83
84 84 The output is the same as the "trace" file but this
85 trace_options: This file lets the user control the amount of data 85 file is meant to be streamed with live tracing.
86 that is displayed in one of the above output 86 Reads from this file will block until new data
87 files. 87 is retrieved. Unlike the "trace" and "latency_trace"
88 88 files, this file is a consumer. This means reading
89 trace_max_latency: Some of the tracers record the max latency. 89 from this file causes sequential reads to display
90 For example, the time interrupts are disabled. 90 more current data. Once data is read from this
91 This time is saved in this file. The max trace 91 file, it is consumed, and will not be read
92 will also be stored, and displayed by either 92 again with a sequential read. The "trace" and
93 "trace" or "latency_trace". A new max trace will 93 "latency_trace" files are static, and if the
94 only be recorded if the latency is greater than 94 tracer is not adding more data, they will display
95 the value in this file. (in microseconds) 95 the same information every time they are read.
96 96
97 buffer_size_kb: This sets or displays the number of kilobytes each CPU 97 trace_options:
98 buffer can hold. The tracer buffers are the same size 98
99 for each CPU. The displayed number is the size of the 99 This file lets the user control the amount of data
100 CPU buffer and not total size of all buffers. The 100 that is displayed in one of the above output
101 trace buffers are allocated in pages (blocks of memory 101 files.
102 that the kernel uses for allocation, usually 4 KB in size). 102
103 If the last page allocated has room for more bytes 103 tracing_max_latency:
104 than requested, the rest of the page will be used, 104
105 making the actual allocation bigger than requested. 105 Some of the tracers record the max latency.
106 (Note, the size may not be a multiple of the page size due 106 For example, the time interrupts are disabled.
107 to buffer managment overhead.) 107 This time is saved in this file. The max trace
108 108 will also be stored, and displayed by either
109 This can only be updated when the current_tracer 109 "trace" or "latency_trace". A new max trace will
110 is set to "nop". 110 only be recorded if the latency is greater than
111 111 the value in this file. (in microseconds)
112 tracing_cpumask: This is a mask that lets the user only trace 112
113 on specified CPUS. The format is a hex string 113 buffer_size_kb:
114 representing the CPUS. 114
115 115 This sets or displays the number of kilobytes each CPU
116 set_ftrace_filter: When dynamic ftrace is configured in (see the 116 buffer can hold. The tracer buffers are the same size
117 section below "dynamic ftrace"), the code is dynamically 117 for each CPU. The displayed number is the size of the
118 modified (code text rewrite) to disable calling of the 118 CPU buffer and not total size of all buffers. The
119 function profiler (mcount). This lets tracing be configured 119 trace buffers are allocated in pages (blocks of memory
120 in with practically no overhead in performance. This also 120 that the kernel uses for allocation, usually 4 KB in size).
121 has a side effect of enabling or disabling specific functions 121 If the last page allocated has room for more bytes
122 to be traced. Echoing names of functions into this file 122 than requested, the rest of the page will be used,
123 will limit the trace to only those functions. 123 making the actual allocation bigger than requested.
124 124 ( Note, the size may not be a multiple of the page size
125 set_ftrace_notrace: This has an effect opposite to that of 125 due to buffer managment overhead. )
126 set_ftrace_filter. Any function that is added here will not 126
127 be traced. If a function exists in both set_ftrace_filter 127 This can only be updated when the current_tracer
128 and set_ftrace_notrace, the function will _not_ be traced. 128 is set to "nop".
129 129
130 set_ftrace_pid: Have the function tracer only trace a single thread. 130 tracing_cpumask:
131 131
132 available_filter_functions: This lists the functions that ftrace 132 This is a mask that lets the user only trace
133 has processed and can trace. These are the function 133 on specified CPUS. The format is a hex string
134 names that you can pass to "set_ftrace_filter" or 134 representing the CPUS.
135 "set_ftrace_notrace". (See the section "dynamic ftrace" 135
136 below for more details.) 136 set_ftrace_filter:
137
138 When dynamic ftrace is configured in (see the
139 section below "dynamic ftrace"), the code is dynamically
140 modified (code text rewrite) to disable calling of the
141 function profiler (mcount). This lets tracing be configured
142 in with practically no overhead in performance. This also
143 has a side effect of enabling or disabling specific functions
144 to be traced. Echoing names of functions into this file
145 will limit the trace to only those functions.
146
147 set_ftrace_notrace:
148
149 This has an effect opposite to that of
150 set_ftrace_filter. Any function that is added here will not
151 be traced. If a function exists in both set_ftrace_filter
152 and set_ftrace_notrace, the function will _not_ be traced.
153
154 set_ftrace_pid:
155
156 Have the function tracer only trace a single thread.
157
158 set_graph_function:
159
160 Set a "trigger" function where tracing should start
161 with the function graph tracer (See the section
162 "dynamic ftrace" for more details).
163
164 available_filter_functions:
165
166 This lists the functions that ftrace
167 has processed and can trace. These are the function
168 names that you can pass to "set_ftrace_filter" or
169 "set_ftrace_notrace". (See the section "dynamic ftrace"
170 below for more details.)
137 171
138 172
139The Tracers 173The Tracers
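
As a rough illustration of how the control files listed above work
together, the following sketch selects a tracer by writing its name
into current_tracer, restricts tracing to a CPU mask, enables tracing,
and then streams records from trace_pipe, which blocks until new data
is available. It is not code from this patch; it assumes debugfs is
mounted at /debug, as in the examples used throughout this document.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACING "/debug/tracing/"

/* Write a short string into one of the tracing control files. */
static int write_str(const char *file, const char *str)
{
        char path[256];
        int fd, ret;

        snprintf(path, sizeof(path), TRACING "%s", file);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = write(fd, str, strlen(str));
        close(fd);
        return ret < 0 ? -1 : 0;
}

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd;

        write_str("current_tracer", "function");     /* pick a tracer */
        write_str("tracing_cpumask", "3");           /* hex mask: CPUs 0 and 1 */
        write_str("tracing_enabled", "1");           /* start tracing */

        /* trace_pipe is a consumer: reads block until data arrives and
         * whatever is read is removed from the buffer. */
        fd = open(TRACING "trace_pipe", O_RDONLY);
        if (fd < 0)
                return 1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}
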
@@ -141,36 +175,66 @@ The Tracers
141 175
142Here is the list of current tracers that may be configured. 176Here is the list of current tracers that may be configured.
143 177
144 function - function tracer that uses mcount to trace all functions. 178 "function"
179
180 Function call tracer to trace all kernel functions.
181
182 "function_graph_tracer"
183
184 Similar to the function tracer except that the
185 function tracer probes the functions on their entry
186 whereas the function graph tracer traces on both entry
187 and exit of the functions. It then provides the ability
188 to draw a graph of function calls similar to C code
189 source.
145 190
146 sched_switch - traces the context switches between tasks. 191 "sched_switch"
147 192
148 irqsoff - traces the areas that disable interrupts and saves 193 Traces the context switches and wakeups between tasks.
149 the trace with the longest max latency.
150 See tracing_max_latency. When a new max is recorded,
151 it replaces the old trace. It is best to view this
152 trace via the latency_trace file.
153 194
154 preemptoff - Similar to irqsoff but traces and records the amount of 195 "irqsoff"
155 time for which preemption is disabled.
156 196
157 preemptirqsoff - Similar to irqsoff and preemptoff, but traces and 197 Traces the areas that disable interrupts and saves
158 records the largest time for which irqs and/or preemption 198 the trace with the longest max latency.
159 is disabled. 199 See tracing_max_latency. When a new max is recorded,
200 it replaces the old trace. It is best to view this
201 trace via the latency_trace file.
160 202
161 wakeup - Traces and records the max latency that it takes for 203 "preemptoff"
162 the highest priority task to get scheduled after
163 it has been woken up.
164 204
165 nop - This is not a tracer. To remove all tracers from tracing 205 Similar to irqsoff but traces and records the amount of
166 simply echo "nop" into current_tracer. 206 time for which preemption is disabled.
207
208 "preemptirqsoff"
209
210 Similar to irqsoff and preemptoff, but traces and
211 records the largest time for which irqs and/or preemption
212 is disabled.
213
214 "wakeup"
215
216 Traces and records the max latency that it takes for
217 the highest priority task to get scheduled after
218 it has been woken up.
219
220 "hw-branch-tracer"
221
222 Uses the BTS CPU feature on x86 CPUs to traces all
223 branches executed.
224
225 "nop"
226
227 This is the "trace nothing" tracer. To remove all
228 tracers from tracing simply echo "nop" into
229 current_tracer.
167 230
168 231
169Examples of using the tracer 232Examples of using the tracer
170---------------------------- 233----------------------------
171 234
172Here are typical examples of using the tracers when controlling them only 235Here are typical examples of using the tracers when controlling
173with the debugfs interface (without using any user-land utilities). 236them only with the debugfs interface (without using any
237user-land utilities).
174 238
175Output format: 239Output format:
176-------------- 240--------------
@@ -187,16 +251,16 @@ Here is an example of the output format of the file "trace"
187 bash-4251 [01] 10152.583855: _atomic_dec_and_lock <-dput 251 bash-4251 [01] 10152.583855: _atomic_dec_and_lock <-dput
188 -------- 252 --------
189 253
190A header is printed with the tracer name that is represented by the trace. 254A header is printed with the tracer name that is represented by
191In this case the tracer is "function". Then a header showing the format. Task 255the trace. In this case the tracer is "function". Then a header
192name "bash", the task PID "4251", the CPU that it was running on 256showing the format. Task name "bash", the task PID "4251", the
193"01", the timestamp in <secs>.<usecs> format, the function name that was 257CPU that it was running on "01", the timestamp in <secs>.<usecs>
194traced "path_put" and the parent function that called this function 258format, the function name that was traced "path_put" and the
195"path_walk". The timestamp is the time at which the function was 259parent function that called this function "path_walk". The
196entered. 260timestamp is the time at which the function was entered.
197 261
198The sched_switch tracer also includes tracing of task wakeups and 262The sched_switch tracer also includes tracing of task wakeups
199context switches. 263and context switches.
200 264
201 ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 2916:115:S 265 ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 2916:115:S
202 ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 10:115:S 266 ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 10:115:S
@@ -205,8 +269,8 @@ context switches.
205 kondemand/1-2916 [01] 1453.070013: 2916:115:S ==> 7:115:R 269 kondemand/1-2916 [01] 1453.070013: 2916:115:S ==> 7:115:R
206 ksoftirqd/1-7 [01] 1453.070013: 7:115:S ==> 0:140:R 270 ksoftirqd/1-7 [01] 1453.070013: 7:115:S ==> 0:140:R
207 271
208Wake ups are represented by a "+" and the context switches are shown as 272Wake ups are represented by a "+" and the context switches are
209"==>". The format is: 273shown as "==>". The format is:
210 274
211 Context switches: 275 Context switches:
212 276
@@ -220,19 +284,20 @@ Wake ups are represented by a "+" and the context switches are shown as
220 284
221 <pid>:<prio>:<state> + <pid>:<prio>:<state> 285 <pid>:<prio>:<state> + <pid>:<prio>:<state>
222 286
223The prio is the internal kernel priority, which is the inverse of the 287The prio is the internal kernel priority, which is the inverse
224priority that is usually displayed by user-space tools. Zero represents 288of the priority that is usually displayed by user-space tools.
225the highest priority (99). Prio 100 starts the "nice" priorities with 289Zero represents the highest priority (99). Prio 100 starts the
226100 being equal to nice -20 and 139 being nice 19. The prio "140" is 290"nice" priorities with 100 being equal to nice -20 and 139 being
227reserved for the idle task which is the lowest priority thread (pid 0). 291nice 19. The prio "140" is reserved for the idle task which is
292the lowest priority thread (pid 0).
228 293
229 294
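
The <pid>:<prio>:<state> records shown above are simple enough to pick
apart with sscanf(). The helper below is a minimal sketch, not part of
the patch; it classifies the record as a wakeup or a context switch
based on the "+" / "==>" separator described above.

#include <stdio.h>
#include <string.h>

struct sched_ev {
        int from_pid, from_prio;
        char from_state;
        int to_pid, to_prio;
        char to_state;
        int is_wakeup;          /* 1 for "+", 0 for "==>" */
};

/* Parse the "<pid>:<prio>:<state> {+|==>} <pid>:<prio>:<state>" part
 * of a sched_switch line (the part after the timestamp). */
static int parse_sched_event(const char *s, struct sched_ev *ev)
{
        char sep[4];

        if (sscanf(s, "%d:%d:%c %3s %d:%d:%c",
                   &ev->from_pid, &ev->from_prio, &ev->from_state,
                   sep,
                   &ev->to_pid, &ev->to_prio, &ev->to_state) != 7)
                return -1;

        if (strcmp(sep, "+") == 0)
                ev->is_wakeup = 1;
        else if (strcmp(sep, "==>") == 0)
                ev->is_wakeup = 0;
        else
                return -1;
        return 0;
}
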
230Latency trace format 295Latency trace format
231-------------------- 296--------------------
232 297
233For traces that display latency times, the latency_trace file gives 298For traces that display latency times, the latency_trace file
234somewhat more information to see why a latency happened. Here is a typical 299gives somewhat more information to see why a latency happened.
235trace. 300Here is a typical trace.
236 301
237# tracer: irqsoff 302# tracer: irqsoff
238# 303#
@@ -259,20 +324,20 @@ irqsoff latency trace v1.1.5 on 2.6.26-rc8
259 <idle>-0 0d.s1 98us : trace_hardirqs_on (do_softirq) 324 <idle>-0 0d.s1 98us : trace_hardirqs_on (do_softirq)
260 325
261 326
327This shows that the current tracer is "irqsoff" tracing the time
328for which interrupts were disabled. It gives the trace version
329and the version of the kernel upon which this was executed on
330(2.6.26-rc8). Then it displays the max latency in microsecs (97
331us). The number of trace entries displayed and the total number
332recorded (both are three: #3/3). The type of preemption that was
333used (PREEMPT). VP, KP, SP, and HP are always zero and are
334reserved for later use. #P is the number of online CPUS (#P:2).
262 335
263This shows that the current tracer is "irqsoff" tracing the time for which 336The task is the process that was running when the latency
264interrupts were disabled. It gives the trace version and the version 337occurred. (swapper pid: 0).
265of the kernel upon which this was executed on (2.6.26-rc8). Then it displays
266the max latency in microsecs (97 us). The number of trace entries displayed
267and the total number recorded (both are three: #3/3). The type of
268preemption that was used (PREEMPT). VP, KP, SP, and HP are always zero
269and are reserved for later use. #P is the number of online CPUS (#P:2).
270
271The task is the process that was running when the latency occurred.
272(swapper pid: 0).
273 338
274The start and stop (the functions in which the interrupts were disabled and 339The start and stop (the functions in which the interrupts were
275enabled respectively) that caused the latencies: 340disabled and enabled respectively) that caused the latencies:
276 341
277 apic_timer_interrupt is where the interrupts were disabled. 342 apic_timer_interrupt is where the interrupts were disabled.
278 do_softirq is where they were enabled again. 343 do_softirq is where they were enabled again.
@@ -308,12 +373,12 @@ The above is mostly meaningful for kernel developers.
308 latency_trace file is relative to the start of the trace. 373 latency_trace file is relative to the start of the trace.
309 374
310 delay: This is just to help catch your eye a bit better. And 375 delay: This is just to help catch your eye a bit better. And
311 needs to be fixed to be only relative to the same CPU. 376 needs to be fixed to be only relative to the same CPU.
312 The marks are determined by the difference between this 377 The marks are determined by the difference between this
313 current trace and the next trace. 378 current trace and the next trace.
314 '!' - greater than preempt_mark_thresh (default 100) 379 '!' - greater than preempt_mark_thresh (default 100)
315 '+' - greater than 1 microsecond 380 '+' - greater than 1 microsecond
316 ' ' - less than or equal to 1 microsecond. 381 ' ' - less than or equal to 1 microsecond.
317 382
318 The rest is the same as the 'trace' file. 383 The rest is the same as the 'trace' file.
319 384
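
The delay annotations follow a simple threshold rule. The helper below
is only a sketch of that rule, using the default preempt_mark_thresh
of 100 microseconds mentioned above; it is not code from the patch.

/* Reproduce the latency_trace delay column: '!' above the preempt
 * mark threshold (default 100 usecs), '+' above 1 usec, ' ' otherwise.
 * delta_usecs is the gap between this trace entry and the next one. */
static char delay_mark(unsigned long delta_usecs)
{
        const unsigned long preempt_mark_thresh = 100;

        if (delta_usecs > preempt_mark_thresh)
                return '!';
        if (delta_usecs > 1)
                return '+';
        return ' ';
}
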
@@ -321,14 +386,15 @@ The above is mostly meaningful for kernel developers.
321trace_options 386trace_options
322------------- 387-------------
323 388
324The trace_options file is used to control what gets printed in the trace 389The trace_options file is used to control what gets printed in
325output. To see what is available, simply cat the file: 390the trace output. To see what is available, simply cat the file:
326 391
327 cat /debug/tracing/trace_options 392 cat /debug/tracing/trace_options
328 print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \ 393 print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
329 noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj 394 noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
330 395
331To disable one of the options, echo in the option prepended with "no". 396To disable one of the options, echo in the option prepended with
397"no".
332 398
333 echo noprint-parent > /debug/tracing/trace_options 399 echo noprint-parent > /debug/tracing/trace_options
334 400
@@ -338,8 +404,8 @@ To enable an option, leave off the "no".
338 404
339Here are the available options: 405Here are the available options:
340 406
341 print-parent - On function traces, display the calling function 407 print-parent - On function traces, display the calling (parent)
342 as well as the function being traced. 408 function as well as the function being traced.
343 409
344 print-parent: 410 print-parent:
345 bash-4000 [01] 1477.606694: simple_strtoul <-strict_strtoul 411 bash-4000 [01] 1477.606694: simple_strtoul <-strict_strtoul
@@ -348,15 +414,16 @@ Here are the available options:
348 bash-4000 [01] 1477.606694: simple_strtoul 414 bash-4000 [01] 1477.606694: simple_strtoul
349 415
350 416
351 sym-offset - Display not only the function name, but also the offset 417 sym-offset - Display not only the function name, but also the
352 in the function. For example, instead of seeing just 418 offset in the function. For example, instead of
353 "ktime_get", you will see "ktime_get+0xb/0x20". 419 seeing just "ktime_get", you will see
420 "ktime_get+0xb/0x20".
354 421
355 sym-offset: 422 sym-offset:
356 bash-4000 [01] 1477.606694: simple_strtoul+0x6/0xa0 423 bash-4000 [01] 1477.606694: simple_strtoul+0x6/0xa0
357 424
358 sym-addr - this will also display the function address as well as 425 sym-addr - this will also display the function address as well
359 the function name. 426 as the function name.
360 427
361 sym-addr: 428 sym-addr:
362 bash-4000 [01] 1477.606694: simple_strtoul <c0339346> 429 bash-4000 [01] 1477.606694: simple_strtoul <c0339346>
@@ -366,35 +433,41 @@ Here are the available options:
366 bash 4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \ 433 bash 4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \
367 (+0.000ms): simple_strtoul (strict_strtoul) 434 (+0.000ms): simple_strtoul (strict_strtoul)
368 435
369 raw - This will display raw numbers. This option is best for use with 436 raw - This will display raw numbers. This option is best for
370 user applications that can translate the raw numbers better than 437 use with user applications that can translate the raw
371 having it done in the kernel. 438 numbers better than having it done in the kernel.
372 439
373 hex - Similar to raw, but the numbers will be in a hexadecimal format. 440 hex - Similar to raw, but the numbers will be in a hexadecimal
441 format.
374 442
375 bin - This will print out the formats in raw binary. 443 bin - This will print out the formats in raw binary.
376 444
377 block - TBD (needs update) 445 block - TBD (needs update)
378 446
379 stacktrace - This is one of the options that changes the trace itself. 447 stacktrace - This is one of the options that changes the trace
380 When a trace is recorded, so is the stack of functions. 448 itself. When a trace is recorded, so is the stack
381 This allows for back traces of trace sites. 449 of functions. This allows for back traces of
450 trace sites.
382 451
383 userstacktrace - This option changes the trace. 452 userstacktrace - This option changes the trace. It records a
384 It records a stacktrace of the current userspace thread. 453 stacktrace of the current userspace thread.
385 454
386 sym-userobj - when user stacktrace are enabled, look up which object the 455 sym-userobj - when user stacktrace are enabled, look up which
387 address belongs to, and print a relative address 456 object the address belongs to, and print a
388 This is especially useful when ASLR is on, otherwise you don't 457 relative address. This is especially useful when
389 get a chance to resolve the address to object/file/line after the app is no 458 ASLR is on, otherwise you don't get a chance to
390 longer running 459 resolve the address to object/file/line after
460 the app is no longer running
391 461
392 The lookup is performed when you read trace,trace_pipe,latency_trace. Example: 462 The lookup is performed when you read
463 trace,trace_pipe,latency_trace. Example:
393 464
394 a.out-1623 [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0 465 a.out-1623 [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0
395x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6] 466x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
396 467
397 sched-tree - TBD (any users??) 468 sched-tree - trace all tasks that are on the runqueue, at
469 every scheduling event. Will add overhead if
470 there's a lot of tasks running at once.
398 471
399 472
400sched_switch 473sched_switch
@@ -431,18 +504,19 @@ of how to use it.
431 [...] 504 [...]
432 505
433 506
434As we have discussed previously about this format, the header shows 507As we have discussed previously about this format, the header
435the name of the trace and points to the options. The "FUNCTION" 508shows the name of the trace and points to the options. The
436is a misnomer since here it represents the wake ups and context 509"FUNCTION" is a misnomer since here it represents the wake ups
437switches. 510and context switches.
438 511
439The sched_switch file only lists the wake ups (represented with '+') 512The sched_switch file only lists the wake ups (represented with
440and context switches ('==>') with the previous task or current task 513'+') and context switches ('==>') with the previous task or
441first followed by the next task or task waking up. The format for both 514current task first followed by the next task or task waking up.
442of these is PID:KERNEL-PRIO:TASK-STATE. Remember that the KERNEL-PRIO 515The format for both of these is PID:KERNEL-PRIO:TASK-STATE.
443is the inverse of the actual priority with zero (0) being the highest 516Remember that the KERNEL-PRIO is the inverse of the actual
444priority and the nice values starting at 100 (nice -20). Below is 517priority with zero (0) being the highest priority and the nice
445a quick chart to map the kernel priority to user land priorities. 518values starting at 100 (nice -20). Below is a quick chart to map
519the kernel priority to user land priorities.
446 520
447 Kernel priority: 0 to 99 ==> user RT priority 99 to 0 521 Kernel priority: 0 to 99 ==> user RT priority 99 to 0
448 Kernel priority: 100 to 139 ==> user nice -20 to 19 522 Kernel priority: 100 to 139 ==> user nice -20 to 19
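
The chart above translates directly into a couple of helpers. The
sketch below is illustrative only; it simply restates the mapping
(kernel 0 to 99 is RT priority 99 to 0, kernel 100 to 139 is nice -20
to 19, and 140 is reserved for the idle task).

#include <stdio.h>

/* Kernel priority 0..99 maps to user RT priority 99..0. */
static int kernel_prio_to_rt(int kprio)
{
        return 99 - kprio;
}

/* Kernel priority 100..139 maps to nice -20..19. */
static int kernel_prio_to_nice(int kprio)
{
        return kprio - 120;
}

static void print_prio(int kprio)
{
        if (kprio < 100)
                printf("kernel %d = RT priority %d\n", kprio, kernel_prio_to_rt(kprio));
        else if (kprio < 140)
                printf("kernel %d = nice %d\n", kprio, kernel_prio_to_nice(kprio));
        else
                printf("kernel %d = idle task\n", kprio);
}

int main(void)
{
        print_prio(0);          /* RT priority 99 */
        print_prio(120);        /* nice 0 */
        print_prio(140);        /* idle task */
        return 0;
}
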
@@ -463,10 +537,10 @@ The task states are:
463ftrace_enabled 537ftrace_enabled
464-------------- 538--------------
465 539
466The following tracers (listed below) give different output depending 540The following tracers (listed below) give different output
467on whether or not the sysctl ftrace_enabled is set. To set ftrace_enabled, 541depending on whether or not the sysctl ftrace_enabled is set. To
468one can either use the sysctl function or set it via the proc 542set ftrace_enabled, one can either use the sysctl function or
469file system interface. 543set it via the proc file system interface.
470 544
471 sysctl kernel.ftrace_enabled=1 545 sysctl kernel.ftrace_enabled=1
472 546
@@ -474,12 +548,12 @@ file system interface.
474 548
475 echo 1 > /proc/sys/kernel/ftrace_enabled 549 echo 1 > /proc/sys/kernel/ftrace_enabled
476 550
477To disable ftrace_enabled simply replace the '1' with '0' in 551To disable ftrace_enabled simply replace the '1' with '0' in the
478the above commands. 552above commands.
479 553
480When ftrace_enabled is set the tracers will also record the functions 554When ftrace_enabled is set the tracers will also record the
481that are within the trace. The descriptions of the tracers 555functions that are within the trace. The descriptions of the
482will also show an example with ftrace enabled. 556tracers will also show an example with ftrace enabled.
483 557
484 558
485irqsoff 559irqsoff
@@ -487,17 +561,18 @@ irqsoff
487 561
488When interrupts are disabled, the CPU can not react to any other 562When interrupts are disabled, the CPU can not react to any other
489external event (besides NMIs and SMIs). This prevents the timer 563external event (besides NMIs and SMIs). This prevents the timer
490interrupt from triggering or the mouse interrupt from letting the 564interrupt from triggering or the mouse interrupt from letting
491kernel know of a new mouse event. The result is a latency with the 565the kernel know of a new mouse event. The result is a latency
492reaction time. 566with the reaction time.
493 567
494The irqsoff tracer tracks the time for which interrupts are disabled. 568The irqsoff tracer tracks the time for which interrupts are
495When a new maximum latency is hit, the tracer saves the trace leading up 569disabled. When a new maximum latency is hit, the tracer saves
496to that latency point so that every time a new maximum is reached, the old 570the trace leading up to that latency point so that every time a
497saved trace is discarded and the new trace is saved. 571new maximum is reached, the old saved trace is discarded and the
572new trace is saved.
498 573
499To reset the maximum, echo 0 into tracing_max_latency. Here is an 574To reset the maximum, echo 0 into tracing_max_latency. Here is
500example: 575an example:
501 576
502 # echo irqsoff > /debug/tracing/current_tracer 577 # echo irqsoff > /debug/tracing/current_tracer
503 # echo 0 > /debug/tracing/tracing_max_latency 578 # echo 0 > /debug/tracing/tracing_max_latency
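
The sequence of echo commands above can also be driven from a small
program. The sketch below is illustrative only (it again assumes the
/debug mount point used throughout this document): it resets
tracing_max_latency, arms the irqsoff tracer for a while, and then
reads back the recorded worst-case latency in microseconds.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void put(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd >= 0) {
                write(fd, val, strlen(val));
                close(fd);
        }
}

int main(void)
{
        char buf[64];
        int fd, n;

        put("/debug/tracing/current_tracer", "irqsoff");
        put("/debug/tracing/tracing_max_latency", "0");  /* reset the max */
        put("/debug/tracing/tracing_enabled", "1");

        sleep(10);                                       /* let a workload run */

        put("/debug/tracing/tracing_enabled", "0");
        fd = open("/debug/tracing/tracing_max_latency", O_RDONLY);
        if (fd >= 0) {
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        printf("worst irqs-off latency: %s usecs\n", buf);
                }
                close(fd);
        }
        return 0;
}
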
@@ -532,10 +607,11 @@ irqsoff latency trace v1.1.5 on 2.6.26
532 607
533 608
534Here we see that that we had a latency of 12 microsecs (which is 609Here we see that that we had a latency of 12 microsecs (which is
535very good). The _write_lock_irq in sys_setpgid disabled interrupts. 610very good). The _write_lock_irq in sys_setpgid disabled
536The difference between the 12 and the displayed timestamp 14us occurred 611interrupts. The difference between the 12 and the displayed
537because the clock was incremented between the time of recording the max 612timestamp 14us occurred because the clock was incremented
538latency and the time of recording the function that had that latency. 613between the time of recording the max latency and the time of
614recording the function that had that latency.
539 615
540Note the above example had ftrace_enabled not set. If we set the 616Note the above example had ftrace_enabled not set. If we set the
541ftrace_enabled, we get a much larger output: 617ftrace_enabled, we get a much larger output:
@@ -586,24 +662,24 @@ irqsoff latency trace v1.1.5 on 2.6.26-rc8
586 662
587 663
588Here we traced a 50 microsecond latency. But we also see all the 664Here we traced a 50 microsecond latency. But we also see all the
589functions that were called during that time. Note that by enabling 665functions that were called during that time. Note that by
590function tracing, we incur an added overhead. This overhead may 666enabling function tracing, we incur an added overhead. This
591extend the latency times. But nevertheless, this trace has provided 667overhead may extend the latency times. But nevertheless, this
592some very helpful debugging information. 668trace has provided some very helpful debugging information.
593 669
594 670
595preemptoff 671preemptoff
596---------- 672----------
597 673
598When preemption is disabled, we may be able to receive interrupts but 674When preemption is disabled, we may be able to receive
599the task cannot be preempted and a higher priority task must wait 675interrupts but the task cannot be preempted and a higher
600for preemption to be enabled again before it can preempt a lower 676priority task must wait for preemption to be enabled again
601priority task. 677before it can preempt a lower priority task.
602 678
603The preemptoff tracer traces the places that disable preemption. 679The preemptoff tracer traces the places that disable preemption.
604Like the irqsoff tracer, it records the maximum latency for which preemption 680Like the irqsoff tracer, it records the maximum latency for
605was disabled. The control of preemptoff tracer is much like the irqsoff 681which preemption was disabled. The control of preemptoff tracer
606tracer. 682is much like the irqsoff tracer.
607 683
608 # echo preemptoff > /debug/tracing/current_tracer 684 # echo preemptoff > /debug/tracing/current_tracer
609 # echo 0 > /debug/tracing/tracing_max_latency 685 # echo 0 > /debug/tracing/tracing_max_latency
@@ -637,11 +713,12 @@ preemptoff latency trace v1.1.5 on 2.6.26-rc8
637 sshd-4261 0d.s1 30us : trace_preempt_on (__do_softirq) 713 sshd-4261 0d.s1 30us : trace_preempt_on (__do_softirq)
638 714
639 715
640This has some more changes. Preemption was disabled when an interrupt 716This has some more changes. Preemption was disabled when an
641came in (notice the 'h'), and was enabled while doing a softirq. 717interrupt came in (notice the 'h'), and was enabled while doing
642(notice the 's'). But we also see that interrupts have been disabled 718a softirq. (notice the 's'). But we also see that interrupts
643when entering the preempt off section and leaving it (the 'd'). 719have been disabled when entering the preempt off section and
644We do not know if interrupts were enabled in the mean time. 720leaving it (the 'd'). We do not know if interrupts were enabled
721in the mean time.
645 722
646# tracer: preemptoff 723# tracer: preemptoff
647# 724#
@@ -700,28 +777,30 @@ preemptoff latency trace v1.1.5 on 2.6.26-rc8
700 sshd-4261 0d.s1 64us : trace_preempt_on (__do_softirq) 777 sshd-4261 0d.s1 64us : trace_preempt_on (__do_softirq)
701 778
702 779
703The above is an example of the preemptoff trace with ftrace_enabled 780The above is an example of the preemptoff trace with
704set. Here we see that interrupts were disabled the entire time. 781ftrace_enabled set. Here we see that interrupts were disabled
705The irq_enter code lets us know that we entered an interrupt 'h'. 782the entire time. The irq_enter code lets us know that we entered
706Before that, the functions being traced still show that it is not 783an interrupt 'h'. Before that, the functions being traced still
707in an interrupt, but we can see from the functions themselves that 784show that it is not in an interrupt, but we can see from the
708this is not the case. 785functions themselves that this is not the case.
709 786
710Notice that __do_softirq when called does not have a preempt_count. 787Notice that __do_softirq when called does not have a
711It may seem that we missed a preempt enabling. What really happened 788preempt_count. It may seem that we missed a preempt enabling.
712is that the preempt count is held on the thread's stack and we 789What really happened is that the preempt count is held on the
713switched to the softirq stack (4K stacks in effect). The code 790thread's stack and we switched to the softirq stack (4K stacks
714does not copy the preempt count, but because interrupts are disabled, 791in effect). The code does not copy the preempt count, but
715we do not need to worry about it. Having a tracer like this is good 792because interrupts are disabled, we do not need to worry about
716for letting people know what really happens inside the kernel. 793it. Having a tracer like this is good for letting people know
794what really happens inside the kernel.
717 795
718 796
719preemptirqsoff 797preemptirqsoff
720-------------- 798--------------
721 799
722Knowing the locations that have interrupts disabled or preemption 800Knowing the locations that have interrupts disabled or
723disabled for the longest times is helpful. But sometimes we would 801preemption disabled for the longest times is helpful. But
724like to know when either preemption and/or interrupts are disabled. 802sometimes we would like to know when either preemption and/or
803interrupts are disabled.
725 804
726Consider the following code: 805Consider the following code:
727 806
@@ -741,11 +820,13 @@ The preemptoff tracer will record the total length of
741call_function_with_irqs_and_preemption_off() and 820call_function_with_irqs_and_preemption_off() and
742call_function_with_preemption_off(). 821call_function_with_preemption_off().
743 822
744But neither will trace the time that interrupts and/or preemption 823But neither will trace the time that interrupts and/or
745is disabled. This total time is the time that we can not schedule. 824preemption is disabled. This total time is the time that we can
746To record this time, use the preemptirqsoff tracer. 825not schedule. To record this time, use the preemptirqsoff
826tracer.
747 827
748Again, using this trace is much like the irqsoff and preemptoff tracers. 828Again, using this trace is much like the irqsoff and preemptoff
829tracers.
749 830
750 # echo preemptirqsoff > /debug/tracing/current_tracer 831 # echo preemptirqsoff > /debug/tracing/current_tracer
751 # echo 0 > /debug/tracing/tracing_max_latency 832 # echo 0 > /debug/tracing/tracing_max_latency
@@ -781,9 +862,10 @@ preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
781 862
782 863
783The trace_hardirqs_off_thunk is called from assembly on x86 when 864The trace_hardirqs_off_thunk is called from assembly on x86 when
784interrupts are disabled in the assembly code. Without the function 865interrupts are disabled in the assembly code. Without the
785tracing, we do not know if interrupts were enabled within the preemption 866function tracing, we do not know if interrupts were enabled
786points. We do see that it started with preemption enabled. 867within the preemption points. We do see that it started with
868preemption enabled.
787 869
788Here is a trace with ftrace_enabled set: 870Here is a trace with ftrace_enabled set:
789 871
@@ -871,40 +953,42 @@ preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
871 sshd-4261 0d.s1 105us : trace_preempt_on (__do_softirq) 953 sshd-4261 0d.s1 105us : trace_preempt_on (__do_softirq)
872 954
873 955
874This is a very interesting trace. It started with the preemption of 956This is a very interesting trace. It started with the preemption
875the ls task. We see that the task had the "need_resched" bit set 957of the ls task. We see that the task had the "need_resched" bit
876via the 'N' in the trace. Interrupts were disabled before the spin_lock 958set via the 'N' in the trace. Interrupts were disabled before
877at the beginning of the trace. We see that a schedule took place to run 959the spin_lock at the beginning of the trace. We see that a
878sshd. When the interrupts were enabled, we took an interrupt. 960schedule took place to run sshd. When the interrupts were
879On return from the interrupt handler, the softirq ran. We took another 961enabled, we took an interrupt. On return from the interrupt
880interrupt while running the softirq as we see from the capital 'H'. 962handler, the softirq ran. We took another interrupt while
963running the softirq as we see from the capital 'H'.
881 964
882 965
883wakeup 966wakeup
884------ 967------
885 968
886In a Real-Time environment it is very important to know the wakeup 969In a Real-Time environment it is very important to know the
887time it takes for the highest priority task that is woken up to the 970wakeup time it takes for the highest priority task that is woken
888time that it executes. This is also known as "schedule latency". 971up to the time that it executes. This is also known as "schedule
889I stress the point that this is about RT tasks. It is also important 972latency". I stress the point that this is about RT tasks. It is
890to know the scheduling latency of non-RT tasks, but the average 973also important to know the scheduling latency of non-RT tasks,
891schedule latency is better for non-RT tasks. Tools like 974but the average schedule latency is better for non-RT tasks.
892LatencyTop are more appropriate for such measurements. 975Tools like LatencyTop are more appropriate for such
976measurements.
893 977
894Real-Time environments are interested in the worst case latency. 978Real-Time environments are interested in the worst case latency.
895That is the longest latency it takes for something to happen, and 979That is the longest latency it takes for something to happen,
896not the average. We can have a very fast scheduler that may only 980and not the average. We can have a very fast scheduler that may
897have a large latency once in a while, but that would not work well 981only have a large latency once in a while, but that would not
898with Real-Time tasks. The wakeup tracer was designed to record 982work well with Real-Time tasks. The wakeup tracer was designed
899the worst case wakeups of RT tasks. Non-RT tasks are not recorded 983to record the worst case wakeups of RT tasks. Non-RT tasks are
900because the tracer only records one worst case and tracing non-RT 984not recorded because the tracer only records one worst case and
901tasks that are unpredictable will overwrite the worst case latency 985tracing non-RT tasks that are unpredictable will overwrite the
902of RT tasks. 986worst case latency of RT tasks.
903 987
904Since this tracer only deals with RT tasks, we will run this slightly 988Since this tracer only deals with RT tasks, we will run this
905differently than we did with the previous tracers. Instead of performing 989slightly differently than we did with the previous tracers.
906an 'ls', we will run 'sleep 1' under 'chrt' which changes the 990Instead of performing an 'ls', we will run 'sleep 1' under
907priority of the task. 991'chrt' which changes the priority of the task.
908 992
909 # echo wakeup > /debug/tracing/current_tracer 993 # echo wakeup > /debug/tracing/current_tracer
910 # echo 0 > /debug/tracing/tracing_max_latency 994 # echo 0 > /debug/tracing/tracing_max_latency
@@ -934,17 +1018,16 @@ wakeup latency trace v1.1.5 on 2.6.26-rc8
934 <idle>-0 1d..4 4us : schedule (cpu_idle) 1018 <idle>-0 1d..4 4us : schedule (cpu_idle)
935 1019
936 1020
1021Running this on an idle system, we see that it only took 4
1022microseconds to perform the task switch. Note, since the trace
1023marker in the schedule is before the actual "switch", we stop
1024the tracing when the recorded task is about to schedule in. This
1025may change if we add a new marker at the end of the scheduler.
937 1026
938Running this on an idle system, we see that it only took 4 microseconds 1027Notice that the recorded task is 'sleep' with the PID of 4901
939to perform the task switch. Note, since the trace marker in the 1028and it has an rt_prio of 5. This priority is user-space priority
940schedule is before the actual "switch", we stop the tracing when 1029and not the internal kernel priority. The policy is 1 for
941the recorded task is about to schedule in. This may change if 1030SCHED_FIFO and 2 for SCHED_RR.
942we add a new marker at the end of the scheduler.
943
944Notice that the recorded task is 'sleep' with the PID of 4901 and it
945has an rt_prio of 5. This priority is user-space priority and not
946the internal kernel priority. The policy is 1 for SCHED_FIFO and 2
947for SCHED_RR.
948 1031
949Doing the same with chrt -r 5 and ftrace_enabled set. 1032Doing the same with chrt -r 5 and ftrace_enabled set.
950 1033
@@ -1001,24 +1084,25 @@ ksoftirq-7 1d..6 49us : _spin_unlock (tracing_record_cmdline)
1001ksoftirq-7 1d..6 49us : sub_preempt_count (_spin_unlock) 1084ksoftirq-7 1d..6 49us : sub_preempt_count (_spin_unlock)
1002ksoftirq-7 1d..4 50us : schedule (__cond_resched) 1085ksoftirq-7 1d..4 50us : schedule (__cond_resched)
1003 1086
1004The interrupt went off while running ksoftirqd. This task runs at 1087The interrupt went off while running ksoftirqd. This task runs
1005SCHED_OTHER. Why did not we see the 'N' set early? This may be 1088at SCHED_OTHER. Why did not we see the 'N' set early? This may
1006a harmless bug with x86_32 and 4K stacks. On x86_32 with 4K stacks 1089be a harmless bug with x86_32 and 4K stacks. On x86_32 with 4K
1007configured, the interrupt and softirq run with their own stack. 1090stacks configured, the interrupt and softirq run with their own
1008Some information is held on the top of the task's stack (need_resched 1091stack. Some information is held on the top of the task's stack
1009and preempt_count are both stored there). The setting of the NEED_RESCHED 1092(need_resched and preempt_count are both stored there). The
1010bit is done directly to the task's stack, but the reading of the 1093setting of the NEED_RESCHED bit is done directly to the task's
1011NEED_RESCHED is done by looking at the current stack, which in this case 1094stack, but the reading of the NEED_RESCHED is done by looking at
1012is the stack for the hard interrupt. This hides the fact that NEED_RESCHED 1095the current stack, which in this case is the stack for the hard
1013has been set. We do not see the 'N' until we switch back to the task's 1096interrupt. This hides the fact that NEED_RESCHED has been set.
1097We do not see the 'N' until we switch back to the task's
1014assigned stack. 1098assigned stack.
1015 1099
1016function 1100function
1017-------- 1101--------
1018 1102
1019This tracer is the function tracer. Enabling the function tracer 1103This tracer is the function tracer. Enabling the function tracer
1020can be done from the debug file system. Make sure the ftrace_enabled is 1104can be done from the debug file system. Make sure the
1021set; otherwise this tracer is a nop. 1105ftrace_enabled is set; otherwise this tracer is a nop.
1022 1106
1023 # sysctl kernel.ftrace_enabled=1 1107 # sysctl kernel.ftrace_enabled=1
1024 # echo function > /debug/tracing/current_tracer 1108 # echo function > /debug/tracing/current_tracer
@@ -1048,14 +1132,15 @@ set; otherwise this tracer is a nop.
1048[...] 1132[...]
1049 1133
1050 1134
1051Note: function tracer uses ring buffers to store the above entries. 1135Note: function tracer uses ring buffers to store the above
1052The newest data may overwrite the oldest data. Sometimes using echo to 1136entries. The newest data may overwrite the oldest data.
1053stop the trace is not sufficient because the tracing could have overwritten 1137Sometimes using echo to stop the trace is not sufficient because
1054the data that you wanted to record. For this reason, it is sometimes better to 1138the tracing could have overwritten the data that you wanted to
1055disable tracing directly from a program. This allows you to stop the 1139record. For this reason, it is sometimes better to disable
1056tracing at the point that you hit the part that you are interested in. 1140tracing directly from a program. This allows you to stop the
1057To disable the tracing directly from a C program, something like following 1141tracing at the point that you hit the part that you are
1058code snippet can be used: 1142interested in. To disable the tracing directly from a C program,
1143something like following code snippet can be used:
1059 1144
1060int trace_fd; 1145int trace_fd;
1061[...] 1146[...]
@@ -1070,10 +1155,10 @@ int main(int argc, char *argv[]) {
1070} 1155}
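Since the snippet above is abbreviated, a complete version might look
roughly like the sketch below. condition_hit() is a hypothetical
placeholder for whatever event you want to stop on, and the
tracing_enabled file name and /debug path are assumptions taken from
this document; adjust them for your kernel and mount point.

/*
 * A fuller, still hypothetical version of the abbreviated snippet
 * above. condition_hit() stands in for the event of interest.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int trace_fd;   /* global, as in the abbreviated snippet */

static int condition_hit(void)
{
        /* placeholder: return 1 once the interesting event happened */
        sleep(1);
        return 1;
}

int main(int argc, char *argv[])
{
        (void)argc;
        (void)argv;

        trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY);
        if (trace_fd < 0) {
                perror("tracing_enabled");
                return 1;
        }

        /* make sure tracing runs while we wait */
        if (write(trace_fd, "1", 1) != 1)
                perror("write");

        while (!condition_hit())
                ;

        /* stop the trace right at the point of interest */
        if (write(trace_fd, "0", 1) != 1)
                perror("write");
        close(trace_fd);
        return 0;
}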
1071 1156
1072Note: Here we hard coded the path name. The debugfs mount is not 1157Note: Here we hard coded the path name. The debugfs mount is not
1073guaranteed to be at /debug (and is more commonly at /sys/kernel/debug). 1158guaranteed to be at /debug (and is more commonly at
1074For simple one time traces, the above is sufficient. For anything else, 1159sufficient. For anything else, a search through /proc/mounts may
1075a search through /proc/mounts may be needed to find where the debugfs 1160sufficent. For anything else, a search through /proc/mounts may
1076file-system is mounted. 1161be needed to find where the debugfs file-system is mounted.
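When the mount point is not known in advance, the /proc/mounts scan
mentioned above can be done with getmntent(). A minimal sketch, which
falls back to /sys/kernel/debug if no debugfs entry is found:

/*
 * Sketch: locate the debugfs mount point by scanning /proc/mounts.
 * Error handling is intentionally minimal.
 */
#include <mntent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *debugfs = "/sys/kernel/debug";   /* common default */
        char path[4096];
        struct mntent *m;
        FILE *fp;

        fp = setmntent("/proc/mounts", "r");
        if (fp) {
                while ((m = getmntent(fp)) != NULL) {
                        if (strcmp(m->mnt_type, "debugfs") == 0) {
                                debugfs = m->mnt_dir;
                                break;
                        }
                }
        }

        snprintf(path, sizeof(path), "%s/tracing/current_tracer", debugfs);
        printf("%s\n", path);

        if (fp)
                endmntent(fp);
        return 0;
}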
1077 1162
1078 1163
1079Single thread tracing 1164Single thread tracing
@@ -1152,49 +1237,297 @@ int main (int argc, char **argv)
1152 return 0; 1237 return 0;
1153} 1238}
1154 1239
1240
1241hw-branch-tracer (x86 only)
1242---------------------------
1243
1244This tracer uses the x86 last branch tracing hardware feature to
1245collect a branch trace on all cpus with relatively low overhead.
1246
1247The tracer uses a fixed-size circular buffer per cpu and only
1248traces ring 0 branches. The trace file dumps that buffer in the
1249following format:
1250
1251# tracer: hw-branch-tracer
1252#
1253# CPU# TO <- FROM
1254 0 scheduler_tick+0xb5/0x1bf <- task_tick_idle+0x5/0x6
1255 2 run_posix_cpu_timers+0x2b/0x72a <- run_posix_cpu_timers+0x25/0x72a
1256 0 scheduler_tick+0x139/0x1bf <- scheduler_tick+0xed/0x1bf
1257 0 scheduler_tick+0x17c/0x1bf <- scheduler_tick+0x148/0x1bf
1258 2 run_posix_cpu_timers+0x9e/0x72a <- run_posix_cpu_timers+0x5e/0x72a
1259 0 scheduler_tick+0x1b6/0x1bf <- scheduler_tick+0x1aa/0x1bf
1260
1261
1262On a kernel oops, the tracer may be used to dump the trace for
1263the oops'ing cpu into the system log. To enable this,
1264ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
1265can either use sysctl or set it via the proc filesystem
1266interface.
1267
1268 sysctl kernel.ftrace_dump_on_oops=1
1269
1270or
1271
1272 echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
1273
1274
1275Here's an example of such a dump after a null pointer
1276dereference in a kernel module:
1277
1278[57848.105921] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
1279[57848.106019] IP: [<ffffffffa0000006>] open+0x6/0x14 [oops]
1280[57848.106019] PGD 2354e9067 PUD 2375e7067 PMD 0
1281[57848.106019] Oops: 0002 [#1] SMP
1282[57848.106019] last sysfs file: /sys/devices/pci0000:00/0000:00:1e.0/0000:20:05.0/local_cpus
1283[57848.106019] Dumping ftrace buffer:
1284[57848.106019] ---------------------------------
1285[...]
1286[57848.106019] 0 chrdev_open+0xe6/0x165 <- cdev_put+0x23/0x24
1287[57848.106019] 0 chrdev_open+0x117/0x165 <- chrdev_open+0xfa/0x165
1288[57848.106019] 0 chrdev_open+0x120/0x165 <- chrdev_open+0x11c/0x165
1289[57848.106019] 0 chrdev_open+0x134/0x165 <- chrdev_open+0x12b/0x165
1290[57848.106019] 0 open+0x0/0x14 [oops] <- chrdev_open+0x144/0x165
1291[57848.106019] 0 page_fault+0x0/0x30 <- open+0x6/0x14 [oops]
1292[57848.106019] 0 error_entry+0x0/0x5b <- page_fault+0x4/0x30
1293[57848.106019] 0 error_kernelspace+0x0/0x31 <- error_entry+0x59/0x5b
1294[57848.106019] 0 error_sti+0x0/0x1 <- error_kernelspace+0x2d/0x31
1295[57848.106019] 0 page_fault+0x9/0x30 <- error_sti+0x0/0x1
1296[57848.106019] 0 do_page_fault+0x0/0x881 <- page_fault+0x1a/0x30
1297[...]
1298[57848.106019] 0 do_page_fault+0x66b/0x881 <- is_prefetch+0x1ee/0x1f2
1299[57848.106019] 0 do_page_fault+0x6e0/0x881 <- do_page_fault+0x67a/0x881
1300[57848.106019] 0 oops_begin+0x0/0x96 <- do_page_fault+0x6e0/0x881
1301[57848.106019] 0 trace_hw_branch_oops+0x0/0x2d <- oops_begin+0x9/0x96
1302[...]
1303[57848.106019] 0 ds_suspend_bts+0x2a/0xe3 <- ds_suspend_bts+0x1a/0xe3
1304[57848.106019] ---------------------------------
1305[57848.106019] CPU 0
1306[57848.106019] Modules linked in: oops
1307[57848.106019] Pid: 5542, comm: cat Tainted: G W 2.6.28 #23
1308[57848.106019] RIP: 0010:[<ffffffffa0000006>] [<ffffffffa0000006>] open+0x6/0x14 [oops]
1309[57848.106019] RSP: 0018:ffff880235457d48 EFLAGS: 00010246
1310[...]
1311
1312
1313function graph tracer
1314---------------------------
1315
1316This tracer is similar to the function tracer except that it
1317probes a function on its entry and its exit. This is done by
1318using a dynamically allocated stack of return addresses in each
1319task_struct. On function entry the tracer overwrites the return
1320address of each function traced to set a custom probe. Thus the
1321original return address is stored on the stack of return addresses
1322in the task_struct.
1323
1324Probing on both ends of a function leads to special features
1325such as:
1326
1327- measuring a function's execution time
1328- having a reliable call stack to draw a graph of function calls
1329
1330This tracer is useful in several situations:
1331
1332- you want to find the reason for strange kernel behavior and
1333 need to see in detail what happens anywhere (or in specific
1334 areas).
1335
1336- you are experiencing weird latencies but it's difficult to
1337 find their origin.
1338
1339- you want to quickly find which path is taken by a specific
1340 function.
1341
1342- you just want to peek inside a working kernel and see what
1343 happens there.
1344
1345# tracer: function_graph
1346#
1347# CPU DURATION FUNCTION CALLS
1348# | | | | | | |
1349
1350 0) | sys_open() {
1351 0) | do_sys_open() {
1352 0) | getname() {
1353 0) | kmem_cache_alloc() {
1354 0) 1.382 us | __might_sleep();
1355 0) 2.478 us | }
1356 0) | strncpy_from_user() {
1357 0) | might_fault() {
1358 0) 1.389 us | __might_sleep();
1359 0) 2.553 us | }
1360 0) 3.807 us | }
1361 0) 7.876 us | }
1362 0) | alloc_fd() {
1363 0) 0.668 us | _spin_lock();
1364 0) 0.570 us | expand_files();
1365 0) 0.586 us | _spin_unlock();
1366
1367
1368There are several columns that can be dynamically
1369enabled/disabled. You can use any combination of options you
1370want, depending on your needs.
1371
1372- The cpu number on which the function executed is enabled by
1373 default. It is sometimes better to trace only one cpu (see the
1374 tracing_cpumask file), or you might sometimes see function
1375 calls out of order while the trace switches cpus.
1376
1377 hide: echo nofuncgraph-cpu > /debug/tracing/trace_options
1378 show: echo funcgraph-cpu > /debug/tracing/trace_options
1379
1380- The duration (the function's execution time) is displayed on
1381 the closing bracket line of a function, or on the same line
1382 as the function itself if it is a leaf. It is enabled by
1383 default.
1384
1385 hide: echo nofuncgraph-duration > /debug/tracing/trace_options
1386 show: echo funcgraph-duration > /debug/tracing/trace_options
1387
1388- The overhead field precedes the duration field when a duration
1389 threshold has been exceeded.
1390
1391 hide: echo nofuncgraph-overhead > /debug/tracing/trace_options
1392 show: echo funcgraph-overhead > /debug/tracing/trace_options
1393 depends on: funcgraph-duration
1394
1395 ie:
1396
1397 0) | up_write() {
1398 0) 0.646 us | _spin_lock_irqsave();
1399 0) 0.684 us | _spin_unlock_irqrestore();
1400 0) 3.123 us | }
1401 0) 0.548 us | fput();
1402 0) + 58.628 us | }
1403
1404 [...]
1405
1406 0) | putname() {
1407 0) | kmem_cache_free() {
1408 0) 0.518 us | __phys_addr();
1409 0) 1.757 us | }
1410 0) 2.861 us | }
1411 0) ! 115.305 us | }
1412 0) ! 116.402 us | }
1413
1414 + means that the function exceeded 10 usecs.
1415 ! means that the function exceeded 100 usecs.
1416
1417
1418- The task/pid field displays the cmdline and pid of the thread
1419 that executed the function. It is disabled by default.
1420
1421 hide: echo nofuncgraph-proc > /debug/tracing/trace_options
1422 show: echo funcgraph-proc > /debug/tracing/trace_options
1423
1424 ie:
1425
1426 # tracer: function_graph
1427 #
1428 # CPU TASK/PID DURATION FUNCTION CALLS
1429 # | | | | | | | | |
1430 0) sh-4802 | | d_free() {
1431 0) sh-4802 | | call_rcu() {
1432 0) sh-4802 | | __call_rcu() {
1433 0) sh-4802 | 0.616 us | rcu_process_gp_end();
1434 0) sh-4802 | 0.586 us | check_for_new_grace_period();
1435 0) sh-4802 | 2.899 us | }
1436 0) sh-4802 | 4.040 us | }
1437 0) sh-4802 | 5.151 us | }
1438 0) sh-4802 | + 49.370 us | }
1439
1440
1441- The absolute time field is an absolute timestamp given by the
1442 system clock since it started. A snapshot of this time is
1443 taken on each function entry/exit.
1444
1445 hide: echo nofuncgraph-abstime > /debug/tracing/trace_options
1446 show: echo funcgraph-abstime > /debug/tracing/trace_options
1447
1448 ie:
1449
1450 #
1451 # TIME CPU DURATION FUNCTION CALLS
1452 # | | | | | | | |
1453 360.774522 | 1) 0.541 us | }
1454 360.774522 | 1) 4.663 us | }
1455 360.774523 | 1) 0.541 us | __wake_up_bit();
1456 360.774524 | 1) 6.796 us | }
1457 360.774524 | 1) 7.952 us | }
1458 360.774525 | 1) 9.063 us | }
1459 360.774525 | 1) 0.615 us | journal_mark_dirty();
1460 360.774527 | 1) 0.578 us | __brelse();
1461 360.774528 | 1) | reiserfs_prepare_for_journal() {
1462 360.774528 | 1) | unlock_buffer() {
1463 360.774529 | 1) | wake_up_bit() {
1464 360.774529 | 1) | bit_waitqueue() {
1465 360.774530 | 1) 0.594 us | __phys_addr();
1466
1467
1468You can put some comments on specific functions by using
1469trace_printk(). For example, if you want to put a comment inside
1470the __might_sleep() function, you just have to include
1471<linux/ftrace.h> and call trace_printk() inside __might_sleep():
1472
1473trace_printk("I'm a comment!\n");
1474
1475will produce:
1476
1477 1) | __might_sleep() {
1478 1) | /* I'm a comment! */
1479 1) 1.449 us | }
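Outside of patching a core function like __might_sleep(), the easiest
way to experiment with this is a small module of your own. The
following is a hypothetical sketch (module and function names are
invented); it emits one trace_printk() comment when loaded and another
when removed:

/*
 * Hypothetical example module: nothing here is part of the kernel
 * tree, it only illustrates calling trace_printk() from your own
 * code. Build it like any other out-of-tree module against a kernel
 * with ftrace support.
 */
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init trace_comment_init(void)
{
        /* shows up as a comment line in the trace output */
        trace_printk("I'm a comment!\n");
        return 0;
}

static void __exit trace_comment_exit(void)
{
        trace_printk("module unloading\n");
}

module_init(trace_comment_init);
module_exit(trace_comment_exit);
MODULE_LICENSE("GPL");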
1480
1481
1482You might find other useful features for this tracer in the
1483following "dynamic ftrace" section such as tracing only specific
1484functions or tasks.
1485
1155dynamic ftrace 1486dynamic ftrace
1156-------------- 1487--------------
1157 1488
1158If CONFIG_DYNAMIC_FTRACE is set, the system will run with 1489If CONFIG_DYNAMIC_FTRACE is set, the system will run with
1159virtually no overhead when function tracing is disabled. The way 1490virtually no overhead when function tracing is disabled. The way
1160this works is the mcount function call (placed at the start of 1491this works is the mcount function call (placed at the start of
1161every kernel function, produced by the -pg switch in gcc), starts 1492every kernel function, produced by the -pg switch in gcc),
1162off pointing to a simple return. (Enabling FTRACE will include the 1493starts off pointing to a simple return. (Enabling FTRACE will
1163-pg switch in the compiling of the kernel.) 1494include the -pg switch in the compiling of the kernel.)
1164 1495
1165At compile time every C file object is run through the 1496At compile time every C file object is run through the
1166recordmcount.pl script (located in the scripts directory). This 1497recordmcount.pl script (located in the scripts directory). This
1167script will process the C object using objdump to find all the 1498script will process the C object using objdump to find all the
1168locations in the .text section that call mcount. (Note, only 1499locations in the .text section that call mcount. (Note, only the
1169the .text section is processed, since processing other sections 1500.text section is processed, since processing other sections like
1170like .init.text may cause races due to those sections being freed). 1501.init.text may cause races due to those sections being freed).
1171 1502
1172A new section called "__mcount_loc" is created that holds references 1503A new section called "__mcount_loc" is created that holds
1173to all the mcount call sites in the .text section. This section is 1504references to all the mcount call sites in the .text section.
1174compiled back into the original object. The final linker will add 1505This section is compiled back into the original object. The
1175all these references into a single table. 1506final linker will add all these references into a single table.
1176 1507
1177On boot up, before SMP is initialized, the dynamic ftrace code 1508On boot up, before SMP is initialized, the dynamic ftrace code
1178scans this table and updates all the locations into nops. It also 1509scans this table and updates all the locations into nops. It
1179records the locations, which are added to the available_filter_functions 1510also records the locations, which are added to the
1180list. Modules are processed as they are loaded and before they are 1511available_filter_functions list. Modules are processed as they
1181executed. When a module is unloaded, it also removes its functions from 1512are loaded and before they are executed. When a module is
1182the ftrace function list. This is automatic in the module unload 1513unloaded, it also removes its functions from the ftrace function
1183code, and the module author does not need to worry about it. 1514list. This is automatic in the module unload code, and the
1184 1515module author does not need to worry about it.
1185When tracing is enabled, kstop_machine is called to prevent races 1516
1186with the CPUs executing code being modified (which can cause the 1517When tracing is enabled, kstop_machine is called to prevent
1187CPU to do undesirable things), and the nops are patched back 1518races with the CPUs executing code being modified (which can
1188to calls. But this time, they do not call mcount (which is just 1519cause the CPU to do undesirable things), and the nops are
1189a function stub). They now call into the ftrace infrastructure. 1520patched back to calls. But this time, they do not call mcount
1521(which is just a function stub). They now call into the ftrace
1522infrastructure.
1190 1523
1191One special side-effect to the recording of the functions being 1524One special side-effect to the recording of the functions being
1192traced is that we can now selectively choose which functions we 1525traced is that we can now selectively choose which functions we
1193wish to trace and which ones we want the mcount calls to remain as 1526wish to trace and which ones we want the mcount calls to remain
1194nops. 1527as nops.
1195 1528
1196Two files are used, one for enabling and one for disabling the tracing 1529Two files are used, one for enabling and one for disabling the
1197of specified functions. They are: 1530tracing of specified functions. They are:
1198 1531
1199 set_ftrace_filter 1532 set_ftrace_filter
1200 1533
@@ -1202,8 +1535,8 @@ and
1202 1535
1203 set_ftrace_notrace 1536 set_ftrace_notrace
1204 1537
1205A list of available functions that you can add to these files is listed 1538A list of available functions that you can add to these files is
1206in: 1539listed in:
1207 1540
1208 available_filter_functions 1541 available_filter_functions
1209 1542
@@ -1240,8 +1573,8 @@ hrtimer_interrupt
1240sys_nanosleep 1573sys_nanosleep
1241 1574
1242 1575
1243Perhaps this is not enough. The filters also allow simple wild cards. 1576Perhaps this is not enough. The filters also allow simple wild
1244Only the following are currently available 1577cards. Only the following are currently available
1245 1578
1246 <match>* - will match functions that begin with <match> 1579 <match>* - will match functions that begin with <match>
1247 *<match> - will match functions that end with <match> 1580 *<match> - will match functions that end with <match>
@@ -1251,9 +1584,9 @@ These are the only wild cards which are supported.
1251 1584
1252 <match>*<match> will not work. 1585 <match>*<match> will not work.
1253 1586
1254Note: It is better to use quotes to enclose the wild cards, otherwise 1587Note: It is better to use quotes to enclose the wild cards,
1255 the shell may expand the parameters into names of files in the local 1588 otherwise the shell may expand the parameters into names
1256 directory. 1589 of files in the local directory.
1257 1590
1258 # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter 1591 # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter
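The same filter can be installed from a program. A sketch, again
assuming the /debug mount point used throughout this document; opening
with O_TRUNC corresponds to the '>' redirection discussed below, while
opening with O_APPEND corresponds to '>>':

/*
 * Sketch: set an 'hrtimer_*' filter from C, equivalent to the echo
 * above. O_TRUNC replaces the current filter; O_APPEND would add to
 * it instead.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *filter = "hrtimer_*";
        int fd;

        fd = open("/debug/tracing/set_ftrace_filter", O_WRONLY | O_TRUNC);
        if (fd < 0) {
                perror("set_ftrace_filter");
                return 1;
        }
        if (write(fd, filter, strlen(filter)) < 0)
                perror("write");
        close(fd);
        return 0;
}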
1259 1592
@@ -1299,7 +1632,8 @@ This is because the '>' and '>>' act just like they do in bash.
1299To rewrite the filters, use '>' 1632To rewrite the filters, use '>'
1300To append to the filters, use '>>' 1633To append to the filters, use '>>'
1301 1634
1302To clear out a filter so that all functions will be recorded again: 1635To clear out a filter so that all functions will be recorded
1636again:
1303 1637
1304 # echo > /debug/tracing/set_ftrace_filter 1638 # echo > /debug/tracing/set_ftrace_filter
1305 # cat /debug/tracing/set_ftrace_filter 1639 # cat /debug/tracing/set_ftrace_filter
@@ -1331,7 +1665,8 @@ hrtimer_get_res
1331hrtimer_init_sleeper 1665hrtimer_init_sleeper
1332 1666
1333 1667
1334The set_ftrace_notrace prevents those functions from being traced. 1668The set_ftrace_notrace prevents those functions from being
1669traced.
1335 1670
1336 # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace 1671 # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace
1337 1672
@@ -1353,13 +1688,75 @@ Produces:
1353 1688
1354We can see that there's no more lock or preempt tracing. 1689We can see that there's no more lock or preempt tracing.
1355 1690
1691
1692Dynamic ftrace with the function graph tracer
1693---------------------------------------------
1694
1695Although what has been explained above concerns both the
1696function tracer and the function-graph tracer, there are some
1697special features only available in the function-graph tracer.
1698
1699If you want to trace only one function and all of its children,
1700you just have to echo its name into set_graph_function:
1701
1702 echo __do_fault > set_graph_function
1703
1704will produce the following "expanded" trace of the __do_fault()
1705function:
1706
1707 0) | __do_fault() {
1708 0) | filemap_fault() {
1709 0) | find_lock_page() {
1710 0) 0.804 us | find_get_page();
1711 0) | __might_sleep() {
1712 0) 1.329 us | }
1713 0) 3.904 us | }
1714 0) 4.979 us | }
1715 0) 0.653 us | _spin_lock();
1716 0) 0.578 us | page_add_file_rmap();
1717 0) 0.525 us | native_set_pte_at();
1718 0) 0.585 us | _spin_unlock();
1719 0) | unlock_page() {
1720 0) 0.541 us | page_waitqueue();
1721 0) 0.639 us | __wake_up_bit();
1722 0) 2.786 us | }
1723 0) + 14.237 us | }
1724 0) | __do_fault() {
1725 0) | filemap_fault() {
1726 0) | find_lock_page() {
1727 0) 0.698 us | find_get_page();
1728 0) | __might_sleep() {
1729 0) 1.412 us | }
1730 0) 3.950 us | }
1731 0) 5.098 us | }
1732 0) 0.631 us | _spin_lock();
1733 0) 0.571 us | page_add_file_rmap();
1734 0) 0.526 us | native_set_pte_at();
1735 0) 0.586 us | _spin_unlock();
1736 0) | unlock_page() {
1737 0) 0.533 us | page_waitqueue();
1738 0) 0.638 us | __wake_up_bit();
1739 0) 2.793 us | }
1740 0) + 14.012 us | }
1741
1742You can also expand several functions at once:
1743
1744 echo sys_open > set_graph_function
1745 echo sys_close >> set_graph_function
1746
1747Now if you want to go back to tracing all functions, you can clear
1748this special filter via:
1749
1750 echo > set_graph_function
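The same sequence can be scripted from C as well. A sketch; the paths
and the O_TRUNC/O_APPEND mapping to '>' and '>>' are assumptions
consistent with the shell examples above:

/*
 * Sketch: manipulate set_graph_function from C. The first write
 * replaces the list, the O_APPEND open adds to it, and the final
 * O_TRUNC open with nothing written clears the special filter.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define GRAPH "/debug/tracing/set_graph_function"

static void put(const char *path, int flags, const char *val)
{
        int fd = open(path, O_WRONLY | flags);

        if (fd < 0) {
                perror(path);
                return;
        }
        if (*val && write(fd, val, strlen(val)) < 0)
                perror(path);
        close(fd);
}

int main(void)
{
        put(GRAPH, O_TRUNC, "sys_open");        /* echo sys_open >  ... */
        put(GRAPH, O_APPEND, "sys_close");      /* echo sys_close >> ... */

        /* ... run the workload and read the trace here ... */

        put(GRAPH, O_TRUNC, "");                /* echo > ... (clear) */
        return 0;
}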
1751
1752
1356trace_pipe 1753trace_pipe
1357---------- 1754----------
1358 1755
1359The trace_pipe outputs the same content as the trace file, but the effect 1756The trace_pipe outputs the same content as the trace file, but
1360on the tracing is different. Every read from trace_pipe is consumed. 1757the effect on the tracing is different. Every read from
1361This means that subsequent reads will be different. The trace 1758trace_pipe is consumed. This means that subsequent reads will be
1362is live. 1759different. The trace is live.
1363 1760
1364 # echo function > /debug/tracing/current_tracer 1761 # echo function > /debug/tracing/current_tracer
1365 # cat /debug/tracing/trace_pipe > /tmp/trace.out & 1762 # cat /debug/tracing/trace_pipe > /tmp/trace.out &
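A program can consume the live trace in the same way as the
backgrounded cat above. A minimal sketch of such a reader, again
assuming the /debug mount point:

/*
 * Sketch: a minimal live consumer of trace_pipe. Reads block until
 * the tracer produces data, and everything read here is consumed --
 * it will not show up again in the trace file.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd;

        fd = open("/debug/tracing/trace_pipe", O_RDONLY);
        if (fd < 0) {
                perror("trace_pipe");
                return 1;
        }

        /* EOF is delivered when the current tracer is changed */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                if (write(STDOUT_FILENO, buf, n) < 0)
                        break;

        close(fd);
        return 0;
}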
@@ -1387,38 +1784,45 @@ is live.
1387 bash-4043 [00] 41.267111: select_task_rq_rt <-try_to_wake_up 1784 bash-4043 [00] 41.267111: select_task_rq_rt <-try_to_wake_up
1388 1785
1389 1786
1390Note, reading the trace_pipe file will block until more input is added. 1787Note, reading the trace_pipe file will block until more input is
1391By changing the tracer, trace_pipe will issue an EOF. We needed 1788added. By changing the tracer, trace_pipe will issue an EOF. We
1392to set the function tracer _before_ we "cat" the trace_pipe file. 1789needed to set the function tracer _before_ we "cat" the
1790trace_pipe file.
1393 1791
1394 1792
1395trace entries 1793trace entries
1396------------- 1794-------------
1397 1795
1398Having too much or not enough data can be troublesome in diagnosing 1796Having too much or not enough data can be troublesome in
1399an issue in the kernel. The file buffer_size_kb is used to modify 1797diagnosing an issue in the kernel. The file buffer_size_kb is
1400the size of the internal trace buffers. The number listed 1798used to modify the size of the internal trace buffers. The
1401is the number of kilobytes allocated per CPU. To know 1799number listed is the number of kilobytes allocated per
1402the full size, multiply this value by the number of 1800CPU. To know the full size, multiply this value by the
1403possible CPUs. 1801number of possible CPUs.
1404 1802
1405 # cat /debug/tracing/buffer_size_kb 1803 # cat /debug/tracing/buffer_size_kb
14061408 (units kilobytes) 18041408 (units kilobytes)
1407 1805
1408Note, to modify this, you must have tracing completely disabled. To do that, 1806Note, to modify this, you must have tracing completely disabled.
1409echo "nop" into the current_tracer. If the current_tracer is not set 1807To do that, echo "nop" into the current_tracer. If the
1410to "nop", an EINVAL error will be returned. 1808current_tracer is not set to "nop", an EINVAL error will be
1809returned.
1411 1810
1412 # echo nop > /debug/tracing/current_tracer 1811 # echo nop > /debug/tracing/current_tracer
1413 # echo 10000 > /debug/tracing/buffer_size_kb 1812 # echo 10000 > /debug/tracing/buffer_size_kb
1414 # cat /debug/tracing/buffer_size_kb 1813 # cat /debug/tracing/buffer_size_kb
141510000 (units kilobytes) 181410000 (units kilobytes)
1416 1815
1417The number of pages which will be allocated is limited to a percentage 1816The number of pages which will be allocated is limited to a
1418of available memory. Allocating too much will produce an error. 1817percentage of available memory. Allocating too much will produce
1818an error.
1419 1819
1420 # echo 1000000000000 > /debug/tracing/buffer_size_kb 1820 # echo 1000000000000 > /debug/tracing/buffer_size_kb
1421-bash: echo: write error: Cannot allocate memory 1821-bash: echo: write error: Cannot allocate memory
1422 # cat /debug/tracing/buffer_size_kb 1822 # cat /debug/tracing/buffer_size_kb
142385 182385
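Putting the above rules together, a resize helper in C might look like
the sketch below: switch to the nop tracer first (otherwise the write
fails with EINVAL) and then write the new per-CPU size in kilobytes
(an oversized value fails with ENOMEM, as shown above).

/*
 * Sketch: resize the per-CPU trace buffers from C, following the
 * steps described above. Paths assume the /debug mount point.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_file(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        int ret = 0;

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0)
                ret = -1;
        close(fd);
        return ret;
}

int main(void)
{
        /* tracing must be using the nop tracer before resizing */
        if (write_file("/debug/tracing/current_tracer", "nop") < 0) {
                fprintf(stderr, "failed to write current_tracer\n");
                return 1;
        }
        /* new size, in kilobytes per CPU; may fail with EINVAL or ENOMEM */
        if (write_file("/debug/tracing/buffer_size_kb", "10000") < 0) {
                fprintf(stderr, "failed to write buffer_size_kb\n");
                return 1;
        }
        return 0;
}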
1424 1824
1825-----------
1826
1827More details can be found in the source code, in the
1828kernel/trace/*.c files.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 240257dd4238..ebdeb7c4330e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -50,6 +50,7 @@ parameter is applicable:
50 ISAPNP ISA PnP code is enabled. 50 ISAPNP ISA PnP code is enabled.
51 ISDN Appropriate ISDN support is enabled. 51 ISDN Appropriate ISDN support is enabled.
52 JOY Appropriate joystick support is enabled. 52 JOY Appropriate joystick support is enabled.
53 KMEMTRACE kmemtrace is enabled.
53 LIBATA Libata driver is enabled 54 LIBATA Libata driver is enabled
54 LP Printer support is enabled. 55 LP Printer support is enabled.
55 LOOP Loopback device support is enabled. 56 LOOP Loopback device support is enabled.
@@ -1078,6 +1079,15 @@ and is between 256 and 4096 characters. It is defined in the file
1078 use the HighMem zone if it exists, and the Normal 1079 use the HighMem zone if it exists, and the Normal
1079 zone if it does not. 1080 zone if it does not.
1080 1081
1082 kmemtrace.enable= [KNL,KMEMTRACE] Format: { yes | no }
1083 Controls whether kmemtrace is enabled
1084 at boot-time.
1085
1086 kmemtrace.subbufs=n [KNL,KMEMTRACE] Overrides the number of
1087 subbufs kmemtrace's relay channel has. Set this
1088 higher than default (KMEMTRACE_N_SUBBUFS in code) if
1089 you experience buffer overruns.
1090
1081 movablecore=nn[KMG] [KNL,X86-32,IA-64,PPC,X86-64] This parameter 1091 movablecore=nn[KMG] [KNL,X86-32,IA-64,PPC,X86-64] This parameter
1082 is similar to kernelcore except it specifies the 1092 is similar to kernelcore except it specifies the
1083 amount of memory used for migratable allocations. 1093 amount of memory used for migratable allocations.
@@ -2362,6 +2372,8 @@ and is between 256 and 4096 characters. It is defined in the file
2362 2372
2363 tp720= [HW,PS2] 2373 tp720= [HW,PS2]
2364 2374
2375 trace_buf_size=nn[KMG] [ftrace] will set tracing buffer size.
2376
2365 trix= [HW,OSS] MediaTrix AudioTrix Pro 2377 trix= [HW,OSS] MediaTrix AudioTrix Pro
2366 Format: 2378 Format:
2367 <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq> 2379 <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index afa2946892da..cf42b820ff9d 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -115,6 +115,8 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
115 115
116'x' - Used by xmon interface on ppc/powerpc platforms. 116'x' - Used by xmon interface on ppc/powerpc platforms.
117 117
118'z' - Dump the ftrace buffer
119
118'0'-'9' - Sets the console log level, controlling which kernel messages 120'0'-'9' - Sets the console log level, controlling which kernel messages
119 will be printed to your console. ('0', for example would make 121 will be printed to your console. ('0', for example would make
120 it so that only emergency messages like PANICs or OOPSes would 122 it so that only emergency messages like PANICs or OOPSes would
diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt
index 6f0a044f5b5e..c0e1ceed75a4 100644
--- a/Documentation/tracepoints.txt
+++ b/Documentation/tracepoints.txt
@@ -45,8 +45,8 @@ In include/trace/subsys.h :
45#include <linux/tracepoint.h> 45#include <linux/tracepoint.h>
46 46
47DECLARE_TRACE(subsys_eventname, 47DECLARE_TRACE(subsys_eventname,
48 TPPROTO(int firstarg, struct task_struct *p), 48 TP_PROTO(int firstarg, struct task_struct *p),
49 TPARGS(firstarg, p)); 49 TP_ARGS(firstarg, p));
50 50
51In subsys/file.c (where the tracing statement must be added) : 51In subsys/file.c (where the tracing statement must be added) :
52 52
@@ -66,10 +66,10 @@ Where :
66 - subsys is the name of your subsystem. 66 - subsys is the name of your subsystem.
67 - eventname is the name of the event to trace. 67 - eventname is the name of the event to trace.
68 68
69- TPPROTO(int firstarg, struct task_struct *p) is the prototype of the 69- TP_PROTO(int firstarg, struct task_struct *p) is the prototype of the
70 function called by this tracepoint. 70 function called by this tracepoint.
71 71
72- TPARGS(firstarg, p) are the parameter names, same as found in the 72- TP_ARGS(firstarg, p) are the parameter names, same as found in the
73 prototype. 73 prototype.
74 74
75Connecting a function (probe) to a tracepoint is done by providing a 75Connecting a function (probe) to a tracepoint is done by providing a
@@ -103,13 +103,14 @@ used to export the defined tracepoints.
103 103
104* Probe / tracepoint example 104* Probe / tracepoint example
105 105
106See the example provided in samples/tracepoints/src 106See the example provided in samples/tracepoints
107 107
108Compile them with your kernel. 108Compile them with your kernel. They are built during 'make' (not
109'make modules') when CONFIG_SAMPLE_TRACEPOINTS=m.
109 110
110Run, as root : 111Run, as root :
111modprobe tracepoint-example (insmod order is not important) 112modprobe tracepoint-sample (insmod order is not important)
112modprobe tracepoint-probe-example 113modprobe tracepoint-probe-sample
113cat /proc/tracepoint-example (returns an expected error) 114cat /proc/tracepoint-sample (returns an expected error)
114rmmod tracepoint-example tracepoint-probe-example 115rmmod tracepoint-sample tracepoint-probe-sample
115dmesg 116dmesg
diff --git a/Documentation/vm/kmemtrace.txt b/Documentation/vm/kmemtrace.txt
new file mode 100644
index 000000000000..a956d9b7f943
--- /dev/null
+++ b/Documentation/vm/kmemtrace.txt
@@ -0,0 +1,126 @@
1 kmemtrace - Kernel Memory Tracer
2
3 by Eduard - Gabriel Munteanu
4 <eduard.munteanu@linux360.ro>
5
6I. Introduction
7===============
8
9kmemtrace helps kernel developers figure out two things:
101) how different allocators (SLAB, SLUB etc.) perform
112) how kernel code allocates memory and how much
12
13To do this, we trace every allocation and export information to userspace
14through the relay interface. We export things such as the number of requested
15bytes, the number of bytes actually allocated (i.e. including internal
16fragmentation), whether this is a slab allocation or a plain kmalloc() and so
17on.
18
19The actual analysis is performed by a userspace tool (see section III for
20details on where to get it from). It logs the data exported by the kernel,
21processes it and (as of writing this) can provide the following information:
22- the total amount of memory allocated and fragmentation per call-site
23- the amount of memory allocated and fragmentation per allocation
24- total memory allocated and fragmentation in the collected dataset
25- number of cross-CPU allocations and frees (makes sense in NUMA environments)
26
27Moreover, it can potentially find inconsistent and erroneous behavior in
28kernel code, such as using slab free functions on kmalloc'ed memory or
29allocating less memory than requested (but not truly failed allocations).
30
31kmemtrace also makes provisions for tracing on one arch and analysing the
32data on another.
33
34II. Design and goals
35====================
36
37kmemtrace was designed to handle rather large amounts of data. Thus, it uses
38the relay interface to export whatever is logged to userspace, which then
39stores it. Analysis and reporting is done asynchronously, that is, after the
40data is collected and stored. By design, it allows one to log and analyse
41on different machines and different arches.
42
43As of writing this, the ABI is not considered stable, though it might not
44change much. However, no guarantees are made about compatibility yet. When
45deemed stable, the ABI should still allow easy extension while maintaining
46backward compatibility. This is described further in Documentation/ABI.
47
48Summary of design goals:
49 - allow logging and analysis to be done across different machines
50 - be fast and anticipate usage in high-load environments (*)
51 - be reasonably extensible
52 - make it possible for GNU/Linux distributions to have kmemtrace
53 included in their repositories
54
55(*) - one of the reasons Pekka Enberg's original userspace data analysis
56 tool's code was rewritten from Perl to C (although this is more than a
57 simple conversion)
58
59
60III. Quick usage guide
61======================
62
631) Get a kernel that supports kmemtrace and build it accordingly (i.e. enable
64CONFIG_KMEMTRACE).
65
662) Get the userspace tool and build it:
67$ git-clone git://repo.or.cz/kmemtrace-user.git # current repository
68$ cd kmemtrace-user/
69$ ./autogen.sh
70$ ./configure
71$ make
72
733) Boot the kmemtrace-enabled kernel if you haven't, preferably in the
74'single' runlevel (so that relay buffers don't fill up easily), and run
75kmemtrace:
76# '$' does not mean user, but root here.
77$ mount -t debugfs none /sys/kernel/debug
78$ mount -t proc none /proc
79$ cd path/to/kmemtrace-user/
80$ ./kmemtraced
81Wait a bit, then stop it with CTRL+C.
82$ cat /sys/kernel/debug/kmemtrace/total_overruns # Check if we didn't
83 # overrun, should
84 # be zero.
85$ (Optionally) [Run kmemtrace_check separately on each cpu[0-9]*.out file to
86 check its correctness]
87$ ./kmemtrace-report
88
89Now you should have a nice and short summary of how the allocator performs.
90
91IV. FAQ and known issues
92========================
93
94Q: 'cat /sys/kernel/debug/kmemtrace/total_overruns' is non-zero, how do I fix
95this? Should I worry?
96A: If it's non-zero, this affects kmemtrace's accuracy, depending on how
97large the number is. You can fix it by supplying a higher
98'kmemtrace.subbufs=N' kernel parameter.
99---
100
101Q: kmemtrace_check reports errors, how do I fix this? Should I worry?
102A: This is a bug and should be reported. It can occur for a variety of
103reasons:
104 - possible bugs in relay code
105 - possible misuse of relay by kmemtrace
106 - timestamps being collected out of order
107Or you may fix it yourself and send us a patch.
108---
109
110Q: kmemtrace_report shows many errors, how do I fix this? Should I worry?
111A: This is a known issue and I'm working on it. These might be true errors
112in kernel code, which may have inconsistent behavior (e.g. allocating memory
113with kmem_cache_alloc() and freeing it with kfree()). Pekka Enberg pointed
114out this behavior may work with SLAB, but may fail with other allocators.
115
116It may also be due to lack of tracing in some unusual allocator functions.
117
118We don't want bug reports regarding this issue yet.
119---
120
121V. See also
122===========
123
124Documentation/kernel-parameters.txt
125Documentation/ABI/testing/debugfs-kmemtrace
126
diff --git a/MAINTAINERS b/MAINTAINERS
index 068f5fb90020..25a17b49cfe1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2642,6 +2642,12 @@ M: jason.wessel@windriver.com
2642L: kgdb-bugreport@lists.sourceforge.net 2642L: kgdb-bugreport@lists.sourceforge.net
2643S: Maintained 2643S: Maintained
2644 2644
2645KMEMTRACE
2646P: Eduard - Gabriel Munteanu
2647M: eduard.munteanu@linux360.ro
2648L: linux-kernel@vger.kernel.org
2649S: Maintained
2650
2645KPROBES 2651KPROBES
2646P: Ananth N Mavinakayanahalli 2652P: Ananth N Mavinakayanahalli
2647M: ananth@in.ibm.com 2653M: ananth@in.ibm.com
diff --git a/arch/Kconfig b/arch/Kconfig
index 830c16a2b801..dc81b34c5d82 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -6,6 +6,7 @@ config OPROFILE
6 tristate "OProfile system profiling (EXPERIMENTAL)" 6 tristate "OProfile system profiling (EXPERIMENTAL)"
7 depends on PROFILING 7 depends on PROFILING
8 depends on HAVE_OPROFILE 8 depends on HAVE_OPROFILE
9 depends on TRACING_SUPPORT
9 select TRACING 10 select TRACING
10 select RING_BUFFER 11 select RING_BUFFER
11 help 12 help
diff --git a/arch/alpha/include/asm/ftrace.h b/arch/alpha/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/alpha/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/alpha/include/asm/hardirq.h b/arch/alpha/include/asm/hardirq.h
index d953e234daa8..88971460fa6c 100644
--- a/arch/alpha/include/asm/hardirq.h
+++ b/arch/alpha/include/asm/hardirq.h
@@ -14,17 +14,4 @@ typedef struct {
14 14
15void ack_bad_irq(unsigned int irq); 15void ack_bad_irq(unsigned int irq);
16 16
17#define HARDIRQ_BITS 12
18
19/*
20 * The hardirq mask has to be large enough to have
21 * space for potentially nestable IRQ sources in the system
22 * to nest on a single CPU. On Alpha, interrupts are masked at the CPU
23 * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode)
24 * so we really only have 8 nestable IRQs, but allow some overhead
25 */
26#if (1 << HARDIRQ_BITS) < 16
27#error HARDIRQ_BITS is too low!
28#endif
29
30#endif /* _ALPHA_HARDIRQ_H */ 17#endif /* _ALPHA_HARDIRQ_H */
diff --git a/arch/avr32/include/asm/ftrace.h b/arch/avr32/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/avr32/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/avr32/include/asm/hardirq.h b/arch/avr32/include/asm/hardirq.h
index 267354356f60..015bc75ea798 100644
--- a/arch/avr32/include/asm/hardirq.h
+++ b/arch/avr32/include/asm/hardirq.h
@@ -20,15 +20,4 @@ void ack_bad_irq(unsigned int irq);
20 20
21#endif /* __ASSEMBLY__ */ 21#endif /* __ASSEMBLY__ */
22 22
23#define HARDIRQ_BITS 12
24
25/*
26 * The hardirq mask has to be large enough to have
27 * space for potentially all IRQ sources in the system
28 * nesting on a single CPU:
29 */
30#if (1 << HARDIRQ_BITS) < NR_IRQS
31# error HARDIRQ_BITS is too low!
32#endif
33
34#endif /* __ASM_AVR32_HARDIRQ_H */ 23#endif /* __ASM_AVR32_HARDIRQ_H */
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/cris/include/asm/ftrace.h b/arch/cris/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/cris/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/h8300/include/asm/ftrace.h b/arch/h8300/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/h8300/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 153e727a6e8e..294a3b13ecac 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -22,6 +22,9 @@ config IA64
22 select HAVE_OPROFILE 22 select HAVE_OPROFILE
23 select HAVE_KPROBES 23 select HAVE_KPROBES
24 select HAVE_KRETPROBES 24 select HAVE_KRETPROBES
25 select HAVE_FTRACE_MCOUNT_RECORD
26 select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
27 select HAVE_FUNCTION_TRACER
25 select HAVE_DMA_ATTRS 28 select HAVE_DMA_ATTRS
26 select HAVE_KVM 29 select HAVE_KVM
27 select HAVE_ARCH_TRACEHOOK 30 select HAVE_ARCH_TRACEHOOK
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
new file mode 100644
index 000000000000..d20db3c2a656
--- /dev/null
+++ b/arch/ia64/include/asm/ftrace.h
@@ -0,0 +1,28 @@
1#ifndef _ASM_IA64_FTRACE_H
2#define _ASM_IA64_FTRACE_H
3
4#ifdef CONFIG_FUNCTION_TRACER
5#define MCOUNT_INSN_SIZE 32 /* sizeof mcount call */
6
7#ifndef __ASSEMBLY__
8extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
9#define mcount _mcount
10
11#include <asm/kprobes.h>
12/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
13#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
14#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
15
16static inline unsigned long ftrace_call_adjust(unsigned long addr)
17{
18 /* second bundle, insn 2 */
19 return addr - 0x12;
20}
21
22struct dyn_arch_ftrace {
23};
24#endif
25
26#endif /* CONFIG_FUNCTION_TRACER */
27
28#endif /* _ASM_IA64_FTRACE_H */
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index 140e495b8e0e..d514cd9edb49 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -20,16 +20,6 @@
20 20
21#define local_softirq_pending() (local_cpu_data->softirq_pending) 21#define local_softirq_pending() (local_cpu_data->softirq_pending)
22 22
23#define HARDIRQ_BITS 14
24
25/*
26 * The hardirq mask has to be large enough to have space for potentially all IRQ sources
27 * in the system nesting on a single CPU:
28 */
29#if (1 << HARDIRQ_BITS) < NR_IRQS
30# error HARDIRQ_BITS is too low!
31#endif
32
33extern void __iomem *ipi_base_addr; 23extern void __iomem *ipi_base_addr;
34 24
35void ack_bad_irq(unsigned int irq); 25void ack_bad_irq(unsigned int irq);
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index f2778f2c4fd9..dc62df021673 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -2,6 +2,10 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5ifdef CONFIG_DYNAMIC_FTRACE
6CFLAGS_REMOVE_ftrace.o = -pg
7endif
8
5extra-y := head.o init_task.o vmlinux.lds 9extra-y := head.o init_task.o vmlinux.lds
6 10
7obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ 11obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
@@ -28,6 +32,7 @@ obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
28obj-$(CONFIG_CPU_FREQ) += cpufreq/ 32obj-$(CONFIG_CPU_FREQ) += cpufreq/
29obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 33obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 34obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
35obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
31obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o 36obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
32obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 37obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
33obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 38obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e5341e2c1175..7e3382b06d56 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -47,6 +47,7 @@
47#include <asm/processor.h> 47#include <asm/processor.h>
48#include <asm/thread_info.h> 48#include <asm/thread_info.h>
49#include <asm/unistd.h> 49#include <asm/unistd.h>
50#include <asm/ftrace.h>
50 51
51#include "minstate.h" 52#include "minstate.h"
52 53
@@ -1404,6 +1405,105 @@ GLOBAL_ENTRY(unw_init_running)
1404 br.ret.sptk.many rp 1405 br.ret.sptk.many rp
1405END(unw_init_running) 1406END(unw_init_running)
1406 1407
1408#ifdef CONFIG_FUNCTION_TRACER
1409#ifdef CONFIG_DYNAMIC_FTRACE
1410GLOBAL_ENTRY(_mcount)
1411 br ftrace_stub
1412END(_mcount)
1413
1414.here:
1415 br.ret.sptk.many b0
1416
1417GLOBAL_ENTRY(ftrace_caller)
1418 alloc out0 = ar.pfs, 8, 0, 4, 0
1419 mov out3 = r0
1420 ;;
1421 mov out2 = b0
1422 add r3 = 0x20, r3
1423 mov out1 = r1;
1424 br.call.sptk.many b0 = ftrace_patch_gp
1425 //this might be called from module, so we must patch gp
1426ftrace_patch_gp:
1427 movl gp=__gp
1428 mov b0 = r3
1429 ;;
1430.global ftrace_call;
1431ftrace_call:
1432{
1433 .mlx
1434 nop.m 0x0
1435 movl r3 = .here;;
1436}
1437 alloc loc0 = ar.pfs, 4, 4, 2, 0
1438 ;;
1439 mov loc1 = b0
1440 mov out0 = b0
1441 mov loc2 = r8
1442 mov loc3 = r15
1443 ;;
1444 adds out0 = -MCOUNT_INSN_SIZE, out0
1445 mov out1 = in2
1446 mov b6 = r3
1447
1448 br.call.sptk.many b0 = b6
1449 ;;
1450 mov ar.pfs = loc0
1451 mov b0 = loc1
1452 mov r8 = loc2
1453 mov r15 = loc3
1454 br ftrace_stub
1455 ;;
1456END(ftrace_caller)
1457
1458#else
1459GLOBAL_ENTRY(_mcount)
1460 movl r2 = ftrace_stub
1461 movl r3 = ftrace_trace_function;;
1462 ld8 r3 = [r3];;
1463 ld8 r3 = [r3];;
1464 cmp.eq p7,p0 = r2, r3
1465(p7) br.sptk.many ftrace_stub
1466 ;;
1467
1468 alloc loc0 = ar.pfs, 4, 4, 2, 0
1469 ;;
1470 mov loc1 = b0
1471 mov out0 = b0
1472 mov loc2 = r8
1473 mov loc3 = r15
1474 ;;
1475 adds out0 = -MCOUNT_INSN_SIZE, out0
1476 mov out1 = in2
1477 mov b6 = r3
1478
1479 br.call.sptk.many b0 = b6
1480 ;;
1481 mov ar.pfs = loc0
1482 mov b0 = loc1
1483 mov r8 = loc2
1484 mov r15 = loc3
1485 br ftrace_stub
1486 ;;
1487END(_mcount)
1488#endif
1489
1490GLOBAL_ENTRY(ftrace_stub)
1491 mov r3 = b0
1492 movl r2 = _mcount_ret_helper
1493 ;;
1494 mov b6 = r2
1495 mov b7 = r3
1496 br.ret.sptk.many b6
1497
1498_mcount_ret_helper:
1499 mov b0 = r42
1500 mov r1 = r41
1501 mov ar.pfs = r40
1502 br b7
1503END(ftrace_stub)
1504
1505#endif /* CONFIG_FUNCTION_TRACER */
1506
1407 .rodata 1507 .rodata
1408 .align 8 1508 .align 8
1409 .globl sys_call_table 1509 .globl sys_call_table
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
new file mode 100644
index 000000000000..7fc8c961b1f7
--- /dev/null
+++ b/arch/ia64/kernel/ftrace.c
@@ -0,0 +1,206 @@
1/*
2 * Dynamic function tracing support.
3 *
4 * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
5 *
6 * For licencing details, see COPYING.
7 *
8 * Defines low-level handling of mcount calls when the kernel
9 * is compiled with the -pg flag. When using dynamic ftrace, the
10 * mcount call-sites get patched lazily with NOP till they are
11 * enabled. All code mutation routines here take effect atomically.
12 */
13
14#include <linux/uaccess.h>
15#include <linux/ftrace.h>
16
17#include <asm/cacheflush.h>
18#include <asm/patch.h>
19
20/* In IA64, each function will be added below two bundles with -pg option */
21static unsigned char __attribute__((aligned(8)))
22ftrace_orig_code[MCOUNT_INSN_SIZE] = {
23 0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
24 0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
25 0x05, 0x00, 0xc4, 0x00, /* mov r42=b0 */
26 0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
27 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
28 0x08, 0x00, 0x00, 0x50 /* br.call.sptk.many b0 = _mcount;; */
29};
30
31struct ftrace_orig_insn {
32 u64 dummy1, dummy2, dummy3;
33 u64 dummy4:64-41+13;
34 u64 imm20:20;
35 u64 dummy5:3;
36 u64 sign:1;
37 u64 dummy6:4;
38};
39
40/* mcount stub will be converted below for nop */
41static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
42 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
43 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
44 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */
45 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
46 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
47 0x00, 0x00, 0x04, 0x00
48};
49
50static unsigned char *ftrace_nop_replace(void)
51{
52 return ftrace_nop_code;
53}
54
55/*
56 * mcount stub will be converted below for call
57 * Note: Just the last instruction is changed against nop
58 * */
59static unsigned char __attribute__((aligned(8)))
60ftrace_call_code[MCOUNT_INSN_SIZE] = {
61 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
62 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
63 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */
64 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
65 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
66 0xf8, 0xff, 0xff, 0xc8
67};
68
69struct ftrace_call_insn {
70 u64 dummy1, dummy2;
71 u64 dummy3:48;
72 u64 imm39_l:16;
73 u64 imm39_h:23;
74 u64 dummy4:13;
75 u64 imm20:20;
76 u64 dummy5:3;
77 u64 i:1;
78 u64 dummy6:4;
79};
80
81static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
82{
83 struct ftrace_call_insn *code = (void *)ftrace_call_code;
84 unsigned long offset = addr - (ip + 0x10);
85
86 code->imm39_l = offset >> 24;
87 code->imm39_h = offset >> 40;
88 code->imm20 = offset >> 4;
89 code->i = offset >> 63;
90 return ftrace_call_code;
91}
92
93static int
94ftrace_modify_code(unsigned long ip, unsigned char *old_code,
95 unsigned char *new_code, int do_check)
96{
97 unsigned char replaced[MCOUNT_INSN_SIZE];
98
99 /*
100 * Note: Due to modules and __init, code can
101 * disappear and change, we need to protect against faulting
102 * as well as code changing. We do this by using the
103 * probe_kernel_* functions.
104 *
105 * No real locking needed, this code is run through
106 * kstop_machine, or before SMP starts.
107 */
108
109 if (!do_check)
110 goto skip_check;
111
112 /* read the text we want to modify */
113 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
114 return -EFAULT;
115
116 /* Make sure it is what we expect it to be */
117 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
118 return -EINVAL;
119
120skip_check:
121 /* replace the text with the new text */
122 if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE))
123 return -EPERM;
124 flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
125
126 return 0;
127}
128
129static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
130{
131 unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
132 unsigned long ip = rec->ip;
133
134 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
135 return -EFAULT;
136 if (rec->flags & FTRACE_FL_CONVERTED) {
137 struct ftrace_call_insn *call_insn, *tmp_call;
138
139 call_insn = (void *)ftrace_call_code;
140 tmp_call = (void *)replaced;
141 call_insn->imm39_l = tmp_call->imm39_l;
142 call_insn->imm39_h = tmp_call->imm39_h;
143 call_insn->imm20 = tmp_call->imm20;
144 call_insn->i = tmp_call->i;
145 if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
146 return -EINVAL;
147 return 0;
148 } else {
149 struct ftrace_orig_insn *call_insn, *tmp_call;
150
151 call_insn = (void *)ftrace_orig_code;
152 tmp_call = (void *)replaced;
153 call_insn->sign = tmp_call->sign;
154 call_insn->imm20 = tmp_call->imm20;
155 if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
156 return -EINVAL;
157 return 0;
158 }
159}
160
161int ftrace_make_nop(struct module *mod,
162 struct dyn_ftrace *rec, unsigned long addr)
163{
164 int ret;
165 char *new;
166
167 ret = ftrace_make_nop_check(rec, addr);
168 if (ret)
169 return ret;
170 new = ftrace_nop_replace();
171 return ftrace_modify_code(rec->ip, NULL, new, 0);
172}
173
174int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
175{
176 unsigned long ip = rec->ip;
177 unsigned char *old, *new;
178
179 old= ftrace_nop_replace();
180 new = ftrace_call_replace(ip, addr);
181 return ftrace_modify_code(ip, old, new, 1);
182}
183
184/* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */
185int ftrace_update_ftrace_func(ftrace_func_t func)
186{
187 unsigned long ip;
188 unsigned long addr = ((struct fnptr *)ftrace_call)->ip;
189
190 if (func == ftrace_stub)
191 return 0;
192 ip = ((struct fnptr *)func)->ip;
193
194 ia64_patch_imm64(addr + 2, ip);
195
196 flush_icache_range(addr, addr + 16);
197 return 0;
198}
199
200/* run from kstop_machine */
201int __init ftrace_dyn_arch_init(void *data)
202{
203 *(unsigned long *)data = 0;
204
205 return 0;
206}
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 6da1f20d7372..2d311864e359 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -112,3 +112,9 @@ EXPORT_SYMBOL_GPL(esi_call_phys);
112#endif 112#endif
113extern char ia64_ivt[]; 113extern char ia64_ivt[];
114EXPORT_SYMBOL(ia64_ivt); 114EXPORT_SYMBOL(ia64_ivt);
115
116#include <asm/ftrace.h>
117#ifdef CONFIG_FUNCTION_TRACER
118/* mcount is defined in assembly */
119EXPORT_SYMBOL(_mcount);
120#endif
diff --git a/arch/m68k/include/asm/ftrace.h b/arch/m68k/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/m68k/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/mips/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/parisc/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/um/include/asm/ftrace.h b/arch/um/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/um/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 748e50a1a152..0885245e6808 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,8 @@ config X86
34 select HAVE_FUNCTION_TRACER 34 select HAVE_FUNCTION_TRACER
35 select HAVE_FUNCTION_GRAPH_TRACER 35 select HAVE_FUNCTION_GRAPH_TRACER
36 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 36 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
37 select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
38 select HAVE_FTRACE_SYSCALLS
37 select HAVE_KVM 39 select HAVE_KVM
38 select HAVE_ARCH_KGDB 40 select HAVE_ARCH_KGDB
39 select HAVE_ARCH_TRACEHOOK 41 select HAVE_ARCH_TRACEHOOK
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index b3894bf52fcd..e55dfc1ad453 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -126,6 +126,11 @@ void clflush_cache_range(void *addr, unsigned int size);
126#ifdef CONFIG_DEBUG_RODATA 126#ifdef CONFIG_DEBUG_RODATA
127void mark_rodata_ro(void); 127void mark_rodata_ro(void);
128extern const int rodata_test_data; 128extern const int rodata_test_data;
129void set_kernel_text_rw(void);
130void set_kernel_text_ro(void);
131#else
132static inline void set_kernel_text_rw(void) { }
133static inline void set_kernel_text_ro(void) { }
129#endif 134#endif
130 135
131#ifdef CONFIG_DEBUG_RODATA_TEST 136#ifdef CONFIG_DEBUG_RODATA_TEST
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 63a79c77d220..81937a5dc77c 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -111,6 +111,8 @@ enum fixed_addresses {
111#ifdef CONFIG_PARAVIRT 111#ifdef CONFIG_PARAVIRT
112 FIX_PARAVIRT_BOOTMAP, 112 FIX_PARAVIRT_BOOTMAP,
113#endif 113#endif
114 FIX_TEXT_POKE0, /* reserve 2 pages for text_poke() */
115 FIX_TEXT_POKE1,
114 __end_of_permanent_fixed_addresses, 116 __end_of_permanent_fixed_addresses,
115#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 117#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
116 FIX_OHCI1394_BASE, 118 FIX_OHCI1394_BASE,
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index db24c2278be0..bd2c6511c887 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,6 +28,13 @@
28 28
29#endif 29#endif
30 30
31/* FIXME: I don't want to stay hardcoded */
32#ifdef CONFIG_X86_64
33# define FTRACE_SYSCALL_MAX 296
34#else
35# define FTRACE_SYSCALL_MAX 333
36#endif
37
31#ifdef CONFIG_FUNCTION_TRACER 38#ifdef CONFIG_FUNCTION_TRACER
32#define MCOUNT_ADDR ((long)(mcount)) 39#define MCOUNT_ADDR ((long)(mcount))
33#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 40#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 8e0f8d199e05..86723035a515 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -80,8 +80,6 @@
80 80
81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ 81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
82 82
83#ifdef CONFIG_X86_PTRACE_BTS
84
85#ifndef __ASSEMBLY__ 83#ifndef __ASSEMBLY__
86#include <linux/types.h> 84#include <linux/types.h>
87 85
@@ -140,6 +138,5 @@ struct ptrace_bts_config {
140 BTS records are read from oldest to newest. 138 BTS records are read from oldest to newest.
141 Returns number of BTS records drained. 139 Returns number of BTS records drained.
142*/ 140*/
143#endif /* CONFIG_X86_PTRACE_BTS */
144 141
145#endif /* _ASM_X86_PTRACE_ABI_H */ 142#endif /* _ASM_X86_PTRACE_ABI_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index df9d5f78385e..8820a73ae090 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -94,6 +94,7 @@ struct thread_info {
94#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ 94#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
95#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ 95#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
96#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ 96#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
97#define TIF_SYSCALL_FTRACE 27 /* for ftrace syscall instrumentation */
97 98
98#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 99#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
99#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -115,15 +116,17 @@ struct thread_info {
115#define _TIF_FORCED_TF (1 << TIF_FORCED_TF) 116#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
116#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) 117#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
117#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) 118#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
119#define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE)
118 120
119/* work to do in syscall_trace_enter() */ 121/* work to do in syscall_trace_enter() */
120#define _TIF_WORK_SYSCALL_ENTRY \ 122#define _TIF_WORK_SYSCALL_ENTRY \
121 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \ 123 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \
122 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) 124 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)
123 125
124/* work to do in syscall_trace_leave() */ 126/* work to do in syscall_trace_leave() */
125#define _TIF_WORK_SYSCALL_EXIT \ 127#define _TIF_WORK_SYSCALL_EXIT \
126 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP) 128 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
129 _TIF_SYSCALL_FTRACE)
127 130
128/* work to do on interrupt/exception return */ 131/* work to do on interrupt/exception return */
129#define _TIF_WORK_MASK \ 132#define _TIF_WORK_MASK \
@@ -132,7 +135,7 @@ struct thread_info {
132 _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) 135 _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
133 136
134/* work to do on any return to user space */ 137/* work to do on any return to user space */
135#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) 138#define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE)
136 139
137/* Only used for 64 bit */ 140/* Only used for 64 bit */
138#define _TIF_DO_NOTIFY_MASK \ 141#define _TIF_DO_NOTIFY_MASK \
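
The thread_info.h hunk above reserves bit 27 as TIF_SYSCALL_FTRACE and folds its mask into the syscall entry, exit and allwork masks, so setting the flag forces a traced task off the assembly fast path and into syscall_trace_enter()/syscall_trace_leave(). A minimal user-space sketch of the mask arithmetic (helper names here are illustrative, not the kernel's):

#include <stdio.h>

#define TIF_SYSCALL_TRACE    0
#define TIF_SYSCALL_FTRACE  27

#define _TIF_SYSCALL_TRACE  (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE)

#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_FTRACE)

int main(void)
{
        /* e.g. set on the task when syscall tracing is enabled */
        unsigned long flags = _TIF_SYSCALL_FTRACE;

        /* the asm fast path tests thread_info->flags against the work mask;
         * any set bit forces the C slow path (syscall_trace_enter()) */
        if (flags & _TIF_WORK_SYSCALL_ENTRY)
                printf("slow path taken: 0x%08lx\n", flags);
        return 0;
}
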
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index c611ad64137f..145cce75cda7 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -66,7 +66,8 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
66obj-y += apic/ 66obj-y += apic/
67obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o 67obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
68obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 68obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
69obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 69obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
70obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
70obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 71obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
71obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 72obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
72obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 73obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4c80f1557433..f57658702571 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -5,6 +5,7 @@
5#include <linux/kprobes.h> 5#include <linux/kprobes.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/vmalloc.h> 7#include <linux/vmalloc.h>
8#include <linux/memory.h>
8#include <asm/alternative.h> 9#include <asm/alternative.h>
9#include <asm/sections.h> 10#include <asm/sections.h>
10#include <asm/pgtable.h> 11#include <asm/pgtable.h>
@@ -12,7 +13,9 @@
12#include <asm/nmi.h> 13#include <asm/nmi.h>
13#include <asm/vsyscall.h> 14#include <asm/vsyscall.h>
14#include <asm/cacheflush.h> 15#include <asm/cacheflush.h>
16#include <asm/tlbflush.h>
15#include <asm/io.h> 17#include <asm/io.h>
18#include <asm/fixmap.h>
16 19
17#define MAX_PATCH_LEN (255-1) 20#define MAX_PATCH_LEN (255-1)
18 21
@@ -226,6 +229,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
226{ 229{
227 u8 **ptr; 230 u8 **ptr;
228 231
232 mutex_lock(&text_mutex);
229 for (ptr = start; ptr < end; ptr++) { 233 for (ptr = start; ptr < end; ptr++) {
230 if (*ptr < text) 234 if (*ptr < text)
231 continue; 235 continue;
@@ -234,6 +238,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
234 /* turn DS segment override prefix into lock prefix */ 238 /* turn DS segment override prefix into lock prefix */
235 text_poke(*ptr, ((unsigned char []){0xf0}), 1); 239 text_poke(*ptr, ((unsigned char []){0xf0}), 1);
236 }; 240 };
241 mutex_unlock(&text_mutex);
237} 242}
238 243
239static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) 244static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
@@ -243,6 +248,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
243 if (noreplace_smp) 248 if (noreplace_smp)
244 return; 249 return;
245 250
251 mutex_lock(&text_mutex);
246 for (ptr = start; ptr < end; ptr++) { 252 for (ptr = start; ptr < end; ptr++) {
247 if (*ptr < text) 253 if (*ptr < text)
248 continue; 254 continue;
@@ -251,6 +257,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
251 /* turn lock prefix into DS segment override prefix */ 257 /* turn lock prefix into DS segment override prefix */
252 text_poke(*ptr, ((unsigned char []){0x3E}), 1); 258 text_poke(*ptr, ((unsigned char []){0x3E}), 1);
253 }; 259 };
260 mutex_unlock(&text_mutex);
254} 261}
255 262
256struct smp_alt_module { 263struct smp_alt_module {
@@ -500,15 +507,16 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
500 * It means the size must be writable atomically and the address must be aligned 507 * It means the size must be writable atomically and the address must be aligned
501 * in a way that permits an atomic write. It also makes sure we fit on a single 508 * in a way that permits an atomic write. It also makes sure we fit on a single
502 * page. 509 * page.
510 *
511 * Note: Must be called under text_mutex.
503 */ 512 */
504void *__kprobes text_poke(void *addr, const void *opcode, size_t len) 513void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
505{ 514{
515 unsigned long flags;
506 char *vaddr; 516 char *vaddr;
507 int nr_pages = 2;
508 struct page *pages[2]; 517 struct page *pages[2];
509 int i; 518 int i;
510 519
511 might_sleep();
512 if (!core_kernel_text((unsigned long)addr)) { 520 if (!core_kernel_text((unsigned long)addr)) {
513 pages[0] = vmalloc_to_page(addr); 521 pages[0] = vmalloc_to_page(addr);
514 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); 522 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -518,18 +526,21 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
518 pages[1] = virt_to_page(addr + PAGE_SIZE); 526 pages[1] = virt_to_page(addr + PAGE_SIZE);
519 } 527 }
520 BUG_ON(!pages[0]); 528 BUG_ON(!pages[0]);
521 if (!pages[1]) 529 local_irq_save(flags);
522 nr_pages = 1; 530 set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
523 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); 531 if (pages[1])
524 BUG_ON(!vaddr); 532 set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
525 local_irq_disable(); 533 vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
526 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); 534 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
527 local_irq_enable(); 535 clear_fixmap(FIX_TEXT_POKE0);
528 vunmap(vaddr); 536 if (pages[1])
537 clear_fixmap(FIX_TEXT_POKE1);
538 local_flush_tlb();
529 sync_core(); 539 sync_core();
530 /* Could also do a CLFLUSH here to speed up CPU recovery; but 540 /* Could also do a CLFLUSH here to speed up CPU recovery; but
531 that causes hangs on some VIA CPUs. */ 541 that causes hangs on some VIA CPUs. */
532 for (i = 0; i < len; i++) 542 for (i = 0; i < len; i++)
533 BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); 543 BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
544 local_irq_restore(flags);
534 return addr; 545 return addr;
535} 546}
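
With the alternative.c changes above, text_poke() maps the target page(s) through the dedicated FIX_TEXT_POKE0/1 fixmap slots with interrupts disabled instead of going through vmap(), so it no longer sleeps; in exchange, callers are expected to hold text_mutex, which the SMP lock/unlock paths now take themselves. A hedged sketch of the resulting calling convention (not taken from this patch; include paths follow the usual layout of this tree but are not verified here):

#include <linux/memory.h>       /* text_mutex */
#include <linux/mutex.h>
#include <asm/alternative.h>    /* text_poke() */

static void patch_lock_prefix(void *addr)
{
        static const unsigned char lock_prefix = 0xf0;

        mutex_lock(&text_mutex);
        /* text_poke() itself runs with IRQs off over the fixmap mapping */
        text_poke(addr, &lock_prefix, 1);
        mutex_unlock(&text_mutex);
}
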
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 23da96e57b17..05209b5cc6ca 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,7 +33,7 @@
33#include <linux/cpufreq.h> 33#include <linux/cpufreq.h>
34#include <linux/compiler.h> 34#include <linux/compiler.h>
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36#include <linux/ftrace.h> 36#include <trace/power.h>
37 37
38#include <linux/acpi.h> 38#include <linux/acpi.h>
39#include <linux/io.h> 39#include <linux/io.h>
@@ -72,6 +72,8 @@ struct acpi_cpufreq_data {
72 72
73static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); 73static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
74 74
75DEFINE_TRACE(power_mark);
76
75/* acpi_perf_data is a pointer to percpu data. */ 77/* acpi_perf_data is a pointer to percpu data. */
76static struct acpi_processor_performance *acpi_perf_data; 78static struct acpi_processor_performance *acpi_perf_data;
77 79
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index dd2130b0fb3e..95ea5fa7d444 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -15,6 +15,7 @@
15#include <linux/bug.h> 15#include <linux/bug.h>
16#include <linux/nmi.h> 16#include <linux/nmi.h>
17#include <linux/sysfs.h> 17#include <linux/sysfs.h>
18#include <linux/ftrace.h>
18 19
19#include <asm/stacktrace.h> 20#include <asm/stacktrace.h>
20 21
@@ -196,6 +197,11 @@ unsigned __kprobes long oops_begin(void)
196 int cpu; 197 int cpu;
197 unsigned long flags; 198 unsigned long flags;
198 199
200 /* notify the hw-branch tracer so it may disable tracing and
201 add the last trace to the trace buffer -
202 the earlier this happens, the more useful the trace. */
203 trace_hw_branch_oops();
204
199 oops_enter(); 205 oops_enter();
200 206
201 /* racy, but better than risking deadlock. */ 207 /* racy, but better than risking deadlock. */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 76f7141e0f91..61df77532120 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/list.h> 19#include <linux/list.h>
20 20
21#include <asm/cacheflush.h>
21#include <asm/ftrace.h> 22#include <asm/ftrace.h>
22#include <linux/ftrace.h> 23#include <linux/ftrace.h>
23#include <asm/nops.h> 24#include <asm/nops.h>
@@ -26,6 +27,18 @@
26 27
27#ifdef CONFIG_DYNAMIC_FTRACE 28#ifdef CONFIG_DYNAMIC_FTRACE
28 29
30int ftrace_arch_code_modify_prepare(void)
31{
32 set_kernel_text_rw();
33 return 0;
34}
35
36int ftrace_arch_code_modify_post_process(void)
37{
38 set_kernel_text_ro();
39 return 0;
40}
41
29union ftrace_code_union { 42union ftrace_code_union {
30 char code[MCOUNT_INSN_SIZE]; 43 char code[MCOUNT_INSN_SIZE];
31 struct { 44 struct {
@@ -66,11 +79,11 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
66 * 79 *
67 * 1) Put the instruction pointer into the IP buffer 80 * 1) Put the instruction pointer into the IP buffer
68 * and the new code into the "code" buffer. 81 * and the new code into the "code" buffer.
69 * 2) Set a flag that says we are modifying code 82 * 2) Wait for any running NMIs to finish and set a flag that says
70 * 3) Wait for any running NMIs to finish. 83 * we are modifying code, it is done in an atomic operation.
71 * 4) Write the code 84 * 3) Write the code
72 * 5) clear the flag. 85 * 4) clear the flag.
73 * 6) Wait for any running NMIs to finish. 86 * 5) Wait for any running NMIs to finish.
74 * 87 *
75 * If an NMI is executed, the first thing it does is to call 88 * If an NMI is executed, the first thing it does is to call
76 * "ftrace_nmi_enter". This will check if the flag is set to write 89 * "ftrace_nmi_enter". This will check if the flag is set to write
@@ -82,9 +95,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
82 * are the same as what exists. 95 * are the same as what exists.
83 */ 96 */
84 97
85static atomic_t in_nmi = ATOMIC_INIT(0); 98#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
99static atomic_t nmi_running = ATOMIC_INIT(0);
86static int mod_code_status; /* holds return value of text write */ 100static int mod_code_status; /* holds return value of text write */
87static int mod_code_write; /* set when NMI should do the write */
88static void *mod_code_ip; /* holds the IP to write to */ 101static void *mod_code_ip; /* holds the IP to write to */
89static void *mod_code_newcode; /* holds the text to write to the IP */ 102static void *mod_code_newcode; /* holds the text to write to the IP */
90 103
@@ -101,6 +114,20 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
101 return r; 114 return r;
102} 115}
103 116
117static void clear_mod_flag(void)
118{
119 int old = atomic_read(&nmi_running);
120
121 for (;;) {
122 int new = old & ~MOD_CODE_WRITE_FLAG;
123
124 if (old == new)
125 break;
126
127 old = atomic_cmpxchg(&nmi_running, old, new);
128 }
129}
130
104static void ftrace_mod_code(void) 131static void ftrace_mod_code(void)
105{ 132{
106 /* 133 /*
@@ -111,37 +138,52 @@ static void ftrace_mod_code(void)
111 */ 138 */
112 mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, 139 mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
113 MCOUNT_INSN_SIZE); 140 MCOUNT_INSN_SIZE);
141
142 /* if we fail, then kill any new writers */
143 if (mod_code_status)
144 clear_mod_flag();
114} 145}
115 146
116void ftrace_nmi_enter(void) 147void ftrace_nmi_enter(void)
117{ 148{
118 atomic_inc(&in_nmi); 149 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
119 /* Must have in_nmi seen before reading write flag */ 150 smp_rmb();
120 smp_mb();
121 if (mod_code_write) {
122 ftrace_mod_code(); 151 ftrace_mod_code();
123 atomic_inc(&nmi_update_count); 152 atomic_inc(&nmi_update_count);
124 } 153 }
154 /* Must have previous changes seen before executions */
155 smp_mb();
125} 156}
126 157
127void ftrace_nmi_exit(void) 158void ftrace_nmi_exit(void)
128{ 159{
129 /* Finish all executions before clearing in_nmi */ 160 /* Finish all executions before clearing nmi_running */
130 smp_wmb(); 161 smp_mb();
131 atomic_dec(&in_nmi); 162 atomic_dec(&nmi_running);
163}
164
165static void wait_for_nmi_and_set_mod_flag(void)
166{
167 if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
168 return;
169
170 do {
171 cpu_relax();
172 } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
173
174 nmi_wait_count++;
132} 175}
133 176
134static void wait_for_nmi(void) 177static void wait_for_nmi(void)
135{ 178{
136 int waited = 0; 179 if (!atomic_read(&nmi_running))
180 return;
137 181
138 while (atomic_read(&in_nmi)) { 182 do {
139 waited = 1;
140 cpu_relax(); 183 cpu_relax();
141 } 184 } while (atomic_read(&nmi_running));
142 185
143 if (waited) 186 nmi_wait_count++;
144 nmi_wait_count++;
145} 187}
146 188
147static int 189static int
@@ -151,14 +193,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
151 mod_code_newcode = new_code; 193 mod_code_newcode = new_code;
152 194
153 /* The buffers need to be visible before we let NMIs write them */ 195 /* The buffers need to be visible before we let NMIs write them */
154 smp_wmb();
155
156 mod_code_write = 1;
157
158 /* Make sure write bit is visible before we wait on NMIs */
159 smp_mb(); 196 smp_mb();
160 197
161 wait_for_nmi(); 198 wait_for_nmi_and_set_mod_flag();
162 199
163 /* Make sure all running NMIs have finished before we write the code */ 200 /* Make sure all running NMIs have finished before we write the code */
164 smp_mb(); 201 smp_mb();
@@ -166,13 +203,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
166 ftrace_mod_code(); 203 ftrace_mod_code();
167 204
168 /* Make sure the write happens before clearing the bit */ 205 /* Make sure the write happens before clearing the bit */
169 smp_wmb();
170
171 mod_code_write = 0;
172
173 /* make sure NMIs see the cleared bit */
174 smp_mb(); 206 smp_mb();
175 207
208 clear_mod_flag();
176 wait_for_nmi(); 209 wait_for_nmi();
177 210
178 return mod_code_status; 211 return mod_code_status;
@@ -368,25 +401,6 @@ int ftrace_disable_ftrace_graph_caller(void)
368 return ftrace_mod_jmp(ip, old_offset, new_offset); 401 return ftrace_mod_jmp(ip, old_offset, new_offset);
369} 402}
370 403
371#else /* CONFIG_DYNAMIC_FTRACE */
372
373/*
374 * These functions are picked from those used on
375 * this page for dynamic ftrace. They have been
376 * simplified to ignore all traces in NMI context.
377 */
378static atomic_t in_nmi;
379
380void ftrace_nmi_enter(void)
381{
382 atomic_inc(&in_nmi);
383}
384
385void ftrace_nmi_exit(void)
386{
387 atomic_dec(&in_nmi);
388}
389
390#endif /* !CONFIG_DYNAMIC_FTRACE */ 404#endif /* !CONFIG_DYNAMIC_FTRACE */
391 405
392/* 406/*
@@ -396,14 +410,13 @@ void ftrace_nmi_exit(void)
396void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 410void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
397{ 411{
398 unsigned long old; 412 unsigned long old;
399 unsigned long long calltime;
400 int faulted; 413 int faulted;
401 struct ftrace_graph_ent trace; 414 struct ftrace_graph_ent trace;
402 unsigned long return_hooker = (unsigned long) 415 unsigned long return_hooker = (unsigned long)
403 &return_to_handler; 416 &return_to_handler;
404 417
405 /* Nmi's are currently unsupported */ 418 /* Nmi's are currently unsupported */
406 if (unlikely(atomic_read(&in_nmi))) 419 if (unlikely(in_nmi()))
407 return; 420 return;
408 421
409 if (unlikely(atomic_read(&current->tracing_graph_pause))) 422 if (unlikely(atomic_read(&current->tracing_graph_pause)))
@@ -439,17 +452,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
439 return; 452 return;
440 } 453 }
441 454
442 if (unlikely(!__kernel_text_address(old))) { 455 if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
443 ftrace_graph_stop();
444 *parent = old;
445 WARN_ON(1);
446 return;
447 }
448
449 calltime = cpu_clock(raw_smp_processor_id());
450
451 if (ftrace_push_return_trace(old, calltime,
452 self_addr, &trace.depth) == -EBUSY) {
453 *parent = old; 456 *parent = old;
454 return; 457 return;
455 } 458 }
@@ -463,3 +466,66 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
463 } 466 }
464} 467}
465#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 468#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
469
470#ifdef CONFIG_FTRACE_SYSCALLS
471
472extern unsigned long __start_syscalls_metadata[];
473extern unsigned long __stop_syscalls_metadata[];
474extern unsigned long *sys_call_table;
475
476static struct syscall_metadata **syscalls_metadata;
477
478static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
479{
480 struct syscall_metadata *start;
481 struct syscall_metadata *stop;
482 char str[KSYM_SYMBOL_LEN];
483
484
485 start = (struct syscall_metadata *)__start_syscalls_metadata;
486 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
487 kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
488
489 for ( ; start < stop; start++) {
490 if (start->name && !strcmp(start->name, str))
491 return start;
492 }
493 return NULL;
494}
495
496struct syscall_metadata *syscall_nr_to_meta(int nr)
497{
498 if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
499 return NULL;
500
501 return syscalls_metadata[nr];
502}
503
504void arch_init_ftrace_syscalls(void)
505{
506 int i;
507 struct syscall_metadata *meta;
508 unsigned long **psys_syscall_table = &sys_call_table;
509 static atomic_t refs;
510
511 if (atomic_inc_return(&refs) != 1)
512 goto end;
513
514 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
515 FTRACE_SYSCALL_MAX, GFP_KERNEL);
516 if (!syscalls_metadata) {
517 WARN_ON(1);
518 return;
519 }
520
521 for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
522 meta = find_syscall_meta(psys_syscall_table[i]);
523 syscalls_metadata[i] = meta;
524 }
525 return;
526
527 /* Paranoid: avoid overflow */
528end:
529 atomic_dec(&refs);
530}
531#endif
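
The core of the ftrace.c change is the merged nmi_running word: bit 31 (MOD_CODE_WRITE_FLAG) marks a pending text write while the low bits count NMIs in flight, so an NMI registers itself and observes the pending-write flag with a single atomic_inc_return() instead of the old in_nmi counter plus mod_code_write flag and the extra barriers between them. A user-space model of the handshake, with C11 atomics standing in for the kernel's atomic_t (illustration only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define MOD_CODE_WRITE_FLAG (1u << 31)  /* set when an NMI should do the write */

static _Atomic unsigned int nmi_running;

/* writer side: only set the flag once no NMI is in flight
 * (cf. wait_for_nmi_and_set_mod_flag() above) */
static void wait_and_set_mod_flag(void)
{
        unsigned int expected = 0;

        while (!atomic_compare_exchange_weak(&nmi_running, &expected,
                                             MOD_CODE_WRITE_FLAG))
                expected = 0;   /* an NMI was running, retry */
}

static void clear_mod_flag(void)
{
        atomic_fetch_and(&nmi_running, ~MOD_CODE_WRITE_FLAG);
}

/* NMI side: one atomic add both counts the NMI and samples the flag */
static void nmi_enter_model(void)
{
        if ((atomic_fetch_add(&nmi_running, 1) + 1) & MOD_CODE_WRITE_FLAG)
                puts("NMI performs the pending text write");
}

static void nmi_exit_model(void)
{
        atomic_fetch_sub(&nmi_running, 1);
}

int main(void)
{
        wait_and_set_mod_flag();
        nmi_enter_model();      /* an NMI arriving now does the write itself */
        nmi_exit_model();
        clear_mod_flag();
        return 0;
}
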
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 55b94614e348..7b5169d2b000 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -638,13 +638,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
638#else 638#else
639 " pushf\n" 639 " pushf\n"
640 /* 640 /*
641 * Skip cs, ip, orig_ax. 641 * Skip cs, ip, orig_ax and gs.
642 * trampoline_handler() will plug in these values 642 * trampoline_handler() will plug in these values
643 */ 643 */
644 " subl $12, %esp\n" 644 " subl $16, %esp\n"
645 " pushl %fs\n" 645 " pushl %fs\n"
646 " pushl %ds\n"
647 " pushl %es\n" 646 " pushl %es\n"
647 " pushl %ds\n"
648 " pushl %eax\n" 648 " pushl %eax\n"
649 " pushl %ebp\n" 649 " pushl %ebp\n"
650 " pushl %edi\n" 650 " pushl %edi\n"
@@ -655,10 +655,10 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
655 " movl %esp, %eax\n" 655 " movl %esp, %eax\n"
656 " call trampoline_handler\n" 656 " call trampoline_handler\n"
657 /* Move flags to cs */ 657 /* Move flags to cs */
658 " movl 52(%esp), %edx\n" 658 " movl 56(%esp), %edx\n"
659 " movl %edx, 48(%esp)\n" 659 " movl %edx, 52(%esp)\n"
660 /* Replace saved flags with true return address. */ 660 /* Replace saved flags with true return address. */
661 " movl %eax, 52(%esp)\n" 661 " movl %eax, 56(%esp)\n"
662 " popl %ebx\n" 662 " popl %ebx\n"
663 " popl %ecx\n" 663 " popl %ecx\n"
664 " popl %edx\n" 664 " popl %edx\n"
@@ -666,8 +666,8 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
666 " popl %edi\n" 666 " popl %edi\n"
667 " popl %ebp\n" 667 " popl %ebp\n"
668 " popl %eax\n" 668 " popl %eax\n"
669 /* Skip ip, orig_ax, es, ds, fs */ 669 /* Skip ds, es, fs, gs, orig_ax and ip */
670 " addl $20, %esp\n" 670 " addl $24, %esp\n"
671 " popf\n" 671 " popf\n"
672#endif 672#endif
673 " ret\n"); 673 " ret\n");
@@ -691,6 +691,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
691 regs->cs = __KERNEL_CS; 691 regs->cs = __KERNEL_CS;
692#else 692#else
693 regs->cs = __KERNEL_CS | get_kernel_rpl(); 693 regs->cs = __KERNEL_CS | get_kernel_rpl();
694 regs->gs = 0;
694#endif 695#endif
695 regs->ip = trampoline_address; 696 regs->ip = trampoline_address;
696 regs->orig_ax = ~0UL; 697 regs->orig_ax = ~0UL;
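
The kretprobe trampoline offsets above follow directly from the 32-bit pt_regs layout once gs is saved at the bottom of the frame; every slot above it moves up by 4 bytes. Worked offset table (bytes from %esp after all pushes):

        pushes, low to high:  bx cx dx si di bp ax ds es fs | gs orig_ax ip cs | flags
        byte offset:           0  4  8 12 16 20 24 28 32 36   40      44 48 52    56

        - gs/orig_ax/ip/cs are reserved rather than pushed: 4 * 4 bytes -> subl $16, %esp
        - flags therefore sits at 56(%esp) and cs at 52(%esp), hence the new movl offsets
        - on exit, ds/es/fs/gs/orig_ax/ip are skipped: 6 * 4 bytes -> addl $24, %esp
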
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 156f87582c6c..62fc75b67e45 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -8,7 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/pm.h> 9#include <linux/pm.h>
10#include <linux/clockchips.h> 10#include <linux/clockchips.h>
11#include <linux/ftrace.h> 11#include <trace/power.h>
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/apic.h> 13#include <asm/apic.h>
14#include <asm/idle.h> 14#include <asm/idle.h>
@@ -22,6 +22,9 @@ EXPORT_SYMBOL(idle_nomwait);
22 22
23struct kmem_cache *task_xstate_cachep; 23struct kmem_cache *task_xstate_cachep;
24 24
25DEFINE_TRACE(power_start);
26DEFINE_TRACE(power_end);
27
25int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 28int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
26{ 29{
27 *dst = *src; 30 *dst = *src;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 19378715f415..5c6e46320db1 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,6 +21,7 @@
21#include <linux/audit.h> 21#include <linux/audit.h>
22#include <linux/seccomp.h> 22#include <linux/seccomp.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/ftrace.h>
24 25
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
@@ -1415,6 +1416,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1415 tracehook_report_syscall_entry(regs)) 1416 tracehook_report_syscall_entry(regs))
1416 ret = -1L; 1417 ret = -1L;
1417 1418
1419 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
1420 ftrace_syscall_enter(regs);
1421
1418 if (unlikely(current->audit_context)) { 1422 if (unlikely(current->audit_context)) {
1419 if (IS_IA32) 1423 if (IS_IA32)
1420 audit_syscall_entry(AUDIT_ARCH_I386, 1424 audit_syscall_entry(AUDIT_ARCH_I386,
@@ -1438,6 +1442,9 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1438 if (unlikely(current->audit_context)) 1442 if (unlikely(current->audit_context))
1439 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1443 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1440 1444
1445 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
1446 ftrace_syscall_exit(regs);
1447
1441 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1448 if (test_thread_flag(TIF_SYSCALL_TRACE))
1442 tracehook_report_syscall_exit(regs, 0); 1449 tracehook_report_syscall_exit(regs, 0);
1443 1450
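
With the ptrace.c hooks above, a task carrying TIF_SYSCALL_FTRACE is routed through ftrace_syscall_enter()/ftrace_syscall_exit() around every system call. A hedged outline of what such an entry hook can do with the metadata helpers this series introduces (the function name is illustrative; the real implementation lives in the tracer core, not in ptrace.c):

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

static void sketch_syscall_enter(struct pt_regs *regs)
{
        struct syscall_metadata *meta;
        int nr;

        nr = syscall_get_nr(current, regs);
        meta = syscall_nr_to_meta(nr);  /* NULL for out-of-range numbers */
        if (!meta)
                return;

        pr_debug("sys_%s with %d args\n", meta->name, meta->nb_args);
}
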
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 0a303c3ed11f..a58504ea78cc 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -59,7 +59,8 @@ config KVM_AMD
59 59
60config KVM_TRACE 60config KVM_TRACE
61 bool "KVM trace support" 61 bool "KVM trace support"
62 depends on KVM && MARKERS && SYSFS 62 depends on KVM && SYSFS
63 select MARKERS
63 select RELAY 64 select RELAY
64 select DEBUG_FS 65 select DEBUG_FS
65 default n 66 default n
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index db81e9a8556b..749559ed80f5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -1054,17 +1054,47 @@ static noinline int do_test_wp_bit(void)
1054const int rodata_test_data = 0xC3; 1054const int rodata_test_data = 0xC3;
1055EXPORT_SYMBOL_GPL(rodata_test_data); 1055EXPORT_SYMBOL_GPL(rodata_test_data);
1056 1056
1057static int kernel_set_to_readonly;
1058
1059void set_kernel_text_rw(void)
1060{
1061 unsigned long start = PFN_ALIGN(_text);
1062 unsigned long size = PFN_ALIGN(_etext) - start;
1063
1064 if (!kernel_set_to_readonly)
1065 return;
1066
1067 pr_debug("Set kernel text: %lx - %lx for read write\n",
1068 start, start+size);
1069
1070 set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
1071}
1072
1073void set_kernel_text_ro(void)
1074{
1075 unsigned long start = PFN_ALIGN(_text);
1076 unsigned long size = PFN_ALIGN(_etext) - start;
1077
1078 if (!kernel_set_to_readonly)
1079 return;
1080
1081 pr_debug("Set kernel text: %lx - %lx for read only\n",
1082 start, start+size);
1083
1084 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
1085}
1086
1057void mark_rodata_ro(void) 1087void mark_rodata_ro(void)
1058{ 1088{
1059 unsigned long start = PFN_ALIGN(_text); 1089 unsigned long start = PFN_ALIGN(_text);
1060 unsigned long size = PFN_ALIGN(_etext) - start; 1090 unsigned long size = PFN_ALIGN(_etext) - start;
1061 1091
1062#ifndef CONFIG_DYNAMIC_FTRACE
1063 /* Dynamic tracing modifies the kernel text section */
1064 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); 1092 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
1065 printk(KERN_INFO "Write protecting the kernel text: %luk\n", 1093 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
1066 size >> 10); 1094 size >> 10);
1067 1095
1096 kernel_set_to_readonly = 1;
1097
1068#ifdef CONFIG_CPA_DEBUG 1098#ifdef CONFIG_CPA_DEBUG
1069 printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", 1099 printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
1070 start, start+size); 1100 start, start+size);
@@ -1073,7 +1103,6 @@ void mark_rodata_ro(void)
1073 printk(KERN_INFO "Testing CPA: write protecting again\n"); 1103 printk(KERN_INFO "Testing CPA: write protecting again\n");
1074 set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); 1104 set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
1075#endif 1105#endif
1076#endif /* CONFIG_DYNAMIC_FTRACE */
1077 1106
1078 start += size; 1107 start += size;
1079 size = (unsigned long)__end_rodata - start; 1108 size = (unsigned long)__end_rodata - start;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 54efa57d1c03..1753e8020df6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -734,21 +734,48 @@ void __init mem_init(void)
734const int rodata_test_data = 0xC3; 734const int rodata_test_data = 0xC3;
735EXPORT_SYMBOL_GPL(rodata_test_data); 735EXPORT_SYMBOL_GPL(rodata_test_data);
736 736
737static int kernel_set_to_readonly;
738
739void set_kernel_text_rw(void)
740{
741 unsigned long start = PFN_ALIGN(_stext);
742 unsigned long end = PFN_ALIGN(__start_rodata);
743
744 if (!kernel_set_to_readonly)
745 return;
746
747 pr_debug("Set kernel text: %lx - %lx for read write\n",
748 start, end);
749
750 set_memory_rw(start, (end - start) >> PAGE_SHIFT);
751}
752
753void set_kernel_text_ro(void)
754{
755 unsigned long start = PFN_ALIGN(_stext);
756 unsigned long end = PFN_ALIGN(__start_rodata);
757
758 if (!kernel_set_to_readonly)
759 return;
760
761 pr_debug("Set kernel text: %lx - %lx for read only\n",
762 start, end);
763
764 set_memory_ro(start, (end - start) >> PAGE_SHIFT);
765}
766
737void mark_rodata_ro(void) 767void mark_rodata_ro(void)
738{ 768{
739 unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata); 769 unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
740 unsigned long rodata_start = 770 unsigned long rodata_start =
741 ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK; 771 ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
742 772
743#ifdef CONFIG_DYNAMIC_FTRACE
744 /* Dynamic tracing modifies the kernel text section */
745 start = rodata_start;
746#endif
747
748 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", 773 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
749 (end - start) >> 10); 774 (end - start) >> 10);
750 set_memory_ro(start, (end - start) >> PAGE_SHIFT); 775 set_memory_ro(start, (end - start) >> PAGE_SHIFT);
751 776
777 kernel_set_to_readonly = 1;
778
752 /* 779 /*
753 * The rodata section (but not the kernel text!) should also be 780 * The rodata section (but not the kernel text!) should also be
754 * not-executable. 781 * not-executable.
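
As on 32-bit, the range that set_kernel_text_rw()/set_kernel_text_ro() flip is bounded by the text-section symbols rather than by __end_rodata, so rodata stays read-only while ftrace patches mcount sites, and both helpers are no-ops until mark_rodata_ro() has set kernel_set_to_readonly. A worked example of the page arithmetic, with made-up addresses purely for illustration:

        start = PFN_ALIGN(_stext)          e.g. 0xffffffff81000000
        end   = PFN_ALIGN(__start_rodata)  e.g. 0xffffffff81600000
        pages = (end - start) >> PAGE_SHIFT = 0x600000 >> 12 = 1536

so each set_memory_rw()/set_memory_ro() call here would cover 1536 4 KiB text pages.
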
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/block/Kconfig b/block/Kconfig
index 0cbb3b88b59a..e7d12782bcfb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -44,22 +44,6 @@ config LBD
44 44
45 If unsure, say N. 45 If unsure, say N.
46 46
47config BLK_DEV_IO_TRACE
48 bool "Support for tracing block io actions"
49 depends on SYSFS
50 select RELAY
51 select DEBUG_FS
52 select TRACEPOINTS
53 help
54 Say Y here if you want to be able to trace the block layer actions
55 on a given queue. Tracing allows you to see any traffic happening
56 on a block device queue. For more information (and the userspace
57 support tools needed), fetch the blktrace tools from:
58
59 git://git.kernel.dk/blktrace.git
60
61 If unsure, say N.
62
63config BLK_DEV_BSG 47config BLK_DEV_BSG
64 bool "Block layer SG support v4 (EXPERIMENTAL)" 48 bool "Block layer SG support v4 (EXPERIMENTAL)"
65 depends on EXPERIMENTAL 49 depends on EXPERIMENTAL
diff --git a/block/Makefile b/block/Makefile
index bfe73049f939..e9fa4dd690f2 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -13,6 +13,5 @@ obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
13obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o 13obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
14obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o 14obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
15 15
16obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
17obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o 16obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
18obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o 17obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index ebea9b2c30a5..6de020d078e1 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -283,7 +283,7 @@ static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
283} 283}
284static struct sysrq_key_op sysrq_ftrace_dump_op = { 284static struct sysrq_key_op sysrq_ftrace_dump_op = {
285 .handler = sysrq_ftrace_dump, 285 .handler = sysrq_ftrace_dump,
286 .help_msg = "dumpZ-ftrace-buffer", 286 .help_msg = "dump-ftrace-buffer(Z)",
287 .action_msg = "Dump ftrace buffer", 287 .action_msg = "Dump ftrace buffer",
288 .enable_mask = SYSRQ_ENABLE_DUMP, 288 .enable_mask = SYSRQ_ENABLE_DUMP,
289}; 289};
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e76d715e4342..f0e99d4c066b 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -161,7 +161,7 @@ struct op_sample
161{ 161{
162 entry->event = ring_buffer_lock_reserve 162 entry->event = ring_buffer_lock_reserve
163 (op_ring_buffer_write, sizeof(struct op_sample) + 163 (op_ring_buffer_write, sizeof(struct op_sample) +
164 size * sizeof(entry->sample->data[0]), &entry->irq_flags); 164 size * sizeof(entry->sample->data[0]));
165 if (entry->event) 165 if (entry->event)
166 entry->sample = ring_buffer_event_data(entry->event); 166 entry->sample = ring_buffer_event_data(entry->event);
167 else 167 else
@@ -178,8 +178,7 @@ struct op_sample
178 178
179int op_cpu_buffer_write_commit(struct op_entry *entry) 179int op_cpu_buffer_write_commit(struct op_entry *entry)
180{ 180{
181 return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event, 181 return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
182 entry->irq_flags);
183} 182}
184 183
185struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) 184struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
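
The oprofile hunk tracks a ring-buffer API change elsewhere in this series: ring_buffer_lock_reserve() and ring_buffer_unlock_commit() no longer pass interrupt flags back and forth with the caller. A hedged sketch of the reserve/commit pattern after the change (function name and error handling are illustrative):

#include <linux/errno.h>
#include <linux/ring_buffer.h>
#include <linux/string.h>

static int write_sample(struct ring_buffer *buffer, const void *data,
                        unsigned long len)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (!event)
                return -ENOMEM;

        memcpy(ring_buffer_event_data(event), data, len);
        return ring_buffer_unlock_commit(buffer, event);
}
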
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 81ae9ea3c6e1..0662ba6de85a 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -30,6 +30,7 @@
30 30
31static struct vfsmount *debugfs_mount; 31static struct vfsmount *debugfs_mount;
32static int debugfs_mount_count; 32static int debugfs_mount_count;
33static bool debugfs_registered;
33 34
34static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev) 35static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev)
35{ 36{
@@ -496,6 +497,16 @@ exit:
496} 497}
497EXPORT_SYMBOL_GPL(debugfs_rename); 498EXPORT_SYMBOL_GPL(debugfs_rename);
498 499
500/**
501 * debugfs_initialized - Tells whether debugfs has been registered
502 */
503bool debugfs_initialized(void)
504{
505 return debugfs_registered;
506}
507EXPORT_SYMBOL_GPL(debugfs_initialized);
508
509
499static struct kobject *debug_kobj; 510static struct kobject *debug_kobj;
500 511
501static int __init debugfs_init(void) 512static int __init debugfs_init(void)
@@ -509,11 +520,16 @@ static int __init debugfs_init(void)
509 retval = register_filesystem(&debug_fs_type); 520 retval = register_filesystem(&debug_fs_type);
510 if (retval) 521 if (retval)
511 kobject_put(debug_kobj); 522 kobject_put(debug_kobj);
523 else
524 debugfs_registered = true;
525
512 return retval; 526 return retval;
513} 527}
514 528
515static void __exit debugfs_exit(void) 529static void __exit debugfs_exit(void)
516{ 530{
531 debugfs_registered = false;
532
517 simple_release_fs(&debugfs_mount, &debugfs_mount_count); 533 simple_release_fs(&debugfs_mount, &debugfs_mount_count);
518 unregister_filesystem(&debug_fs_type); 534 unregister_filesystem(&debug_fs_type);
519 kobject_put(debug_kobj); 535 kobject_put(debug_kobj);
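
debugfs_initialized() lets late consumers (the tracer core in this series) tell "debugfs never registered" apart from a transient failure before they try to create files. A minimal caller sketch; the directory name is chosen only for illustration:

#include <linux/debugfs.h>

static struct dentry *create_trace_dir(void)
{
        if (!debugfs_initialized())
                return NULL;    /* nothing to hang our files on */

        return debugfs_create_dir("tracing", NULL);
}
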
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 38e337d51ced..99e33ef40be4 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -19,6 +19,7 @@
19#include <linux/kmod.h> 19#include <linux/kmod.h>
20#include <linux/ctype.h> 20#include <linux/ctype.h>
21#include <linux/genhd.h> 21#include <linux/genhd.h>
22#include <linux/blktrace_api.h>
22 23
23#include "check.h" 24#include "check.h"
24 25
@@ -294,6 +295,9 @@ static struct attribute_group part_attr_group = {
294 295
295static struct attribute_group *part_attr_groups[] = { 296static struct attribute_group *part_attr_groups[] = {
296 &part_attr_group, 297 &part_attr_group,
298#ifdef CONFIG_BLK_DEV_IO_TRACE
299 &blk_trace_attr_group,
300#endif
297 NULL 301 NULL
298}; 302};
299 303
diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-frv/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a654d724d3b0..7fa660fd449c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,30 @@
61#define BRANCH_PROFILE() 61#define BRANCH_PROFILE()
62#endif 62#endif
63 63
64#ifdef CONFIG_EVENT_TRACER
65#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
66 *(_ftrace_events) \
67 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
68#else
69#define FTRACE_EVENTS()
70#endif
71
72#ifdef CONFIG_TRACING
73#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
74 *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
75 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
76#else
77#define TRACE_PRINTKS()
78#endif
79
80#ifdef CONFIG_FTRACE_SYSCALLS
81#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
82 *(__syscalls_metadata) \
83 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
84#else
85#define TRACE_SYSCALLS()
86#endif
87
64/* .data section */ 88/* .data section */
65#define DATA_DATA \ 89#define DATA_DATA \
66 *(.data) \ 90 *(.data) \
@@ -86,7 +110,10 @@
86 *(__verbose) \ 110 *(__verbose) \
87 VMLINUX_SYMBOL(__stop___verbose) = .; \ 111 VMLINUX_SYMBOL(__stop___verbose) = .; \
88 LIKELY_PROFILE() \ 112 LIKELY_PROFILE() \
89 BRANCH_PROFILE() 113 BRANCH_PROFILE() \
114 TRACE_PRINTKS() \
115 FTRACE_EVENTS() \
116 TRACE_SYSCALLS()
90 117
91#define RO_DATA(align) \ 118#define RO_DATA(align) \
92 . = ALIGN((align)); \ 119 . = ALIGN((align)); \
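
Each of the three new linker-script macros brackets its section with __start_*/__stop_* symbols so the tracer can walk the table at run time. A hedged sketch of such a walk for the syscall metadata section (the x86 code earlier in this patch declares the same symbols as unsigned long arrays and casts; typing them as struct arrays here is equivalent and done only for clarity):

#include <linux/ftrace.h>
#include <linux/kernel.h>

extern struct syscall_metadata __start_syscalls_metadata[];
extern struct syscall_metadata __stop_syscalls_metadata[];

static void walk_syscall_metadata(void)
{
        struct syscall_metadata *m;

        for (m = __start_syscalls_metadata; m < __stop_syscalls_metadata; m++)
                pr_debug("%s takes %d args\n", m->name, m->nb_args);
}
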
diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-m32r/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-mn10300/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
144 144
145#ifdef __KERNEL__ 145#ifdef __KERNEL__
146#if defined(CONFIG_BLK_DEV_IO_TRACE) 146#if defined(CONFIG_BLK_DEV_IO_TRACE)
147
148#include <linux/sysfs.h>
149
147struct blk_trace { 150struct blk_trace {
148 int trace_state; 151 int trace_state;
149 struct rchan *rchan; 152 struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
194extern int blk_trace_startstop(struct request_queue *q, int start); 197extern int blk_trace_startstop(struct request_queue *q, int start);
195extern int blk_trace_remove(struct request_queue *q); 198extern int blk_trace_remove(struct request_queue *q);
196 199
200extern struct attribute_group blk_trace_attr_group;
201
197#else /* !CONFIG_BLK_DEV_IO_TRACE */ 202#else /* !CONFIG_BLK_DEV_IO_TRACE */
198#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 203#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
199#define blk_trace_shutdown(q) do { } while (0) 204#define blk_trace_shutdown(q) do { } while (0)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d95da1020f1c..6faa7e549de4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
68 unsigned long miss; 68 unsigned long miss;
69 unsigned long hit; 69 unsigned long hit;
70 }; 70 };
71 unsigned long miss_hit[2];
71 }; 72 };
72}; 73};
73 74
@@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
125 .line = __LINE__, \ 126 .line = __LINE__, \
126 }; \ 127 }; \
127 ______r = !!(cond); \ 128 ______r = !!(cond); \
128 if (______r) \ 129 ______f.miss_hit[______r]++; \
129 ______f.hit++; \
130 else \
131 ______f.miss++; \
132 ______r; \ 130 ______r; \
133 })) 131 }))
134#endif /* CONFIG_PROFILE_ALL_BRANCHES */ 132#endif /* CONFIG_PROFILE_ALL_BRANCHES */
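
The compiler.h change replaces the hit/miss if/else with an array indexed by the branch outcome, so the branch profiler no longer inserts a conditional branch of its own just to count each conditional branch. A small user-space demonstration of the same trick (needs C99/C11 for the loop declaration and the anonymous struct):

#include <stdio.h>

struct branch_data {
        union {
                struct {
                        unsigned long miss;
                        unsigned long hit;
                };
                unsigned long miss_hit[2];
        };
};

int main(void)
{
        struct branch_data d = { .miss_hit = { 0, 0 } };
        int values[] = { 0, 3, 0, 7, 9 };

        for (int i = 0; i < 5; i++) {
                int r = !!(values[i] != 0);
                d.miss_hit[r]++;        /* [0] counts misses, [1] counts hits */
        }

        printf("miss=%lu hit=%lu\n", d.miss, d.hit);    /* miss=2 hit=3 */
        return 0;
}
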
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index af0e01d4c663..eb5c2ba2f81a 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
71struct dentry *debugfs_create_blob(const char *name, mode_t mode, 71struct dentry *debugfs_create_blob(const char *name, mode_t mode,
72 struct dentry *parent, 72 struct dentry *parent,
73 struct debugfs_blob_wrapper *blob); 73 struct debugfs_blob_wrapper *blob);
74
75bool debugfs_initialized(void);
76
74#else 77#else
75 78
76#include <linux/err.h> 79#include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
183 return ERR_PTR(-ENODEV); 186 return ERR_PTR(-ENODEV);
184} 187}
185 188
189static inline bool debugfs_initialized(void)
190{
191 return false;
192}
193
186#endif 194#endif
187 195
188#endif 196#endif
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a7f8134c594e..015a3d22cf74 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
1#ifndef _LINUX_FTRACE_H 1#ifndef _LINUX_FTRACE_H
2#define _LINUX_FTRACE_H 2#define _LINUX_FTRACE_H
3 3
4#include <linux/linkage.h> 4#include <linux/trace_clock.h>
5#include <linux/fs.h>
6#include <linux/ktime.h>
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6#include <linux/linkage.h>
11#include <linux/bitops.h> 7#include <linux/bitops.h>
8#include <linux/module.h>
9#include <linux/ktime.h>
12#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/fs.h>
14
15#include <asm/ftrace.h>
13 16
14#ifdef CONFIG_FUNCTION_TRACER 17#ifdef CONFIG_FUNCTION_TRACER
15 18
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
95 loff_t *ppos); 98 loff_t *ppos);
96#endif 99#endif
97 100
101struct ftrace_func_command {
102 struct list_head list;
103 char *name;
104 int (*func)(char *func, char *cmd,
105 char *params, int enable);
106};
107
98#ifdef CONFIG_DYNAMIC_FTRACE 108#ifdef CONFIG_DYNAMIC_FTRACE
99/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ 109
100#include <asm/ftrace.h> 110int ftrace_arch_code_modify_prepare(void);
111int ftrace_arch_code_modify_post_process(void);
112
113struct seq_file;
114
115struct ftrace_probe_ops {
116 void (*func)(unsigned long ip,
117 unsigned long parent_ip,
118 void **data);
119 int (*callback)(unsigned long ip, void **data);
120 void (*free)(void **data);
121 int (*print)(struct seq_file *m,
122 unsigned long ip,
123 struct ftrace_probe_ops *ops,
124 void *data);
125};
126
127extern int
128register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
129 void *data);
130extern void
131unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
132 void *data);
133extern void
134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
135extern void unregister_ftrace_function_probe_all(char *glob);
101 136
102enum { 137enum {
103 FTRACE_FL_FREE = (1 << 0), 138 FTRACE_FL_FREE = (1 << 0),
@@ -110,15 +145,23 @@ enum {
110}; 145};
111 146
112struct dyn_ftrace { 147struct dyn_ftrace {
113 struct list_head list; 148 union {
114 unsigned long ip; /* address of mcount call-site */ 149 unsigned long ip; /* address of mcount call-site */
115 unsigned long flags; 150 struct dyn_ftrace *freelist;
116 struct dyn_arch_ftrace arch; 151 };
152 union {
153 unsigned long flags;
154 struct dyn_ftrace *newlist;
155 };
156 struct dyn_arch_ftrace arch;
117}; 157};
118 158
119int ftrace_force_update(void); 159int ftrace_force_update(void);
120void ftrace_set_filter(unsigned char *buf, int len, int reset); 160void ftrace_set_filter(unsigned char *buf, int len, int reset);
121 161
162int register_ftrace_command(struct ftrace_func_command *cmd);
163int unregister_ftrace_command(struct ftrace_func_command *cmd);
164
122/* defined in arch */ 165/* defined in arch */
123extern int ftrace_ip_converted(unsigned long ip); 166extern int ftrace_ip_converted(unsigned long ip);
124extern int ftrace_dyn_arch_init(void *data); 167extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
126extern void ftrace_caller(void); 169extern void ftrace_caller(void);
127extern void ftrace_call(void); 170extern void ftrace_call(void);
128extern void mcount_call(void); 171extern void mcount_call(void);
172
173#ifndef FTRACE_ADDR
174#define FTRACE_ADDR ((unsigned long)ftrace_caller)
175#endif
129#ifdef CONFIG_FUNCTION_GRAPH_TRACER 176#ifdef CONFIG_FUNCTION_GRAPH_TRACER
130extern void ftrace_graph_caller(void); 177extern void ftrace_graph_caller(void);
131extern int ftrace_enable_ftrace_graph_caller(void); 178extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
136#endif 183#endif
137 184
138/** 185/**
139 * ftrace_make_nop - convert code into top 186 * ftrace_make_nop - convert code into nop
140 * @mod: module structure if called by module load initialization 187 * @mod: module structure if called by module load initialization
141 * @rec: the mcount call site record 188 * @rec: the mcount call site record
142 * @addr: the address that the call site should be calling 189 * @addr: the address that the call site should be calling
@@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod,
181 */ 228 */
182extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); 229extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
183 230
184
185/* May be defined in arch */ 231/* May be defined in arch */
186extern int ftrace_arch_read_dyn_info(char *buf, int size); 232extern int ftrace_arch_read_dyn_info(char *buf, int size);
187 233
@@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void);
198# define ftrace_disable_daemon() do { } while (0) 244# define ftrace_disable_daemon() do { } while (0)
199# define ftrace_enable_daemon() do { } while (0) 245# define ftrace_enable_daemon() do { } while (0)
200static inline void ftrace_release(void *start, unsigned long size) { } 246static inline void ftrace_release(void *start, unsigned long size) { }
247static inline int register_ftrace_command(struct ftrace_func_command *cmd)
248{
249 return -EINVAL;
250}
251static inline int unregister_ftrace_command(char *cmd_name)
252{
253 return -EINVAL;
254}
201#endif /* CONFIG_DYNAMIC_FTRACE */ 255#endif /* CONFIG_DYNAMIC_FTRACE */
202 256
203/* totally disable ftrace - can not re-enable after this */ 257/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled)
233#endif 287#endif
234} 288}
235 289
236#ifdef CONFIG_FRAME_POINTER 290#ifndef HAVE_ARCH_CALLER_ADDR
237/* TODO: need to fix this for ARM */ 291# ifdef CONFIG_FRAME_POINTER
238# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 292# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
239# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) 293# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
240# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) 294# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
241# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) 295# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
242# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) 296# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
243# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) 297# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
244# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) 298# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
245#else 299# else
246# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 300# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
247# define CALLER_ADDR1 0UL 301# define CALLER_ADDR1 0UL
248# define CALLER_ADDR2 0UL 302# define CALLER_ADDR2 0UL
249# define CALLER_ADDR3 0UL 303# define CALLER_ADDR3 0UL
250# define CALLER_ADDR4 0UL 304# define CALLER_ADDR4 0UL
251# define CALLER_ADDR5 0UL 305# define CALLER_ADDR5 0UL
252# define CALLER_ADDR6 0UL 306# define CALLER_ADDR6 0UL
253#endif 307# endif
308#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
254 309
255#ifdef CONFIG_IRQSOFF_TRACER 310#ifdef CONFIG_IRQSOFF_TRACER
256 extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 311 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled)
268# define trace_preempt_off(a0, a1) do { } while (0) 323# define trace_preempt_off(a0, a1) do { } while (0)
269#endif 324#endif
270 325
271#ifdef CONFIG_TRACING
272extern int ftrace_dump_on_oops;
273
274extern void tracing_start(void);
275extern void tracing_stop(void);
276extern void ftrace_off_permanent(void);
277
278extern void
279ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
280
281/**
282 * ftrace_printk - printf formatting in the ftrace buffer
283 * @fmt: the printf format for printing
284 *
285 * Note: __ftrace_printk is an internal function for ftrace_printk and
286 * the @ip is passed in via the ftrace_printk macro.
287 *
288 * This function allows a kernel developer to debug fast path sections
289 * that printk is not appropriate for. By scattering in various
290 * printk like tracing in the code, a developer can quickly see
291 * where problems are occurring.
292 *
293 * This is intended as a debugging tool for the developer only.
294 * Please refrain from leaving ftrace_printks scattered around in
295 * your code.
296 */
297# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
298extern int
299__ftrace_printk(unsigned long ip, const char *fmt, ...)
300 __attribute__ ((format (printf, 2, 3)));
301extern void ftrace_dump(void);
302#else
303static inline void
304ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
305static inline int
306ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
307
308static inline void tracing_start(void) { }
309static inline void tracing_stop(void) { }
310static inline void ftrace_off_permanent(void) { }
311static inline int
312ftrace_printk(const char *fmt, ...)
313{
314 return 0;
315}
316static inline void ftrace_dump(void) { }
317#endif
318
319#ifdef CONFIG_FTRACE_MCOUNT_RECORD 326#ifdef CONFIG_FTRACE_MCOUNT_RECORD
320extern void ftrace_init(void); 327extern void ftrace_init(void);
321extern void ftrace_init_module(struct module *mod, 328extern void ftrace_init_module(struct module *mod,
@@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod,
327 unsigned long *start, unsigned long *end) { } 334 unsigned long *start, unsigned long *end) { }
328#endif 335#endif
329 336
330enum {
331 POWER_NONE = 0,
332 POWER_CSTATE = 1,
333 POWER_PSTATE = 2,
334};
335
336struct power_trace {
337#ifdef CONFIG_POWER_TRACER
338 ktime_t stamp;
339 ktime_t end;
340 int type;
341 int state;
342#endif
343};
344
345#ifdef CONFIG_POWER_TRACER
346extern void trace_power_start(struct power_trace *it, unsigned int type,
347 unsigned int state);
348extern void trace_power_mark(struct power_trace *it, unsigned int type,
349 unsigned int state);
350extern void trace_power_end(struct power_trace *it);
351#else
352static inline void trace_power_start(struct power_trace *it, unsigned int type,
353 unsigned int state) { }
354static inline void trace_power_mark(struct power_trace *it, unsigned int type,
355 unsigned int state) { }
356static inline void trace_power_end(struct power_trace *it) { }
357#endif
358
359
360/* 337/*
361 * Structure that defines an entry function trace. 338 * Structure that defines an entry function trace.
362 */ 339 */
@@ -398,8 +375,7 @@ struct ftrace_ret_stack {
398extern void return_to_handler(void); 375extern void return_to_handler(void);
399 376
400extern int 377extern int
401ftrace_push_return_trace(unsigned long ret, unsigned long long time, 378ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
402 unsigned long func, int *depth);
403extern void 379extern void
404ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); 380ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
405 381
@@ -514,6 +490,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
514 return tsk->trace & TSK_TRACE_FL_GRAPH; 490 return tsk->trace & TSK_TRACE_FL_GRAPH;
515} 491}
516 492
493extern int ftrace_dump_on_oops;
494
517#endif /* CONFIG_TRACING */ 495#endif /* CONFIG_TRACING */
518 496
497
498#ifdef CONFIG_HW_BRANCH_TRACER
499
500void trace_hw_branch(u64 from, u64 to);
501void trace_hw_branch_oops(void);
502
503#else /* CONFIG_HW_BRANCH_TRACER */
504
505static inline void trace_hw_branch(u64 from, u64 to) {}
506static inline void trace_hw_branch_oops(void) {}
507
508#endif /* CONFIG_HW_BRANCH_TRACER */
509
510/*
511 * A syscall entry in the ftrace syscalls array.
512 *
513 * @name: name of the syscall
514 * @nb_args: number of parameters it takes
515 * @types: list of types as strings
516 * @args: list of args as strings (args[i] matches types[i])
517 */
518struct syscall_metadata {
519 const char *name;
520 int nb_args;
521 const char **types;
522 const char **args;
523};
524
525#ifdef CONFIG_FTRACE_SYSCALLS
526extern void arch_init_ftrace_syscalls(void);
527extern struct syscall_metadata *syscall_nr_to_meta(int nr);
528extern void start_ftrace_syscalls(void);
529extern void stop_ftrace_syscalls(void);
530extern void ftrace_syscall_enter(struct pt_regs *regs);
531extern void ftrace_syscall_exit(struct pt_regs *regs);
532#else
533static inline void start_ftrace_syscalls(void) { }
534static inline void stop_ftrace_syscalls(void) { }
535static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
536static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
537#endif
538
519#endif /* _LINUX_FTRACE_H */ 539#endif /* _LINUX_FTRACE_H */
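
Besides the syscall hooks, the ftrace.h rework exports a function-probe interface: a struct ftrace_probe_ops bundles the handler with optional callback, free and print hooks, and register_ftrace_function_probe() attaches it to every mcount site whose name matches a glob. A hedged sketch of a minimal user (probe body and glob are illustrative; the declarations are the ones added above):

#include <linux/ftrace.h>
#include <linux/kernel.h>

static void my_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        pr_info("hit %pS called from %pS\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_probe_ops my_probe_ops = {
        .func = my_probe,
};

static char my_glob[] = "vfs_*";

static int attach_probe(void)
{
        return register_ftrace_function_probe(my_glob, &my_probe_ops, NULL);
}

static void detach_probe(void)
{
        unregister_ftrace_function_probe(my_glob, &my_probe_ops, NULL);
}
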
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
2#define _LINUX_FTRACE_IRQ_H 2#define _LINUX_FTRACE_IRQ_H
3 3
4 4
5#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) 5#ifdef CONFIG_FTRACE_NMI_ENTER
6extern void ftrace_nmi_enter(void); 6extern void ftrace_nmi_enter(void);
7extern void ftrace_nmi_exit(void); 7extern void ftrace_nmi_exit(void);
8#else 8#else
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..faa1cf848bcd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
15 * - bits 0-7 are the preemption count (max preemption depth: 256) 15 * - bits 0-7 are the preemption count (max preemption depth: 256)
16 * - bits 8-15 are the softirq count (max # of softirqs: 256) 16 * - bits 8-15 are the softirq count (max # of softirqs: 256)
17 * 17 *
18 * The hardirq count can be overridden per architecture, the default is: 18 * The hardirq count can in theory reach the same as NR_IRQS.
19 * In reality, the number of nested IRQS is limited to the stack
20 * size as well. For archs with over 1000 IRQS it is not practical
21 * to expect that they will all nest. We give a max of 10 bits for
22 * hardirq nesting. An arch may choose to give less than 10 bits.
23 * m68k expects it to be 8.
19 * 24 *
20 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) 25 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
21 * - ( bit 28 is the PREEMPT_ACTIVE flag. ) 26 * - bit 26 is the NMI_MASK
27 * - bit 28 is the PREEMPT_ACTIVE flag
22 * 28 *
23 * PREEMPT_MASK: 0x000000ff 29 * PREEMPT_MASK: 0x000000ff
24 * SOFTIRQ_MASK: 0x0000ff00 30 * SOFTIRQ_MASK: 0x0000ff00
25 * HARDIRQ_MASK: 0x0fff0000 31 * HARDIRQ_MASK: 0x03ff0000
32 * NMI_MASK: 0x04000000
26 */ 33 */
27#define PREEMPT_BITS 8 34#define PREEMPT_BITS 8
28#define SOFTIRQ_BITS 8 35#define SOFTIRQ_BITS 8
36#define NMI_BITS 1
29 37
30#ifndef HARDIRQ_BITS 38#define MAX_HARDIRQ_BITS 10
31#define HARDIRQ_BITS 12
32 39
33#ifndef MAX_HARDIRQS_PER_CPU 40#ifndef HARDIRQ_BITS
34#define MAX_HARDIRQS_PER_CPU NR_IRQS 41# define HARDIRQ_BITS MAX_HARDIRQ_BITS
35#endif 42#endif
36 43
37/* 44#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
38 * The hardirq mask has to be large enough to have space for potentially 45#error HARDIRQ_BITS too high!
39 * all IRQ sources in the system nesting on a single CPU.
40 */
41#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
42# error HARDIRQ_BITS is too low!
43#endif
44#endif 46#endif
45 47
46#define PREEMPT_SHIFT 0 48#define PREEMPT_SHIFT 0
47#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) 49#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
48#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) 50#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
51#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
49 52
50#define __IRQ_MASK(x) ((1UL << (x))-1) 53#define __IRQ_MASK(x) ((1UL << (x))-1)
51 54
52#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) 55#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
53#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) 56#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
54#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) 57#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
58#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
55 59
56#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) 60#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
57#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) 61#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
58#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) 62#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
63#define NMI_OFFSET (1UL << NMI_SHIFT)
59 64
60#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) 65#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
61#error PREEMPT_ACTIVE is too low! 66#error PREEMPT_ACTIVE is too low!
62#endif 67#endif
63 68
64#define hardirq_count() (preempt_count() & HARDIRQ_MASK) 69#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
65#define softirq_count() (preempt_count() & SOFTIRQ_MASK) 70#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
66#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) 71#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
72 | NMI_MASK))
67 73
68/* 74/*
69 * Are we doing bottom half or hardware interrupt processing? 75 * Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
73#define in_softirq() (softirq_count()) 79#define in_softirq() (softirq_count())
74#define in_interrupt() (irq_count()) 80#define in_interrupt() (irq_count())
75 81
82/*
83 * Are we in NMI context?
84 */
85#define in_nmi() (preempt_count() & NMI_MASK)
86
76#if defined(CONFIG_PREEMPT) 87#if defined(CONFIG_PREEMPT)
77# define PREEMPT_INATOMIC_BASE kernel_locked() 88# define PREEMPT_INATOMIC_BASE kernel_locked()
78# define PREEMPT_CHECK_OFFSET 1 89# define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
164 */ 175 */
165extern void irq_exit(void); 176extern void irq_exit(void);
166 177
167#define nmi_enter() \ 178#define nmi_enter() \
168 do { \ 179 do { \
169 ftrace_nmi_enter(); \ 180 ftrace_nmi_enter(); \
170 lockdep_off(); \ 181 BUG_ON(in_nmi()); \
171 rcu_nmi_enter(); \ 182 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
172 __irq_enter(); \ 183 lockdep_off(); \
184 rcu_nmi_enter(); \
185 trace_hardirq_enter(); \
173 } while (0) 186 } while (0)
174 187
175#define nmi_exit() \ 188#define nmi_exit() \
176 do { \ 189 do { \
177 __irq_exit(); \ 190 trace_hardirq_exit(); \
178 rcu_nmi_exit(); \ 191 rcu_nmi_exit(); \
179 lockdep_on(); \ 192 lockdep_on(); \
180 ftrace_nmi_exit(); \ 193 BUG_ON(!in_nmi()); \
194 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
195 ftrace_nmi_exit(); \
181 } while (0) 196 } while (0)
182 197
183#endif /* LINUX_HARDIRQ_H */ 198#endif /* LINUX_HARDIRQ_H */
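
The hardirq.h hunk above folds NMI accounting into preempt_count(): nmi_enter() adds NMI_OFFSET + HARDIRQ_OFFSET, so both in_nmi() and in_interrupt() are true inside an NMI handler. A minimal sketch of how the new masks decompose a preempt_count value (a standalone user-space illustration that mirrors the bit widths in the hunk, not kernel code):

#include <stdio.h>

/* Bit layout mirrored from the hardirq.h hunk above (illustration only). */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	10
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x)) - 1)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

int main(void)
{
	/* Pretend preempt_count() after nmi_enter() ran on this CPU. */
	unsigned long count = (1UL << NMI_SHIFT) | (1UL << HARDIRQ_SHIFT);

	printf("in_nmi:  %lu\n", count & NMI_MASK);	/* non-zero */
	printf("hardirq: %lu\n", count & HARDIRQ_MASK);	/* non-zero */
	return 0;
}
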
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c68bffd182bb..ce2c07d99fc3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -278,6 +278,11 @@ enum
278 NR_SOFTIRQS 278 NR_SOFTIRQS
279}; 279};
280 280
281/* map softirq index to softirq name. update 'softirq_to_name' in
282 * kernel/softirq.c when adding a new softirq.
283 */
284extern char *softirq_to_name[NR_SOFTIRQS];
285
281/* softirq mask and active fields moved to irq_cpustat_t in 286/* softirq mask and active fields moved to irq_cpustat_t in
282 * asm/hardirq.h to get better cache usage. KAO 287 * asm/hardirq.h to get better cache usage. KAO
283 */ 288 */
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 74bde13224c9..b02a3f1d46a0 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -24,8 +24,8 @@
24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) 24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) 25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) 26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
27# define trace_softirq_enter() do { current->softirq_context++; } while (0) 27# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
28# define trace_softirq_exit() do { current->softirq_context--; } while (0) 28# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, 29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
30#else 30#else
31# define trace_hardirqs_on() do { } while (0) 31# define trace_hardirqs_on() do { } while (0)
@@ -38,8 +38,8 @@
38# define trace_softirqs_enabled(p) 0 38# define trace_softirqs_enabled(p) 0
39# define trace_hardirq_enter() do { } while (0) 39# define trace_hardirq_enter() do { } while (0)
40# define trace_hardirq_exit() do { } while (0) 40# define trace_hardirq_exit() do { } while (0)
41# define trace_softirq_enter() do { } while (0) 41# define lockdep_softirq_enter() do { } while (0)
42# define trace_softirq_exit() do { } while (0) 42# define lockdep_softirq_exit() do { } while (0)
43# define INIT_TRACE_IRQFLAGS 43# define INIT_TRACE_IRQFLAGS
44#endif 44#endif
45 45
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e720b0da7751..e81f2637fdef 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -242,6 +242,19 @@ extern struct ratelimit_state printk_ratelimit_state;
242extern int printk_ratelimit(void); 242extern int printk_ratelimit(void);
243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, 243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
244 unsigned int interval_msec); 244 unsigned int interval_msec);
245
246/*
247 * Print a one-time message (analogous to WARN_ONCE() et al):
248 */
249#define printk_once(x...) ({ \
250 static int __print_once = 1; \
251 \
252 if (__print_once) { \
253 __print_once = 0; \
254 printk(x); \
255 } \
256})
257
245#else 258#else
246static inline int vprintk(const char *s, va_list args) 259static inline int vprintk(const char *s, va_list args)
247 __attribute__ ((format (printf, 1, 0))); 260 __attribute__ ((format (printf, 1, 0)));
@@ -253,6 +266,10 @@ static inline int printk_ratelimit(void) { return 0; }
253static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ 266static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
254 unsigned int interval_msec) \ 267 unsigned int interval_msec) \
255 { return false; } 268 { return false; }
269
270/* No effect, but we still get type checking even in the !PRINTK case: */
271#define printk_once(x...) printk(x)
272
256#endif 273#endif
257 274
258extern int printk_needs_cpu(int cpu); 275extern int printk_needs_cpu(int cpu);
@@ -371,6 +388,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
371#endif 388#endif
372 389
373/* 390/*
391 * General tracing related utility functions - trace_printk(),
392 * tracing_on/tracing_off and tracing_start()/tracing_stop
393 *
394 * Use tracing_on/tracing_off when you want to quickly turn on or off
395 * tracing. It simply enables or disables the recording of the trace events.
396 * This also corresponds to the user space debugfs/tracing/tracing_on
397 * file, which gives a means for the kernel and userspace to interact.
398 * Place a tracing_off() in the kernel where you want tracing to end.
399 * From user space, examine the trace, and then echo 1 > tracing_on
400 * to continue tracing.
401 *
402 * tracing_stop/tracing_start has slightly more overhead. It is used
403 * by things like suspend to ram where disabling the recording of the
404 * trace is not enough, but tracing must actually stop because things
405 * like calling smp_processor_id() may crash the system.
406 *
407 * Most likely, you want to use tracing_on/tracing_off.
408 */
409#ifdef CONFIG_RING_BUFFER
410void tracing_on(void);
411void tracing_off(void);
 412/* tracing_off_permanent() stops recording with no way to bring it back */
413void tracing_off_permanent(void);
414int tracing_is_on(void);
415#else
416static inline void tracing_on(void) { }
417static inline void tracing_off(void) { }
418static inline void tracing_off_permanent(void) { }
419static inline int tracing_is_on(void) { return 0; }
420#endif
421#ifdef CONFIG_TRACING
422extern void tracing_start(void);
423extern void tracing_stop(void);
424extern void ftrace_off_permanent(void);
425
426extern void
427ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
428
429static inline void __attribute__ ((format (printf, 1, 2)))
430____trace_printk_check_format(const char *fmt, ...)
431{
432}
433#define __trace_printk_check_format(fmt, args...) \
434do { \
435 if (0) \
436 ____trace_printk_check_format(fmt, ##args); \
437} while (0)
438
439/**
440 * trace_printk - printf formatting in the ftrace buffer
441 * @fmt: the printf format for printing
442 *
443 * Note: __trace_printk is an internal function for trace_printk and
444 * the @ip is passed in via the trace_printk macro.
445 *
446 * This function allows a kernel developer to debug fast path sections
447 * that printk is not appropriate for. By scattering in various
448 * printk like tracing in the code, a developer can quickly see
449 * where problems are occurring.
450 *
451 * This is intended as a debugging tool for the developer only.
452 * Please refrain from leaving trace_printks scattered around in
453 * your code.
454 */
455
456#define trace_printk(fmt, args...) \
457do { \
458 __trace_printk_check_format(fmt, ##args); \
459 if (__builtin_constant_p(fmt)) { \
460 static const char *trace_printk_fmt \
461 __attribute__((section("__trace_printk_fmt"))) = \
462 __builtin_constant_p(fmt) ? fmt : NULL; \
463 \
464 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
465 } else \
466 __trace_printk(_THIS_IP_, fmt, ##args); \
467} while (0)
468
469extern int
470__trace_bprintk(unsigned long ip, const char *fmt, ...)
471 __attribute__ ((format (printf, 2, 3)));
472
473extern int
474__trace_printk(unsigned long ip, const char *fmt, ...)
475 __attribute__ ((format (printf, 2, 3)));
476
477/*
478 * The double __builtin_constant_p is because gcc will give us an error
479 * if we try to allocate the static variable to fmt if it is not a
480 * constant. Even with the outer if statement.
481 */
482#define ftrace_vprintk(fmt, vargs) \
483do { \
484 if (__builtin_constant_p(fmt)) { \
485 static const char *trace_printk_fmt \
486 __attribute__((section("__trace_printk_fmt"))) = \
487 __builtin_constant_p(fmt) ? fmt : NULL; \
488 \
489 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
490 } else \
491 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
492} while (0)
493
494extern int
495__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
496
497extern int
498__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
499
500extern void ftrace_dump(void);
501#else
502static inline void
503ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
504static inline int
505trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
506
507static inline void tracing_start(void) { }
508static inline void tracing_stop(void) { }
509static inline void ftrace_off_permanent(void) { }
510static inline int
511trace_printk(const char *fmt, ...)
512{
513 return 0;
514}
515static inline int
516ftrace_vprintk(const char *fmt, va_list ap)
517{
518 return 0;
519}
520static inline void ftrace_dump(void) { }
521#endif /* CONFIG_TRACING */
522
523/*
374 * Display an IP address in readable format. 524 * Display an IP address in readable format.
375 */ 525 */
376 526
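
The kernel.h hunks above describe printk_once() and the trace_printk()/tracing_on()/tracing_off() helpers in prose; here is a hedged sketch of how a developer might drop them into a suspect fast path (struct widget and frobnicate_fast_path() are made-up names, assuming CONFIG_TRACING=y):

#include <linux/kernel.h>	/* printk_once(), trace_printk(), tracing_off() */

struct widget {			/* made-up example type */
	int state;
};

static void frobnicate_fast_path(struct widget *w)	/* made-up function */
{
	/* Cheap, buffered instrumentation - lands in the ftrace ring buffer. */
	trace_printk("widget %p state=%d\n", w, w->state);

	if (unlikely(w->state < 0)) {
		/* Warn once per boot without flooding the log ... */
		printk_once(KERN_WARNING "widget went bad, freezing trace\n");
		/*
		 * ... and freeze the ring buffer so the events leading up to
		 * the bad state survive; resume from user space with
		 * "echo 1 > tracing_on" in the tracing debugfs directory.
		 */
		tracing_off();
	}
}
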
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 3fdc10806d31..86a6c0f0518d 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -99,4 +99,10 @@ enum mem_add_context { BOOT, HOTPLUG };
99#define hotplug_memory_notifier(fn, pri) do { } while (0) 99#define hotplug_memory_notifier(fn, pri) do { } while (0)
100#endif 100#endif
101 101
102/*
103 * Kernel text modification mutex, used for code patching. Users of this lock
104 * can sleep.
105 */
106extern struct mutex text_mutex;
107
102#endif /* _LINUX_MEMORY_H_ */ 108#endif /* _LINUX_MEMORY_H_ */
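
The memory.h hunk only adds the declaration; a minimal hedged sketch of the locking rule it documents (patch_kernel_text() is a made-up helper, the actual patching primitives live in arch code):

#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>
#include <linux/types.h>

/* Made-up helper: kernel-text patching must hold text_mutex and may sleep. */
static void patch_kernel_text(void *addr, const void *opcode, size_t len)
{
	mutex_lock(&text_mutex);
	/* ... rewrite the instruction bytes at addr here (arch-specific) ... */
	mutex_unlock(&text_mutex);
}
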
diff --git a/include/linux/module.h b/include/linux/module.h
index 145a75528cc1..22d9878e868c 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -329,6 +329,11 @@ struct module
329 unsigned int num_tracepoints; 329 unsigned int num_tracepoints;
330#endif 330#endif
331 331
332#ifdef CONFIG_TRACING
333 const char **trace_bprintk_fmt_start;
334 unsigned int num_trace_bprintk_fmt;
335#endif
336
332#ifdef CONFIG_MODULE_UNLOAD 337#ifdef CONFIG_MODULE_UNLOAD
333 /* What modules depend on me? */ 338 /* What modules depend on me? */
334 struct list_head modules_which_use_me; 339 struct list_head modules_which_use_me;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..e1b7b2173885 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
8struct ring_buffer_iter; 8struct ring_buffer_iter;
9 9
10/* 10/*
11 * Don't reference this struct directly, use functions below. 11 * Don't refer to this struct directly, use functions below.
12 */ 12 */
13struct ring_buffer_event { 13struct ring_buffer_event {
14 u32 type:2, len:3, time_delta:27; 14 u32 type:2, len:3, time_delta:27;
@@ -18,10 +18,13 @@ struct ring_buffer_event {
18/** 18/**
19 * enum ring_buffer_type - internal ring buffer types 19 * enum ring_buffer_type - internal ring buffer types
20 * 20 *
21 * @RINGBUF_TYPE_PADDING: Left over page padding 21 * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
22 * array is ignored 22 * If time_delta is 0:
23 * size is variable depending on how much 23 * array is ignored
24 * size is variable depending on how much
24 * padding is needed 25 * padding is needed
26 * If time_delta is non zero:
27 * everything else same as RINGBUF_TYPE_DATA
25 * 28 *
26 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta 29 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
27 * array[0] = time delta (28 .. 59) 30 * array[0] = time delta (28 .. 59)
@@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
65 return event->time_delta; 68 return event->time_delta;
66} 69}
67 70
71void ring_buffer_event_discard(struct ring_buffer_event *event);
72
68/* 73/*
69 * size is in bytes for each per CPU buffer. 74 * size is in bytes for each per CPU buffer.
70 */ 75 */
@@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
74 79
75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); 80int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
76 81
77struct ring_buffer_event * 82struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
78ring_buffer_lock_reserve(struct ring_buffer *buffer, 83 unsigned long length);
79 unsigned long length,
80 unsigned long *flags);
81int ring_buffer_unlock_commit(struct ring_buffer *buffer, 84int ring_buffer_unlock_commit(struct ring_buffer *buffer,
82 struct ring_buffer_event *event, 85 struct ring_buffer_event *event);
83 unsigned long flags);
84int ring_buffer_write(struct ring_buffer *buffer, 86int ring_buffer_write(struct ring_buffer *buffer,
85 unsigned long length, void *data); 87 unsigned long length, void *data);
86 88
@@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
121unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); 123unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
122unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); 124unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
123 125
124u64 ring_buffer_time_stamp(int cpu); 126u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
125void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 127void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
128 int cpu, u64 *ts);
129void ring_buffer_set_clock(struct ring_buffer *buffer,
130 u64 (*clock)(void));
131
132size_t ring_buffer_page_len(void *page);
126 133
127void tracing_on(void);
128void tracing_off(void);
129void tracing_off_permanent(void);
130 134
131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); 135void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
132void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); 136void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
133int ring_buffer_read_page(struct ring_buffer *buffer, 137int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
134 void **data_page, int cpu, int full); 138 size_t len, int cpu, int full);
135 139
136enum ring_buffer_flags { 140enum ring_buffer_flags {
137 RB_FL_OVERWRITE = 1 << 0, 141 RB_FL_OVERWRITE = 1 << 0,
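
The ring_buffer.h hunk drops the flags argument from ring_buffer_lock_reserve()/ring_buffer_unlock_commit(); a hedged sketch of the resulting write path (struct my_entry and record_sample() are illustrative, error handling kept minimal):

#include <linux/errno.h>
#include <linux/ring_buffer.h>

struct my_entry {			/* illustrative payload */
	unsigned long ip;
	int value;
};

static int record_sample(struct ring_buffer *buffer, unsigned long ip, int value)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;

	/* Reserve space - no irq-flags juggling by the caller any more. */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->value = value;

	return ring_buffer_unlock_commit(buffer, event);
}
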
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 481fad3a9b42..5a50fdef5be5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -137,6 +137,8 @@ extern unsigned long nr_uninterruptible(void);
137extern unsigned long nr_active(void); 137extern unsigned long nr_active(void);
138extern unsigned long nr_iowait(void); 138extern unsigned long nr_iowait(void);
139 139
140extern unsigned long get_parent_ip(unsigned long addr);
141
140struct seq_file; 142struct seq_file;
141struct cfs_rq; 143struct cfs_rq;
142struct task_group; 144struct task_group;
@@ -1421,6 +1423,8 @@ struct task_struct {
1421 int curr_ret_stack; 1423 int curr_ret_stack;
1422 /* Stack of return addresses for return function tracing */ 1424 /* Stack of return addresses for return function tracing */
1423 struct ftrace_ret_stack *ret_stack; 1425 struct ftrace_ret_stack *ret_stack;
1426 /* time stamp for last schedule */
1427 unsigned long long ftrace_timestamp;
1424 /* 1428 /*
1425 * Number of functions that haven't been traced 1429 * Number of functions that haven't been traced
1426 * because of depth overrun. 1430 * because of depth overrun.
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d75..f4523651fa42 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ 14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <trace/kmemtrace.h>
17 18
18/* Size description struct for general caches. */ 19/* Size description struct for general caches. */
19struct cache_sizes { 20struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
28void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 29void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
29void *__kmalloc(size_t size, gfp_t flags); 30void *__kmalloc(size_t size, gfp_t flags);
30 31
31static inline void *kmalloc(size_t size, gfp_t flags) 32#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
32{ 38{
39 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
47static __always_inline void *kmalloc(size_t size, gfp_t flags)
48{
49 struct kmem_cache *cachep;
50 void *ret;
51
33 if (__builtin_constant_p(size)) { 52 if (__builtin_constant_p(size)) {
34 int i = 0; 53 int i = 0;
35 54
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
47found: 66found:
48#ifdef CONFIG_ZONE_DMA 67#ifdef CONFIG_ZONE_DMA
49 if (flags & GFP_DMA) 68 if (flags & GFP_DMA)
50 return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, 69 cachep = malloc_sizes[i].cs_dmacachep;
51 flags); 70 else
52#endif 71#endif
53 return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); 72 cachep = malloc_sizes[i].cs_cachep;
73
74 ret = kmem_cache_alloc_notrace(cachep, flags);
75
76 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
77 size, slab_buffer_size(cachep), flags);
78
79 return ret;
54 } 80 }
55 return __kmalloc(size, flags); 81 return __kmalloc(size, flags);
56} 82}
@@ -59,8 +85,25 @@ found:
59extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 85extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
60extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 86extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
61 87
62static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 88#ifdef CONFIG_KMEMTRACE
89extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
90 gfp_t flags,
91 int nodeid);
92#else
93static __always_inline void *
94kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
95 gfp_t flags,
96 int nodeid)
97{
98 return kmem_cache_alloc_node(cachep, flags, nodeid);
99}
100#endif
101
102static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63{ 103{
104 struct kmem_cache *cachep;
105 void *ret;
106
64 if (__builtin_constant_p(size)) { 107 if (__builtin_constant_p(size)) {
65 int i = 0; 108 int i = 0;
66 109
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
78found: 121found:
79#ifdef CONFIG_ZONE_DMA 122#ifdef CONFIG_ZONE_DMA
80 if (flags & GFP_DMA) 123 if (flags & GFP_DMA)
81 return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, 124 cachep = malloc_sizes[i].cs_dmacachep;
82 flags, node); 125 else
83#endif 126#endif
84 return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, 127 cachep = malloc_sizes[i].cs_cachep;
85 flags, node); 128
129 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
130
131 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
132 ret, size, slab_buffer_size(cachep),
133 flags, node);
134
135 return ret;
86 } 136 }
87 return __kmalloc_node(size, flags, node); 137 return __kmalloc_node(size, flags, node);
88} 138}
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
3 3
4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
5 5
6static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 6static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
7 gfp_t flags)
7{ 8{
8 return kmem_cache_alloc_node(cachep, flags, -1); 9 return kmem_cache_alloc_node(cachep, flags, -1);
9} 10}
10 11
11void *__kmalloc_node(size_t size, gfp_t flags, int node); 12void *__kmalloc_node(size_t size, gfp_t flags, int node);
12 13
13static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 14static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
14{ 15{
15 return __kmalloc_node(size, flags, node); 16 return __kmalloc_node(size, flags, node);
16} 17}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
23 * kmalloc is the normal method of allocating memory 24 * kmalloc is the normal method of allocating memory
24 * in the kernel. 25 * in the kernel.
25 */ 26 */
26static inline void *kmalloc(size_t size, gfp_t flags) 27static __always_inline void *kmalloc(size_t size, gfp_t flags)
27{ 28{
28 return __kmalloc_node(size, flags, -1); 29 return __kmalloc_node(size, flags, -1);
29} 30}
30 31
31static inline void *__kmalloc(size_t size, gfp_t flags) 32static __always_inline void *__kmalloc(size_t size, gfp_t flags)
32{ 33{
33 return kmalloc(size, flags); 34 return kmalloc(size, flags);
34} 35}
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e37b6aa8a9fb..a1f90528e70b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13#include <trace/kmemtrace.h>
13 14
14enum stat_item { 15enum stat_item {
15 ALLOC_FASTPATH, /* Allocation from cpu slab */ 16 ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -217,13 +218,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
217void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 218void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
218void *__kmalloc(size_t size, gfp_t flags); 219void *__kmalloc(size_t size, gfp_t flags);
219 220
221#ifdef CONFIG_KMEMTRACE
222extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
223#else
224static __always_inline void *
225kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
226{
227 return kmem_cache_alloc(s, gfpflags);
228}
229#endif
230
220static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 231static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
221{ 232{
222 return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); 233 unsigned int order = get_order(size);
234 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
235
236 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
237 size, PAGE_SIZE << order, flags);
238
239 return ret;
223} 240}
224 241
225static __always_inline void *kmalloc(size_t size, gfp_t flags) 242static __always_inline void *kmalloc(size_t size, gfp_t flags)
226{ 243{
244 void *ret;
245
227 if (__builtin_constant_p(size)) { 246 if (__builtin_constant_p(size)) {
228 if (size > SLUB_MAX_SIZE) 247 if (size > SLUB_MAX_SIZE)
229 return kmalloc_large(size, flags); 248 return kmalloc_large(size, flags);
@@ -234,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
234 if (!s) 253 if (!s)
235 return ZERO_SIZE_PTR; 254 return ZERO_SIZE_PTR;
236 255
237 return kmem_cache_alloc(s, flags); 256 ret = kmem_cache_alloc_notrace(s, flags);
257
258 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
259 _THIS_IP_, ret,
260 size, s->size, flags);
261
262 return ret;
238 } 263 }
239 } 264 }
240 return __kmalloc(size, flags); 265 return __kmalloc(size, flags);
@@ -244,8 +269,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
244void *__kmalloc_node(size_t size, gfp_t flags, int node); 269void *__kmalloc_node(size_t size, gfp_t flags, int node);
245void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 270void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
246 271
272#ifdef CONFIG_KMEMTRACE
273extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
274 gfp_t gfpflags,
275 int node);
276#else
277static __always_inline void *
278kmem_cache_alloc_node_notrace(struct kmem_cache *s,
279 gfp_t gfpflags,
280 int node)
281{
282 return kmem_cache_alloc_node(s, gfpflags, node);
283}
284#endif
285
247static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 286static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
248{ 287{
288 void *ret;
289
249 if (__builtin_constant_p(size) && 290 if (__builtin_constant_p(size) &&
250 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { 291 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
251 struct kmem_cache *s = kmalloc_slab(size); 292 struct kmem_cache *s = kmalloc_slab(size);
@@ -253,7 +294,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
253 if (!s) 294 if (!s)
254 return ZERO_SIZE_PTR; 295 return ZERO_SIZE_PTR;
255 296
256 return kmem_cache_alloc_node(s, flags, node); 297 ret = kmem_cache_alloc_node_notrace(s, flags, node);
298
299 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
300 _THIS_IP_, ret,
301 size, s->size, flags, node);
302
303 return ret;
257 } 304 }
258 return __kmalloc_node(size, flags, node); 305 return __kmalloc_node(size, flags, node);
259} 306}
diff --git a/include/linux/string.h b/include/linux/string.h
index 8852739f36df..3c877d686375 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -10,6 +10,7 @@
10#include <linux/compiler.h> /* for inline */ 10#include <linux/compiler.h> /* for inline */
11#include <linux/types.h> /* for size_t */ 11#include <linux/types.h> /* for size_t */
12#include <linux/stddef.h> /* for NULL */ 12#include <linux/stddef.h> /* for NULL */
13#include <stdarg.h>
13 14
14extern char *strndup_user(const char __user *, long); 15extern char *strndup_user(const char __user *, long);
15extern void *memdup_user(const void __user *, size_t); 16extern void *memdup_user(const void __user *, size_t);
@@ -112,6 +113,12 @@ extern void argv_free(char **argv);
112 113
113extern bool sysfs_streq(const char *s1, const char *s2); 114extern bool sysfs_streq(const char *s1, const char *s2);
114 115
116#ifdef CONFIG_BINARY_PRINTF
117int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
118int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
119int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
120#endif
121
115extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 122extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
116 const void *from, size_t available); 123 const void *from, size_t available);
117 124
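
The CONFIG_BINARY_PRINTF helpers added above back the new trace_bprintk path: arguments are encoded into a compact binary buffer at trace time and expanded to text at read time. A hedged sketch of that round trip (demo_binary_printf() is a made-up name; buffer sizing is kept conservative):

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/string.h>
#include <linux/types.h>

static int demo_binary_printf(char *out, size_t outlen)
{
	u32 bin[64];
	int ret;

	/* Encode: only the raw argument values land in 'bin'. */
	ret = bprintf(bin, ARRAY_SIZE(bin), "pid=%d comm=%s", 42, "demo");
	if (ret < 0)
		return ret;

	/* Decode: the same format string turns 'bin' back into text. */
	return bstr_printf(out, outlen, "pid=%d comm=%s", bin);
}
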
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f9f900cfd066..0cff9bb80b02 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -65,6 +65,7 @@ struct old_linux_dirent;
65#include <asm/signal.h> 65#include <asm/signal.h>
66#include <linux/quota.h> 66#include <linux/quota.h>
67#include <linux/key.h> 67#include <linux/key.h>
68#include <linux/ftrace.h>
68 69
69#define __SC_DECL1(t1, a1) t1 a1 70#define __SC_DECL1(t1, a1) t1 a1
70#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) 71#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
@@ -95,7 +96,46 @@ struct old_linux_dirent;
95#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) 96#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
96#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) 97#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
97 98
99#ifdef CONFIG_FTRACE_SYSCALLS
100#define __SC_STR_ADECL1(t, a) #a
101#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
102#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__)
103#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__)
104#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__)
105#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__)
106
107#define __SC_STR_TDECL1(t, a) #t
108#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__)
109#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__)
110#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__)
111#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
112#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
113
114#define SYSCALL_METADATA(sname, nb) \
115 static const struct syscall_metadata __used \
116 __attribute__((__aligned__(4))) \
117 __attribute__((section("__syscalls_metadata"))) \
118 __syscall_meta_##sname = { \
119 .name = "sys"#sname, \
120 .nb_args = nb, \
121 .types = types_##sname, \
122 .args = args_##sname, \
123 }
124
125#define SYSCALL_DEFINE0(sname) \
126 static const struct syscall_metadata __used \
127 __attribute__((__aligned__(4))) \
128 __attribute__((section("__syscalls_metadata"))) \
129 __syscall_meta_##sname = { \
130 .name = "sys_"#sname, \
131 .nb_args = 0, \
132 }; \
133 asmlinkage long sys_##sname(void)
134
135#else
98#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) 136#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
137#endif
138
99#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) 139#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
100#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) 140#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
101#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) 141#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
@@ -117,10 +157,26 @@ struct old_linux_dirent;
117#endif 157#endif
118#endif 158#endif
119 159
160#ifdef CONFIG_FTRACE_SYSCALLS
161#define SYSCALL_DEFINEx(x, sname, ...) \
162 static const char *types_##sname[] = { \
163 __SC_STR_TDECL##x(__VA_ARGS__) \
164 }; \
165 static const char *args_##sname[] = { \
166 __SC_STR_ADECL##x(__VA_ARGS__) \
167 }; \
168 SYSCALL_METADATA(sname, x); \
169 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
170#else
171#define SYSCALL_DEFINEx(x, sname, ...) \
172 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
173#endif
174
120#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS 175#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
121 176
122#define SYSCALL_DEFINE(name) static inline long SYSC_##name 177#define SYSCALL_DEFINE(name) static inline long SYSC_##name
123#define SYSCALL_DEFINEx(x, name, ...) \ 178
179#define __SYSCALL_DEFINEx(x, name, ...) \
124 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ 180 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
125 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ 181 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
126 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ 182 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
@@ -134,7 +190,7 @@ struct old_linux_dirent;
134#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 190#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
135 191
136#define SYSCALL_DEFINE(name) asmlinkage long sys_##name 192#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
137#define SYSCALL_DEFINEx(x, name, ...) \ 193#define __SYSCALL_DEFINEx(x, name, ...) \
138 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) 194 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
139 195
140#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 196#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
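
To make the SYSCALL_DEFINEx() rework concrete, this is roughly what a one-argument definition expands to with CONFIG_FTRACE_SYSCALLS=y and no syscall wrappers (a hand expansion for illustration only, with attribute noise trimmed; struct syscall_metadata comes from the <linux/ftrace.h> include added above):

/* SYSCALL_DEFINE1(close, unsigned int, fd) becomes, roughly: */

static const char *types__close[] = { "unsigned int" };
static const char *args__close[]  = { "fd" };

static const struct syscall_metadata __used
	__attribute__((__aligned__(4)))
	__attribute__((section("__syscalls_metadata")))
	__syscall_meta__close = {
		.name		= "sys_close",
		.nb_args	= 1,
		.types		= types__close,
		.args		= args__close,
};

asmlinkage long sys_close(unsigned int fd)
{
	/* ... original syscall body ... */
}
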
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 000000000000..7a8130384087
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
1#ifndef _LINUX_TRACE_CLOCK_H
2#define _LINUX_TRACE_CLOCK_H
3
4/*
5 * 3 trace clock variants, with differing scalability/precision
6 * tradeoffs:
7 *
8 * - local: CPU-local trace clock
9 * - medium: scalable global clock with some jitter
10 * - global: globally monotonic, serialized clock
11 */
12#include <linux/compiler.h>
13#include <linux/types.h>
14
15extern u64 notrace trace_clock_local(void);
16extern u64 notrace trace_clock(void);
17extern u64 notrace trace_clock_global(void);
18
19#endif /* _LINUX_TRACE_CLOCK_H */
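
A short, hedged illustration of choosing among the three clocks, wired into the new ring_buffer_set_clock() hook from the ring_buffer.h hunk earlier (my_tracer_set_clock() is a made-up name):

#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/types.h>

static void my_tracer_set_clock(struct ring_buffer *buffer, bool need_global_order)
{
	/*
	 * trace_clock_local() is cheapest but only meaningful per CPU;
	 * trace_clock_global() is serialized and monotonic across CPUs.
	 */
	if (need_global_order)
		ring_buffer_set_clock(buffer, trace_clock_global);
	else
		ring_buffer_set_clock(buffer, trace_clock_local);
}
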
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 757005458366..d35a7ee7611f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -31,8 +31,8 @@ struct tracepoint {
31 * Keep in sync with vmlinux.lds.h. 31 * Keep in sync with vmlinux.lds.h.
32 */ 32 */
33 33
34#define TPPROTO(args...) args 34#define TP_PROTO(args...) args
35#define TPARGS(args...) args 35#define TP_ARGS(args...) args
36 36
37#ifdef CONFIG_TRACEPOINTS 37#ifdef CONFIG_TRACEPOINTS
38 38
@@ -65,7 +65,7 @@ struct tracepoint {
65 { \ 65 { \
66 if (unlikely(__tracepoint_##name.state)) \ 66 if (unlikely(__tracepoint_##name.state)) \
67 __DO_TRACE(&__tracepoint_##name, \ 67 __DO_TRACE(&__tracepoint_##name, \
68 TPPROTO(proto), TPARGS(args)); \ 68 TP_PROTO(proto), TP_ARGS(args)); \
69 } \ 69 } \
70 static inline int register_trace_##name(void (*probe)(proto)) \ 70 static inline int register_trace_##name(void (*probe)(proto)) \
71 { \ 71 { \
@@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void)
153 synchronize_sched(); 153 synchronize_sched();
154} 154}
155 155
156#define PARAMS(args...) args
157#define TRACE_FORMAT(name, proto, args, fmt) \
158 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
159
160
161/*
162 * For use with the TRACE_EVENT macro:
163 *
164 * We define a tracepoint, its arguments, its printk format
 165 * and its 'fast binary record' layout.
166 *
 167 * Firstly, name your tracepoint via TRACE_EVENT(name, ...): the
168 * 'subsystem_event' notation is fine.
169 *
170 * Think about this whole construct as the
171 * 'trace_sched_switch() function' from now on.
172 *
173 *
174 * TRACE_EVENT(sched_switch,
175 *
176 * *
 177 * * A function has a regular function-arguments
 178 * * prototype; declare it via TP_PROTO():
179 * *
180 *
181 * TP_PROTO(struct rq *rq, struct task_struct *prev,
182 * struct task_struct *next),
183 *
184 * *
185 * * Define the call signature of the 'function'.
186 * * (Design sidenote: we use this instead of a
187 * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
188 * *
189 *
190 * TP_ARGS(rq, prev, next),
191 *
192 * *
193 * * Fast binary tracing: define the trace record via
194 * * TP_STRUCT__entry(). You can think about it like a
195 * * regular C structure local variable definition.
196 * *
197 * * This is how the trace record is structured and will
198 * * be saved into the ring buffer. These are the fields
199 * * that will be exposed to user-space in
200 * * /debug/tracing/events/<*>/format.
201 * *
202 * * The declared 'local variable' is called '__entry'
203 * *
 204 * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
205 * *
206 * * pid_t prev_pid;
207 * *
208 * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
209 * *
210 * * char prev_comm[TASK_COMM_LEN];
211 * *
212 *
213 * TP_STRUCT__entry(
214 * __array( char, prev_comm, TASK_COMM_LEN )
215 * __field( pid_t, prev_pid )
216 * __field( int, prev_prio )
217 * __array( char, next_comm, TASK_COMM_LEN )
218 * __field( pid_t, next_pid )
219 * __field( int, next_prio )
220 * ),
221 *
222 * *
223 * * Assign the entry into the trace record, by embedding
224 * * a full C statement block into TP_fast_assign(). You
225 * * can refer to the trace record as '__entry' -
226 * * otherwise you can put arbitrary C code in here.
227 * *
228 * * Note: this C code will execute every time a trace event
229 * * happens, on an active tracepoint.
230 * *
231 *
232 * TP_fast_assign(
233 * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
234 * __entry->prev_pid = prev->pid;
235 * __entry->prev_prio = prev->prio;
236 * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
237 * __entry->next_pid = next->pid;
238 * __entry->next_prio = next->prio;
239 * )
240 *
241 * *
242 * * Formatted output of a trace record via TP_printk().
243 * * This is how the tracepoint will appear under ftrace
244 * * plugins that make use of this tracepoint.
245 * *
 246 * * (raw-binary tracing won't actually perform this step.)
247 * *
248 *
249 * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
250 * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
251 * __entry->next_comm, __entry->next_pid, __entry->next_prio),
252 *
253 * );
254 *
255 * This macro construct is thus used for the regular printk format
256 * tracing setup, it is used to construct a function pointer based
257 * tracepoint callback (this is used by programmatic plugins and
 258 * can also be used by generic instrumentation like SystemTap), and
259 * it is also used to expose a structured trace record in
260 * /debug/tracing/events/.
261 */
262
263#define TRACE_EVENT(name, proto, args, struct, assign, print) \
264 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
265
156#endif 266#endif
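
Since TRACE_EVENT() currently expands to plain DECLARE_TRACE() (last hunk above), a probe attaches exactly as before; a hedged sketch against the sched_switch event defined further down in sched_event_types.h (probe and module-hook names are made up):

#include <linux/module.h>
#include <trace/sched.h>

struct rq;			/* opaque to modules; only passed through */

/* Probe signature must match the event's TP_PROTO(). */
static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
			       struct task_struct *next)
{
	/* e.g. count context switches, measure latencies, ... */
}

static int __init my_probe_init(void)
{
	/* register_trace_sched_switch() is generated by DECLARE_TRACE(). */
	return register_trace_sched_switch(probe_sched_switch);
}
module_init(my_probe_init);

static void __exit my_probe_exit(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	tracepoint_synchronize_unregister();
}
module_exit(my_probe_exit);
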
diff --git a/include/trace/block.h b/include/trace/block.h
index 25c6a1fd5b77..25b7068b819e 100644
--- a/include/trace/block.h
+++ b/include/trace/block.h
@@ -5,72 +5,72 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(block_rq_abort, 7DECLARE_TRACE(block_rq_abort,
8 TPPROTO(struct request_queue *q, struct request *rq), 8 TP_PROTO(struct request_queue *q, struct request *rq),
9 TPARGS(q, rq)); 9 TP_ARGS(q, rq));
10 10
11DECLARE_TRACE(block_rq_insert, 11DECLARE_TRACE(block_rq_insert,
12 TPPROTO(struct request_queue *q, struct request *rq), 12 TP_PROTO(struct request_queue *q, struct request *rq),
13 TPARGS(q, rq)); 13 TP_ARGS(q, rq));
14 14
15DECLARE_TRACE(block_rq_issue, 15DECLARE_TRACE(block_rq_issue,
16 TPPROTO(struct request_queue *q, struct request *rq), 16 TP_PROTO(struct request_queue *q, struct request *rq),
17 TPARGS(q, rq)); 17 TP_ARGS(q, rq));
18 18
19DECLARE_TRACE(block_rq_requeue, 19DECLARE_TRACE(block_rq_requeue,
20 TPPROTO(struct request_queue *q, struct request *rq), 20 TP_PROTO(struct request_queue *q, struct request *rq),
21 TPARGS(q, rq)); 21 TP_ARGS(q, rq));
22 22
23DECLARE_TRACE(block_rq_complete, 23DECLARE_TRACE(block_rq_complete,
24 TPPROTO(struct request_queue *q, struct request *rq), 24 TP_PROTO(struct request_queue *q, struct request *rq),
25 TPARGS(q, rq)); 25 TP_ARGS(q, rq));
26 26
27DECLARE_TRACE(block_bio_bounce, 27DECLARE_TRACE(block_bio_bounce,
28 TPPROTO(struct request_queue *q, struct bio *bio), 28 TP_PROTO(struct request_queue *q, struct bio *bio),
29 TPARGS(q, bio)); 29 TP_ARGS(q, bio));
30 30
31DECLARE_TRACE(block_bio_complete, 31DECLARE_TRACE(block_bio_complete,
32 TPPROTO(struct request_queue *q, struct bio *bio), 32 TP_PROTO(struct request_queue *q, struct bio *bio),
33 TPARGS(q, bio)); 33 TP_ARGS(q, bio));
34 34
35DECLARE_TRACE(block_bio_backmerge, 35DECLARE_TRACE(block_bio_backmerge,
36 TPPROTO(struct request_queue *q, struct bio *bio), 36 TP_PROTO(struct request_queue *q, struct bio *bio),
37 TPARGS(q, bio)); 37 TP_ARGS(q, bio));
38 38
39DECLARE_TRACE(block_bio_frontmerge, 39DECLARE_TRACE(block_bio_frontmerge,
40 TPPROTO(struct request_queue *q, struct bio *bio), 40 TP_PROTO(struct request_queue *q, struct bio *bio),
41 TPARGS(q, bio)); 41 TP_ARGS(q, bio));
42 42
43DECLARE_TRACE(block_bio_queue, 43DECLARE_TRACE(block_bio_queue,
44 TPPROTO(struct request_queue *q, struct bio *bio), 44 TP_PROTO(struct request_queue *q, struct bio *bio),
45 TPARGS(q, bio)); 45 TP_ARGS(q, bio));
46 46
47DECLARE_TRACE(block_getrq, 47DECLARE_TRACE(block_getrq,
48 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 48 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
49 TPARGS(q, bio, rw)); 49 TP_ARGS(q, bio, rw));
50 50
51DECLARE_TRACE(block_sleeprq, 51DECLARE_TRACE(block_sleeprq,
52 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 52 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
53 TPARGS(q, bio, rw)); 53 TP_ARGS(q, bio, rw));
54 54
55DECLARE_TRACE(block_plug, 55DECLARE_TRACE(block_plug,
56 TPPROTO(struct request_queue *q), 56 TP_PROTO(struct request_queue *q),
57 TPARGS(q)); 57 TP_ARGS(q));
58 58
59DECLARE_TRACE(block_unplug_timer, 59DECLARE_TRACE(block_unplug_timer,
60 TPPROTO(struct request_queue *q), 60 TP_PROTO(struct request_queue *q),
61 TPARGS(q)); 61 TP_ARGS(q));
62 62
63DECLARE_TRACE(block_unplug_io, 63DECLARE_TRACE(block_unplug_io,
64 TPPROTO(struct request_queue *q), 64 TP_PROTO(struct request_queue *q),
65 TPARGS(q)); 65 TP_ARGS(q));
66 66
67DECLARE_TRACE(block_split, 67DECLARE_TRACE(block_split,
68 TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), 68 TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
69 TPARGS(q, bio, pdu)); 69 TP_ARGS(q, bio, pdu));
70 70
71DECLARE_TRACE(block_remap, 71DECLARE_TRACE(block_remap,
72 TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, 72 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
73 sector_t from, sector_t to), 73 sector_t from, sector_t to),
74 TPARGS(q, bio, dev, from, to)); 74 TP_ARGS(q, bio, dev, from, to));
75 75
76#endif 76#endif
diff --git a/include/trace/irq.h b/include/trace/irq.h
new file mode 100644
index 000000000000..ff5d4495dc37
--- /dev/null
+++ b/include/trace/irq.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_IRQ_H
2#define _TRACE_IRQ_H
3
4#include <linux/interrupt.h>
5#include <linux/tracepoint.h>
6
7#include <trace/irq_event_types.h>
8
9#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
new file mode 100644
index 000000000000..85964ebd47ec
--- /dev/null
+++ b/include/trace/irq_event_types.h
@@ -0,0 +1,55 @@
1
2/* use <trace/irq.h> instead */
3#ifndef TRACE_FORMAT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM irq
10
11/*
12 * Tracepoint for entry of interrupt handler:
13 */
14TRACE_FORMAT(irq_handler_entry,
15 TP_PROTO(int irq, struct irqaction *action),
16 TP_ARGS(irq, action),
17 TP_FMT("irq=%d handler=%s", irq, action->name)
18 );
19
20/*
21 * Tracepoint for return of an interrupt handler:
22 */
23TRACE_EVENT(irq_handler_exit,
24
25 TP_PROTO(int irq, struct irqaction *action, int ret),
26
27 TP_ARGS(irq, action, ret),
28
29 TP_STRUCT__entry(
30 __field( int, irq )
31 __field( int, ret )
32 ),
33
34 TP_fast_assign(
35 __entry->irq = irq;
36 __entry->ret = ret;
37 ),
38
39 TP_printk("irq=%d return=%s",
40 __entry->irq, __entry->ret ? "handled" : "unhandled")
41);
42
43TRACE_FORMAT(softirq_entry,
44 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
45 TP_ARGS(h, vec),
46 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
47 );
48
49TRACE_FORMAT(softirq_exit,
50 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
51 TP_ARGS(h, vec),
52 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
53 );
54
55#undef TRACE_SYSTEM
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644
index 000000000000..ad8b7857855a
--- /dev/null
+++ b/include/trace/kmemtrace.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright (C) 2008 Eduard - Gabriel Munteanu
3 *
4 * This file is released under GPL version 2.
5 */
6
7#ifndef _LINUX_KMEMTRACE_H
8#define _LINUX_KMEMTRACE_H
9
10#ifdef __KERNEL__
11
12#include <linux/types.h>
13#include <linux/marker.h>
14
15enum kmemtrace_type_id {
16 KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */
17 KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */
18 KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */
19};
20
21#ifdef CONFIG_KMEMTRACE
22
23extern void kmemtrace_init(void);
24
25extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
26 unsigned long call_site,
27 const void *ptr,
28 size_t bytes_req,
29 size_t bytes_alloc,
30 gfp_t gfp_flags,
31 int node);
32
33extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
34 unsigned long call_site,
35 const void *ptr);
36
37#else /* CONFIG_KMEMTRACE */
38
39static inline void kmemtrace_init(void)
40{
41}
42
43static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
44 unsigned long call_site,
45 const void *ptr,
46 size_t bytes_req,
47 size_t bytes_alloc,
48 gfp_t gfp_flags,
49 int node)
50{
51}
52
53static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
54 unsigned long call_site,
55 const void *ptr)
56{
57}
58
59#endif /* CONFIG_KMEMTRACE */
60
61static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
62 unsigned long call_site,
63 const void *ptr,
64 size_t bytes_req,
65 size_t bytes_alloc,
66 gfp_t gfp_flags)
67{
68 kmemtrace_mark_alloc_node(type_id, call_site, ptr,
69 bytes_req, bytes_alloc, gfp_flags, -1);
70}
71
72#endif /* __KERNEL__ */
73
74#endif /* _LINUX_KMEMTRACE_H */
75
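
The slab_def.h and slub_def.h hunks earlier are the real consumers of this API; as a compact hedged recap, an allocator wrapper pairs each allocation mark with a free mark (my_kmalloc_traced()/my_kfree_traced() are imaginary, and the "allocated" size is only approximated here):

#include <linux/kernel.h>	/* _THIS_IP_ */
#include <linux/slab.h>
#include <trace/kmemtrace.h>

static void *my_kmalloc_traced(size_t size, gfp_t flags)
{
	void *ret = __kmalloc(size, flags);

	/* requested == allocated here only as an approximation */
	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     size, size, flags);
	return ret;
}

static void my_kfree_traced(const void *ptr)
{
	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ptr);
	kfree(ptr);
}
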
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644
index 000000000000..5ca67df87f2a
--- /dev/null
+++ b/include/trace/lockdep.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_LOCKDEP_H
2#define _TRACE_LOCKDEP_H
3
4#include <linux/lockdep.h>
5#include <linux/tracepoint.h>
6
7#include <trace/lockdep_event_types.h>
8
9#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644
index 000000000000..adccfcd2ec8f
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
@@ -0,0 +1,44 @@
1
2#ifndef TRACE_FORMAT
3# error Do not include this file directly.
4# error Unless you know what you are doing.
5#endif
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM lock
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_FORMAT(lock_acquire,
13 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
14 int trylock, int read, int check,
15 struct lockdep_map *next_lock, unsigned long ip),
16 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
17 TP_FMT("%s%s%s", trylock ? "try " : "",
18 read ? "read " : "", lock->name)
19 );
20
21TRACE_FORMAT(lock_release,
22 TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
23 TP_ARGS(lock, nested, ip),
24 TP_FMT("%s", lock->name)
25 );
26
27#ifdef CONFIG_LOCK_STAT
28
29TRACE_FORMAT(lock_contended,
30 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
31 TP_ARGS(lock, ip),
32 TP_FMT("%s", lock->name)
33 );
34
35TRACE_FORMAT(lock_acquired,
36 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
37 TP_ARGS(lock, ip),
38 TP_FMT("%s", lock->name)
39 );
40
41#endif
42#endif
43
44#undef TRACE_SYSTEM
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644
index 000000000000..ef204666e983
--- /dev/null
+++ b/include/trace/power.h
@@ -0,0 +1,32 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14 ktime_t stamp;
15 ktime_t end;
16 int type;
17 int state;
18};
19
20DECLARE_TRACE(power_start,
21 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
22 TP_ARGS(it, type, state));
23
24DECLARE_TRACE(power_mark,
25 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
26 TP_ARGS(it, type, state));
27
28DECLARE_TRACE(power_end,
29 TP_PROTO(struct power_trace *it),
30 TP_ARGS(it));
31
32#endif /* _TRACE_POWER_H */
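
A hedged sketch of how a cpuidle-style caller might bracket a low-power entry with the tracepoints declared above (demo_enter_cstate() is a made-up name; the power_trace struct carries the timestamps):

#include <trace/power.h>

static void demo_enter_cstate(unsigned int state)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, state);
	/* ... architecture idle entry would happen here ... */
	trace_power_end(&it);
}
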
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 0d81098ee9fc..4e372a1a29bf 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,53 +4,6 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(sched_kthread_stop, 7#include <trace/sched_event_types.h>
8 TPPROTO(struct task_struct *t),
9 TPARGS(t));
10
11DECLARE_TRACE(sched_kthread_stop_ret,
12 TPPROTO(int ret),
13 TPARGS(ret));
14
15DECLARE_TRACE(sched_wait_task,
16 TPPROTO(struct rq *rq, struct task_struct *p),
17 TPARGS(rq, p));
18
19DECLARE_TRACE(sched_wakeup,
20 TPPROTO(struct rq *rq, struct task_struct *p, int success),
21 TPARGS(rq, p, success));
22
23DECLARE_TRACE(sched_wakeup_new,
24 TPPROTO(struct rq *rq, struct task_struct *p, int success),
25 TPARGS(rq, p, success));
26
27DECLARE_TRACE(sched_switch,
28 TPPROTO(struct rq *rq, struct task_struct *prev,
29 struct task_struct *next),
30 TPARGS(rq, prev, next));
31
32DECLARE_TRACE(sched_migrate_task,
33 TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
34 TPARGS(p, orig_cpu, dest_cpu));
35
36DECLARE_TRACE(sched_process_free,
37 TPPROTO(struct task_struct *p),
38 TPARGS(p));
39
40DECLARE_TRACE(sched_process_exit,
41 TPPROTO(struct task_struct *p),
42 TPARGS(p));
43
44DECLARE_TRACE(sched_process_wait,
45 TPPROTO(struct pid *pid),
46 TPARGS(pid));
47
48DECLARE_TRACE(sched_process_fork,
49 TPPROTO(struct task_struct *parent, struct task_struct *child),
50 TPARGS(parent, child));
51
52DECLARE_TRACE(sched_signal_send,
53 TPPROTO(int sig, struct task_struct *p),
54 TPARGS(sig, p));
55 8
56#endif 9#endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644
index 000000000000..63547dc1125f
--- /dev/null
+++ b/include/trace/sched_event_types.h
@@ -0,0 +1,337 @@
1
2/* use <trace/sched.h> instead */
3#ifndef TRACE_EVENT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM sched
10
11/*
12 * Tracepoint for calling kthread_stop, performed to end a kthread:
13 */
14TRACE_EVENT(sched_kthread_stop,
15
16 TP_PROTO(struct task_struct *t),
17
18 TP_ARGS(t),
19
20 TP_STRUCT__entry(
21 __array( char, comm, TASK_COMM_LEN )
22 __field( pid_t, pid )
23 ),
24
25 TP_fast_assign(
26 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
27 __entry->pid = t->pid;
28 ),
29
30 TP_printk("task %s:%d", __entry->comm, __entry->pid)
31);
32
33/*
34 * Tracepoint for the return value of the kthread stopping:
35 */
36TRACE_EVENT(sched_kthread_stop_ret,
37
38 TP_PROTO(int ret),
39
40 TP_ARGS(ret),
41
42 TP_STRUCT__entry(
43 __field( int, ret )
44 ),
45
46 TP_fast_assign(
47 __entry->ret = ret;
48 ),
49
50 TP_printk("ret %d", __entry->ret)
51);
52
53/*
54 * Tracepoint for waiting on task to unschedule:
55 *
56 * (NOTE: the 'rq' argument is not used by generic trace events,
57 * but used by the latency tracer plugin. )
58 */
59TRACE_EVENT(sched_wait_task,
60
61 TP_PROTO(struct rq *rq, struct task_struct *p),
62
63 TP_ARGS(rq, p),
64
65 TP_STRUCT__entry(
66 __array( char, comm, TASK_COMM_LEN )
67 __field( pid_t, pid )
68 __field( int, prio )
69 ),
70
71 TP_fast_assign(
72 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
73 __entry->pid = p->pid;
74 __entry->prio = p->prio;
75 ),
76
77 TP_printk("task %s:%d [%d]",
78 __entry->comm, __entry->pid, __entry->prio)
79);
80
81/*
82 * Tracepoint for waking up a task:
83 *
84 * (NOTE: the 'rq' argument is not used by generic trace events,
85 * but used by the latency tracer plugin. )
86 */
87TRACE_EVENT(sched_wakeup,
88
89 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
90
91 TP_ARGS(rq, p, success),
92
93 TP_STRUCT__entry(
94 __array( char, comm, TASK_COMM_LEN )
95 __field( pid_t, pid )
96 __field( int, prio )
97 __field( int, success )
98 ),
99
100 TP_fast_assign(
101 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
102 __entry->pid = p->pid;
103 __entry->prio = p->prio;
104 __entry->success = success;
105 ),
106
107 TP_printk("task %s:%d [%d] success=%d",
108 __entry->comm, __entry->pid, __entry->prio,
109 __entry->success)
110);
111
112/*
113 * Tracepoint for waking up a new task:
114 *
115 * (NOTE: the 'rq' argument is not used by generic trace events,
116 * but used by the latency tracer plugin. )
117 */
118TRACE_EVENT(sched_wakeup_new,
119
120 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
121
122 TP_ARGS(rq, p, success),
123
124 TP_STRUCT__entry(
125 __array( char, comm, TASK_COMM_LEN )
126 __field( pid_t, pid )
127 __field( int, prio )
128 __field( int, success )
129 ),
130
131 TP_fast_assign(
132 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
133 __entry->pid = p->pid;
134 __entry->prio = p->prio;
135 __entry->success = success;
136 ),
137
138 TP_printk("task %s:%d [%d] success=%d",
139 __entry->comm, __entry->pid, __entry->prio,
140 __entry->success)
141);
142
143/*
144 * Tracepoint for task switches, performed by the scheduler:
145 *
146 * (NOTE: the 'rq' argument is not used by generic trace events,
147 * but used by the latency tracer plugin. )
148 */
149TRACE_EVENT(sched_switch,
150
151 TP_PROTO(struct rq *rq, struct task_struct *prev,
152 struct task_struct *next),
153
154 TP_ARGS(rq, prev, next),
155
156 TP_STRUCT__entry(
157 __array( char, prev_comm, TASK_COMM_LEN )
158 __field( pid_t, prev_pid )
159 __field( int, prev_prio )
160 __array( char, next_comm, TASK_COMM_LEN )
161 __field( pid_t, next_pid )
162 __field( int, next_prio )
163 ),
164
165 TP_fast_assign(
166 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
167 __entry->prev_pid = prev->pid;
168 __entry->prev_prio = prev->prio;
169 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
170 __entry->next_pid = next->pid;
171 __entry->next_prio = next->prio;
172 ),
173
174 TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
175 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
176 __entry->next_comm, __entry->next_pid, __entry->next_prio)
177);
178
179/*
180 * Tracepoint for a task being migrated:
181 */
182TRACE_EVENT(sched_migrate_task,
183
184 TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
185
186 TP_ARGS(p, orig_cpu, dest_cpu),
187
188 TP_STRUCT__entry(
189 __array( char, comm, TASK_COMM_LEN )
190 __field( pid_t, pid )
191 __field( int, prio )
192 __field( int, orig_cpu )
193 __field( int, dest_cpu )
194 ),
195
196 TP_fast_assign(
197 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
198 __entry->pid = p->pid;
199 __entry->prio = p->prio;
200 __entry->orig_cpu = orig_cpu;
201 __entry->dest_cpu = dest_cpu;
202 ),
203
204 TP_printk("task %s:%d [%d] from: %d to: %d",
205 __entry->comm, __entry->pid, __entry->prio,
206 __entry->orig_cpu, __entry->dest_cpu)
207);
208
209/*
210 * Tracepoint for freeing a task:
211 */
212TRACE_EVENT(sched_process_free,
213
214 TP_PROTO(struct task_struct *p),
215
216 TP_ARGS(p),
217
218 TP_STRUCT__entry(
219 __array( char, comm, TASK_COMM_LEN )
220 __field( pid_t, pid )
221 __field( int, prio )
222 ),
223
224 TP_fast_assign(
225 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
226 __entry->pid = p->pid;
227 __entry->prio = p->prio;
228 ),
229
230 TP_printk("task %s:%d [%d]",
231 __entry->comm, __entry->pid, __entry->prio)
232);
233
234/*
235 * Tracepoint for a task exiting:
236 */
237TRACE_EVENT(sched_process_exit,
238
239 TP_PROTO(struct task_struct *p),
240
241 TP_ARGS(p),
242
243 TP_STRUCT__entry(
244 __array( char, comm, TASK_COMM_LEN )
245 __field( pid_t, pid )
246 __field( int, prio )
247 ),
248
249 TP_fast_assign(
250 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
251 __entry->pid = p->pid;
252 __entry->prio = p->prio;
253 ),
254
255 TP_printk("task %s:%d [%d]",
256 __entry->comm, __entry->pid, __entry->prio)
257);
258
259/*
260 * Tracepoint for a waiting task:
261 */
262TRACE_EVENT(sched_process_wait,
263
264 TP_PROTO(struct pid *pid),
265
266 TP_ARGS(pid),
267
268 TP_STRUCT__entry(
269 __array( char, comm, TASK_COMM_LEN )
270 __field( pid_t, pid )
271 __field( int, prio )
272 ),
273
274 TP_fast_assign(
275 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
276 __entry->pid = pid_nr(pid);
277 __entry->prio = current->prio;
278 ),
279
280 TP_printk("task %s:%d [%d]",
281 __entry->comm, __entry->pid, __entry->prio)
282);
283
284/*
285 * Tracepoint for do_fork:
286 */
287TRACE_EVENT(sched_process_fork,
288
289 TP_PROTO(struct task_struct *parent, struct task_struct *child),
290
291 TP_ARGS(parent, child),
292
293 TP_STRUCT__entry(
294 __array( char, parent_comm, TASK_COMM_LEN )
295 __field( pid_t, parent_pid )
296 __array( char, child_comm, TASK_COMM_LEN )
297 __field( pid_t, child_pid )
298 ),
299
300 TP_fast_assign(
301 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
302 __entry->parent_pid = parent->pid;
303 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
304 __entry->child_pid = child->pid;
305 ),
306
307 TP_printk("parent %s:%d child %s:%d",
308 __entry->parent_comm, __entry->parent_pid,
309 __entry->child_comm, __entry->child_pid)
310);
311
312/*
313 * Tracepoint for sending a signal:
314 */
315TRACE_EVENT(sched_signal_send,
316
317 TP_PROTO(int sig, struct task_struct *p),
318
319 TP_ARGS(sig, p),
320
321 TP_STRUCT__entry(
322 __field( int, sig )
323 __array( char, comm, TASK_COMM_LEN )
324 __field( pid_t, pid )
325 ),
326
327 TP_fast_assign(
328 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
329 __entry->pid = p->pid;
330 __entry->sig = sig;
331 ),
332
333 TP_printk("sig: %d task %s:%d",
334 __entry->sig, __entry->comm, __entry->pid)
335);
336
337#undef TRACE_SYSTEM
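
Each TRACE_EVENT() above bundles the tracepoint prototype (TP_PROTO/TP_ARGS), the ring-buffer record layout (TP_STRUCT__entry), the copy into that record (TP_fast_assign) and the text formatter (TP_printk). Because TRACE_EVENT() also declares an ordinary tracepoint, a probe can still be attached by hand; below is a minimal sketch of a hypothetical module doing so for sched_switch, matching the prototype shown above (not part of this patch):

/* Hypothetical probe module sketch; not part of this patch. */
#include <linux/module.h>
#include <trace/sched.h>

static unsigned long nr_switches;	/* racy counter; fine for a sketch */

/* Must match TP_PROTO(struct rq *rq, struct task_struct *prev,
 * struct task_struct *next) above. */
static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
			       struct task_struct *next)
{
	nr_switches++;
}

static int __init probe_init(void)
{
	/* register_trace_sched_switch() is generated by the tracepoint declaration. */
	return register_trace_sched_switch(probe_sched_switch);
}

static void __exit probe_exit(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	tracepoint_synchronize_unregister();
	pr_info("saw %lu context switches\n", nr_switches);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");
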
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
new file mode 100644
index 000000000000..df56f5694be6
--- /dev/null
+++ b/include/trace/trace_event_types.h
@@ -0,0 +1,5 @@
1/* trace/<type>_event_types.h here */
2
3#include <trace/sched_event_types.h>
4#include <trace/irq_event_types.h>
5#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
new file mode 100644
index 000000000000..fd13750ca4ba
--- /dev/null
+++ b/include/trace/trace_events.h
@@ -0,0 +1,5 @@
1/* trace/<type>.h here */
2
3#include <trace/sched.h>
4#include <trace/irq.h>
5#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644
index 000000000000..7626523deeba
--- /dev/null
+++ b/include/trace/workqueue.h
@@ -0,0 +1,25 @@
1#ifndef __TRACE_WORKQUEUE_H
2#define __TRACE_WORKQUEUE_H
3
4#include <linux/tracepoint.h>
5#include <linux/workqueue.h>
6#include <linux/sched.h>
7
8DECLARE_TRACE(workqueue_insertion,
9 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
10 TP_ARGS(wq_thread, work));
11
12DECLARE_TRACE(workqueue_execution,
13 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
14 TP_ARGS(wq_thread, work));
15
16/* Trace the creation of one workqueue thread on a cpu */
17DECLARE_TRACE(workqueue_creation,
18 TP_PROTO(struct task_struct *wq_thread, int cpu),
19 TP_ARGS(wq_thread, cpu));
20
21DECLARE_TRACE(workqueue_destruction,
22 TP_PROTO(struct task_struct *wq_thread),
23 TP_ARGS(wq_thread));
24
25#endif /* __TRACE_WORKQUEUE_H */
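
These DECLARE_TRACE() lines only declare the tracepoints; the matching DEFINE_TRACE() and trace_*() call sites live in kernel/workqueue.c (not shown in this hunk), and the workqueue tracer added later in this patch consumes them. A probe can also be attached directly; a brief sketch assuming the workqueue_creation prototype above, with module boilerplate as in the sched_switch sketch earlier:

#include <trace/workqueue.h>

static void probe_wq_creation(struct task_struct *wq_thread, int cpu)
{
	pr_info("workqueue thread %s created for cpu %d\n",
		wq_thread->comm, cpu);
}

/* ...in the module init function: */
/*	err = register_trace_workqueue_creation(probe_wq_creation); */
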
diff --git a/init/Kconfig b/init/Kconfig
index 14c483d2b7c9..9d8cf2d2f840 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1005,7 +1005,7 @@ config TRACEPOINTS
1005 1005
1006config MARKERS 1006config MARKERS
1007 bool "Activate markers" 1007 bool "Activate markers"
1008 depends on TRACEPOINTS 1008 select TRACEPOINTS
1009 help 1009 help
1010 Place an empty function call at each marker site. Can be 1010 Place an empty function call at each marker site. Can be
1011 dynamically changed for a probe function. 1011 dynamically changed for a probe function.
diff --git a/init/main.c b/init/main.c
index 07c8658ffca5..3585f073d636 100644
--- a/init/main.c
+++ b/init/main.c
@@ -71,6 +71,7 @@
71#include <asm/setup.h> 71#include <asm/setup.h>
72#include <asm/sections.h> 72#include <asm/sections.h>
73#include <asm/cacheflush.h> 73#include <asm/cacheflush.h>
74#include <trace/kmemtrace.h>
74 75
75#ifdef CONFIG_X86_LOCAL_APIC 76#ifdef CONFIG_X86_LOCAL_APIC
76#include <asm/smp.h> 77#include <asm/smp.h>
@@ -648,6 +649,7 @@ asmlinkage void __init start_kernel(void)
648 enable_debug_pagealloc(); 649 enable_debug_pagealloc();
649 cpu_hotplug_init(); 650 cpu_hotplug_init();
650 kmem_cache_init(); 651 kmem_cache_init();
652 kmemtrace_init();
651 debug_objects_mem_init(); 653 debug_objects_mem_init();
652 idr_init_cache(); 654 idr_init_cache();
653 setup_per_cpu_pageset(); 655 setup_per_cpu_pageset();
@@ -769,6 +771,7 @@ static void __init do_basic_setup(void)
769{ 771{
770 rcu_init_sched(); /* needed by module_init stage. */ 772 rcu_init_sched(); /* needed by module_init stage. */
771 init_workqueues(); 773 init_workqueues();
774 cpuset_init_smp();
772 usermodehelper_init(); 775 usermodehelper_init();
773 driver_init(); 776 driver_init();
774 init_irq_proc(); 777 init_irq_proc();
@@ -863,8 +866,6 @@ static int __init kernel_init(void * unused)
863 smp_init(); 866 smp_init();
864 sched_init_smp(); 867 sched_init_smp();
865 868
866 cpuset_init_smp();
867
868 do_basic_setup(); 869 do_basic_setup();
869 870
870 /* 871 /*
diff --git a/kernel/extable.c b/kernel/extable.c
index e136ed8d82ba..b54a6017b6b5 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -15,11 +15,22 @@
15 along with this program; if not, write to the Free Software 15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/ 17*/
18#include <linux/ftrace.h>
19#include <linux/memory.h>
18#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/mutex.h>
19#include <linux/init.h> 22#include <linux/init.h>
20#include <linux/ftrace.h> 23
21#include <asm/uaccess.h>
22#include <asm/sections.h> 24#include <asm/sections.h>
25#include <asm/uaccess.h>
26
27/*
28 * mutex protecting text section modification (dynamic code patching).
29 * some users need to sleep (allocating memory...) while they hold this lock.
30 *
31 * NOT exported to modules - patching kernel text is a really delicate matter.
32 */
33DEFINE_MUTEX(text_mutex);
23 34
24extern struct exception_table_entry __start___ex_table[]; 35extern struct exception_table_entry __start___ex_table[];
25extern struct exception_table_entry __stop___ex_table[]; 36extern struct exception_table_entry __stop___ex_table[];
@@ -41,7 +52,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
41 return e; 52 return e;
42} 53}
43 54
44__notrace_funcgraph int core_kernel_text(unsigned long addr) 55int core_kernel_text(unsigned long addr)
45{ 56{
46 if (addr >= (unsigned long)_stext && 57 if (addr >= (unsigned long)_stext &&
47 addr <= (unsigned long)_etext) 58 addr <= (unsigned long)_etext)
@@ -54,7 +65,7 @@ __notrace_funcgraph int core_kernel_text(unsigned long addr)
54 return 0; 65 return 0;
55} 66}
56 67
57__notrace_funcgraph int __kernel_text_address(unsigned long addr) 68int __kernel_text_address(unsigned long addr)
58{ 69{
59 if (core_kernel_text(addr)) 70 if (core_kernel_text(addr))
60 return 1; 71 return 1;
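
text_mutex, defined above, is intended as the one lock every runtime code patcher takes around modifications of live kernel text; the kprobes hunks below follow exactly that pattern. A schematic sketch of the expected usage (patch_one_site() is hypothetical, not an interface added by this patch):

#include <linux/memory.h>	/* text_mutex is declared there by this series */
#include <linux/mutex.h>

static void patch_one_site(void *addr, const void *opcode, size_t len)
{
	mutex_lock(&text_mutex);
	/* ...rewrite the instruction bytes at addr, flush the icache... */
	mutex_unlock(&text_mutex);
}
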
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9ebf77968871..343acecae629 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <linux/rculist.h> 18#include <linux/rculist.h>
19#include <linux/hash.h> 19#include <linux/hash.h>
20#include <trace/irq.h>
20#include <linux/bootmem.h> 21#include <linux/bootmem.h>
21 22
22#include "internals.h" 23#include "internals.h"
@@ -338,6 +339,9 @@ irqreturn_t no_action(int cpl, void *dev_id)
338 return IRQ_NONE; 339 return IRQ_NONE;
339} 340}
340 341
342DEFINE_TRACE(irq_handler_entry);
343DEFINE_TRACE(irq_handler_exit);
344
341/** 345/**
342 * handle_IRQ_event - irq action chain handler 346 * handle_IRQ_event - irq action chain handler
343 * @irq: the interrupt number 347 * @irq: the interrupt number
@@ -356,7 +360,9 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
356 local_irq_enable_in_hardirq(); 360 local_irq_enable_in_hardirq();
357 361
358 do { 362 do {
363 trace_irq_handler_entry(irq, action);
359 ret = action->handler(irq, action->dev_id); 364 ret = action->handler(irq, action->dev_id);
365 trace_irq_handler_exit(irq, action, ret);
360 if (ret == IRQ_HANDLED) 366 if (ret == IRQ_HANDLED)
361 status |= action->flags; 367 status |= action->flags;
362 retval |= ret; 368 retval |= ret;
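
With trace_irq_handler_entry()/trace_irq_handler_exit() now wrapping every handler invocation, a consumer can time or log individual handlers. A hedged probe sketch, assuming the prototypes declared in include/trace/irq.h (irq number plus struct irqaction *, with the handler's return value added on exit):

#include <linux/interrupt.h>
#include <trace/irq.h>

static void probe_irq_entry(int irq, struct irqaction *action)
{
	pr_debug("irq %d: entering %s\n", irq, action->name);
}

static void probe_irq_exit(int irq, struct irqaction *action, int ret)
{
	pr_debug("irq %d: %s returned %d\n", irq, action->name, ret);
}

/* registration, e.g. from a module init function: */
static int attach_irq_probes(void)
{
	int err = register_trace_irq_handler_entry(probe_irq_entry);

	if (err)
		return err;
	return register_trace_irq_handler_exit(probe_irq_exit);
}
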
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7ba8cd9845cb..5016bfb682b9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -43,6 +43,7 @@
43#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <linux/debugfs.h> 44#include <linux/debugfs.h>
45#include <linux/kdebug.h> 45#include <linux/kdebug.h>
46#include <linux/memory.h>
46 47
47#include <asm-generic/sections.h> 48#include <asm-generic/sections.h>
48#include <asm/cacheflush.h> 49#include <asm/cacheflush.h>
@@ -699,9 +700,10 @@ int __kprobes register_kprobe(struct kprobe *p)
699 goto out; 700 goto out;
700 } 701 }
701 702
703 mutex_lock(&text_mutex);
702 ret = arch_prepare_kprobe(p); 704 ret = arch_prepare_kprobe(p);
703 if (ret) 705 if (ret)
704 goto out; 706 goto out_unlock_text;
705 707
706 INIT_HLIST_NODE(&p->hlist); 708 INIT_HLIST_NODE(&p->hlist);
707 hlist_add_head_rcu(&p->hlist, 709 hlist_add_head_rcu(&p->hlist,
@@ -710,6 +712,8 @@ int __kprobes register_kprobe(struct kprobe *p)
710 if (kprobe_enabled) 712 if (kprobe_enabled)
711 arch_arm_kprobe(p); 713 arch_arm_kprobe(p);
712 714
715out_unlock_text:
716 mutex_unlock(&text_mutex);
713out: 717out:
714 mutex_unlock(&kprobe_mutex); 718 mutex_unlock(&kprobe_mutex);
715 719
@@ -746,8 +750,11 @@ valid_p:
746 * enabled and not gone - otherwise, the breakpoint would 750 * enabled and not gone - otherwise, the breakpoint would
747 * already have been removed. We save on flushing icache. 751 * already have been removed. We save on flushing icache.
748 */ 752 */
749 if (kprobe_enabled && !kprobe_gone(old_p)) 753 if (kprobe_enabled && !kprobe_gone(old_p)) {
754 mutex_lock(&text_mutex);
750 arch_disarm_kprobe(p); 755 arch_disarm_kprobe(p);
756 mutex_unlock(&text_mutex);
757 }
751 hlist_del_rcu(&old_p->hlist); 758 hlist_del_rcu(&old_p->hlist);
752 } else { 759 } else {
753 if (p->break_handler && !kprobe_gone(p)) 760 if (p->break_handler && !kprobe_gone(p))
@@ -912,10 +919,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
912 ri->rp = rp; 919 ri->rp = rp;
913 ri->task = current; 920 ri->task = current;
914 921
915 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 922 if (rp->entry_handler && rp->entry_handler(ri, regs))
916 spin_unlock_irqrestore(&rp->lock, flags);
917 return 0; 923 return 0;
918 }
919 924
920 arch_prepare_kretprobe(ri, regs); 925 arch_prepare_kretprobe(ri, regs);
921 926
@@ -1280,12 +1285,14 @@ static void __kprobes enable_all_kprobes(void)
1280 if (kprobe_enabled) 1285 if (kprobe_enabled)
1281 goto already_enabled; 1286 goto already_enabled;
1282 1287
1288 mutex_lock(&text_mutex);
1283 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1289 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1284 head = &kprobe_table[i]; 1290 head = &kprobe_table[i];
1285 hlist_for_each_entry_rcu(p, node, head, hlist) 1291 hlist_for_each_entry_rcu(p, node, head, hlist)
1286 if (!kprobe_gone(p)) 1292 if (!kprobe_gone(p))
1287 arch_arm_kprobe(p); 1293 arch_arm_kprobe(p);
1288 } 1294 }
1295 mutex_unlock(&text_mutex);
1289 1296
1290 kprobe_enabled = true; 1297 kprobe_enabled = true;
1291 printk(KERN_INFO "Kprobes globally enabled\n"); 1298 printk(KERN_INFO "Kprobes globally enabled\n");
@@ -1310,6 +1317,7 @@ static void __kprobes disable_all_kprobes(void)
1310 1317
1311 kprobe_enabled = false; 1318 kprobe_enabled = false;
1312 printk(KERN_INFO "Kprobes globally disabled\n"); 1319 printk(KERN_INFO "Kprobes globally disabled\n");
1320 mutex_lock(&text_mutex);
1313 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1321 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1314 head = &kprobe_table[i]; 1322 head = &kprobe_table[i];
1315 hlist_for_each_entry_rcu(p, node, head, hlist) { 1323 hlist_for_each_entry_rcu(p, node, head, hlist) {
@@ -1318,6 +1326,7 @@ static void __kprobes disable_all_kprobes(void)
1318 } 1326 }
1319 } 1327 }
1320 1328
1329 mutex_unlock(&text_mutex);
1321 mutex_unlock(&kprobe_mutex); 1330 mutex_unlock(&kprobe_mutex);
1322 /* Allow all currently running kprobes to complete */ 1331 /* Allow all currently running kprobes to complete */
1323 synchronize_sched(); 1332 synchronize_sched();
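
The kprobes changes above take text_mutex around arch_prepare_kprobe(), arch_arm_kprobe() and arch_disarm_kprobe(), serializing breakpoint insertion against any other text patching. The registration API itself is unchanged; for reference, a minimal kprobe user looks like this (the probed symbol is only an example):

#include <linux/kprobes.h>
#include <linux/module.h>

static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* example target */
	.pre_handler	= pre_handler,
};

static int __init kp_init(void)
{
	/* register_kprobe() now arms the probe under text_mutex. */
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");
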
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3673a3f44d9d..81b5f33970b8 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
42#include <linux/hash.h> 42#include <linux/hash.h>
43#include <linux/ftrace.h> 43#include <linux/ftrace.h>
44#include <linux/stringify.h> 44#include <linux/stringify.h>
45#include <trace/lockdep.h>
45 46
46#include <asm/sections.h> 47#include <asm/sections.h>
47 48
@@ -433,13 +434,6 @@ atomic_t nr_find_usage_forwards_checks;
433atomic_t nr_find_usage_forwards_recursions; 434atomic_t nr_find_usage_forwards_recursions;
434atomic_t nr_find_usage_backwards_checks; 435atomic_t nr_find_usage_backwards_checks;
435atomic_t nr_find_usage_backwards_recursions; 436atomic_t nr_find_usage_backwards_recursions;
436# define debug_atomic_inc(ptr) atomic_inc(ptr)
437# define debug_atomic_dec(ptr) atomic_dec(ptr)
438# define debug_atomic_read(ptr) atomic_read(ptr)
439#else
440# define debug_atomic_inc(ptr) do { } while (0)
441# define debug_atomic_dec(ptr) do { } while (0)
442# define debug_atomic_read(ptr) 0
443#endif 437#endif
444 438
445/* 439/*
@@ -1900,9 +1894,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1900 curr->comm, task_pid_nr(curr)); 1894 curr->comm, task_pid_nr(curr));
1901 print_lock(this); 1895 print_lock(this);
1902 if (forwards) 1896 if (forwards)
1903 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); 1897 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
1904 else 1898 else
1905 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass); 1899 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
1906 print_lock_name(other); 1900 print_lock_name(other);
1907 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 1901 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1908 1902
@@ -2015,7 +2009,8 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2015 enum lock_usage_bit bit, const char *name); 2009 enum lock_usage_bit bit, const char *name);
2016 2010
2017static int 2011static int
2018mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) 2012mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2013 enum lock_usage_bit new_bit)
2019{ 2014{
2020 int excl_bit = exclusive_bit(new_bit); 2015 int excl_bit = exclusive_bit(new_bit);
2021 int read = new_bit & 1; 2016 int read = new_bit & 1;
@@ -2043,7 +2038,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
2043 * states. 2038 * states.
2044 */ 2039 */
2045 if ((!read || !dir || STRICT_READ_CHECKS) && 2040 if ((!read || !dir || STRICT_READ_CHECKS) &&
2046 !usage(curr, this, excl_bit, state_name(new_bit))) 2041 !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2047 return 0; 2042 return 0;
2048 2043
2049 /* 2044 /*
@@ -2929,6 +2924,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
2929} 2924}
2930EXPORT_SYMBOL_GPL(lock_set_class); 2925EXPORT_SYMBOL_GPL(lock_set_class);
2931 2926
2927DEFINE_TRACE(lock_acquire);
2928
2932/* 2929/*
2933 * We are not always called with irqs disabled - do that here, 2930 * We are not always called with irqs disabled - do that here,
2934 * and also avoid lockdep recursion: 2931 * and also avoid lockdep recursion:
@@ -2939,6 +2936,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2939{ 2936{
2940 unsigned long flags; 2937 unsigned long flags;
2941 2938
2939 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
2940
2942 if (unlikely(current->lockdep_recursion)) 2941 if (unlikely(current->lockdep_recursion))
2943 return; 2942 return;
2944 2943
@@ -2953,11 +2952,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2953} 2952}
2954EXPORT_SYMBOL_GPL(lock_acquire); 2953EXPORT_SYMBOL_GPL(lock_acquire);
2955 2954
2955DEFINE_TRACE(lock_release);
2956
2956void lock_release(struct lockdep_map *lock, int nested, 2957void lock_release(struct lockdep_map *lock, int nested,
2957 unsigned long ip) 2958 unsigned long ip)
2958{ 2959{
2959 unsigned long flags; 2960 unsigned long flags;
2960 2961
2962 trace_lock_release(lock, nested, ip);
2963
2961 if (unlikely(current->lockdep_recursion)) 2964 if (unlikely(current->lockdep_recursion))
2962 return; 2965 return;
2963 2966
@@ -3106,10 +3109,14 @@ found_it:
3106 lock->ip = ip; 3109 lock->ip = ip;
3107} 3110}
3108 3111
3112DEFINE_TRACE(lock_contended);
3113
3109void lock_contended(struct lockdep_map *lock, unsigned long ip) 3114void lock_contended(struct lockdep_map *lock, unsigned long ip)
3110{ 3115{
3111 unsigned long flags; 3116 unsigned long flags;
3112 3117
3118 trace_lock_contended(lock, ip);
3119
3113 if (unlikely(!lock_stat)) 3120 if (unlikely(!lock_stat))
3114 return; 3121 return;
3115 3122
@@ -3125,10 +3132,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
3125} 3132}
3126EXPORT_SYMBOL_GPL(lock_contended); 3133EXPORT_SYMBOL_GPL(lock_contended);
3127 3134
3135DEFINE_TRACE(lock_acquired);
3136
3128void lock_acquired(struct lockdep_map *lock, unsigned long ip) 3137void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3129{ 3138{
3130 unsigned long flags; 3139 unsigned long flags;
3131 3140
3141 trace_lock_acquired(lock, ip);
3142
3132 if (unlikely(!lock_stat)) 3143 if (unlikely(!lock_stat))
3133 return; 3144 return;
3134 3145
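
lock_acquire(), lock_release(), lock_contended() and lock_acquired() now fire tracepoints before doing any lockdep bookkeeping, which is what the lock event tracing hooks into. A hedged probe sketch, assuming the lock_acquired prototype implied by the call above (struct lockdep_map * plus the caller ip):

#include <linux/lockdep.h>
#include <trace/lockdep.h>

static void probe_lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	/* lock->name is the string the lockdep map was initialized with */
	pr_debug("acquired %s from %lx\n", lock->name, ip);
}

/* e.g. from a module init function: */
/*	err = register_trace_lock_acquired(probe_lock_acquired); */
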
diff --git a/kernel/module.c b/kernel/module.c
index f77ac320d0b5..41f50605eed0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2766,7 +2766,7 @@ int is_module_address(unsigned long addr)
2766 2766
2767 2767
2768/* Is this a valid kernel address? */ 2768/* Is this a valid kernel address? */
2769__notrace_funcgraph struct module *__module_text_address(unsigned long addr) 2769struct module *__module_text_address(unsigned long addr)
2770{ 2770{
2771 struct module *mod; 2771 struct module *mod;
2772 2772
diff --git a/kernel/relay.c b/kernel/relay.c
index 8f2179c8056f..824b91ac10f1 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -677,9 +677,7 @@ int relay_late_setup_files(struct rchan *chan,
677 */ 677 */
678 for_each_online_cpu(i) { 678 for_each_online_cpu(i) {
679 if (unlikely(!chan->buf[i])) { 679 if (unlikely(!chan->buf[i])) {
680 printk(KERN_ERR "relay_late_setup_files: CPU %u " 680 WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
681 "has no buffer, it must have!\n", i);
682 BUG();
683 err = -EINVAL; 681 err = -EINVAL;
684 break; 682 break;
685 } 683 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 73513f4e19df..f01cb63d1356 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4781,10 +4781,7 @@ void scheduler_tick(void)
4781#endif 4781#endif
4782} 4782}
4783 4783
4784#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 4784unsigned long get_parent_ip(unsigned long addr)
4785 defined(CONFIG_PREEMPT_TRACER))
4786
4787static inline unsigned long get_parent_ip(unsigned long addr)
4788{ 4785{
4789 if (in_lock_functions(addr)) { 4786 if (in_lock_functions(addr)) {
4790 addr = CALLER_ADDR2; 4787 addr = CALLER_ADDR2;
@@ -4794,6 +4791,9 @@ static inline unsigned long get_parent_ip(unsigned long addr)
4794 return addr; 4791 return addr;
4795} 4792}
4796 4793
4794#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
4795 defined(CONFIG_PREEMPT_TRACER))
4796
4797void __kprobes add_preempt_count(int val) 4797void __kprobes add_preempt_count(int val)
4798{ 4798{
4799#ifdef CONFIG_DEBUG_PREEMPT 4799#ifdef CONFIG_DEBUG_PREEMPT
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 390f33234bd0..819f17ac796e 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -25,6 +25,7 @@
25 * consistent between cpus (never more than 2 jiffies difference). 25 * consistent between cpus (never more than 2 jiffies difference).
26 */ 26 */
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/hardirq.h>
28#include <linux/module.h> 29#include <linux/module.h>
29#include <linux/percpu.h> 30#include <linux/percpu.h>
30#include <linux/ktime.h> 31#include <linux/ktime.h>
@@ -154,6 +155,17 @@ u64 sched_clock_cpu(int cpu)
154 return sched_clock(); 155 return sched_clock();
155 156
156 scd = cpu_sdc(cpu); 157 scd = cpu_sdc(cpu);
158
159 /*
160 * Normally this is not called in NMI context - but if it is,
161 * trying to do any locking here is totally lethal.
162 */
163 if (unlikely(in_nmi()))
164 return scd->clock;
165
166 if (unlikely(!sched_clock_running))
167 return 0ull;
168
157 WARN_ON_ONCE(!irqs_disabled()); 169 WARN_ON_ONCE(!irqs_disabled());
158 now = sched_clock(); 170 now = sched_clock();
159 171
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 57d3f67f6f38..65ff3e3961b4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,8 +21,10 @@
21#include <linux/freezer.h> 21#include <linux/freezer.h>
22#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
24#include <linux/smp.h> 25#include <linux/smp.h>
25#include <linux/tick.h> 26#include <linux/tick.h>
27#include <trace/irq.h>
26 28
27#include <asm/irq.h> 29#include <asm/irq.h>
28/* 30/*
@@ -52,6 +54,11 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
52 54
53static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); 55static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
54 56
57char *softirq_to_name[NR_SOFTIRQS] = {
58 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
59 "TASKLET", "SCHED", "HRTIMER", "RCU"
60};
61
55/* 62/*
56 * we cannot loop indefinitely here to avoid userspace starvation, 63 * we cannot loop indefinitely here to avoid userspace starvation,
57 * but we also don't want to introduce a worst case 1/HZ latency 64 * but we also don't want to introduce a worst case 1/HZ latency
@@ -79,13 +86,23 @@ static void __local_bh_disable(unsigned long ip)
79 WARN_ON_ONCE(in_irq()); 86 WARN_ON_ONCE(in_irq());
80 87
81 raw_local_irq_save(flags); 88 raw_local_irq_save(flags);
82 add_preempt_count(SOFTIRQ_OFFSET); 89 /*
90 * The preempt tracer hooks into add_preempt_count and will break
91 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
92 * is set and before current->softirq_enabled is cleared.
93 * We must manually increment preempt_count here and manually
94 * call the trace_preempt_off later.
95 */
96 preempt_count() += SOFTIRQ_OFFSET;
83 /* 97 /*
84 * Were softirqs turned off above: 98 * Were softirqs turned off above:
85 */ 99 */
86 if (softirq_count() == SOFTIRQ_OFFSET) 100 if (softirq_count() == SOFTIRQ_OFFSET)
87 trace_softirqs_off(ip); 101 trace_softirqs_off(ip);
88 raw_local_irq_restore(flags); 102 raw_local_irq_restore(flags);
103
104 if (preempt_count() == SOFTIRQ_OFFSET)
105 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
89} 106}
90#else /* !CONFIG_TRACE_IRQFLAGS */ 107#else /* !CONFIG_TRACE_IRQFLAGS */
91static inline void __local_bh_disable(unsigned long ip) 108static inline void __local_bh_disable(unsigned long ip)
@@ -169,6 +186,9 @@ EXPORT_SYMBOL(local_bh_enable_ip);
169 */ 186 */
170#define MAX_SOFTIRQ_RESTART 10 187#define MAX_SOFTIRQ_RESTART 10
171 188
189DEFINE_TRACE(softirq_entry);
190DEFINE_TRACE(softirq_exit);
191
172asmlinkage void __do_softirq(void) 192asmlinkage void __do_softirq(void)
173{ 193{
174 struct softirq_action *h; 194 struct softirq_action *h;
@@ -180,7 +200,7 @@ asmlinkage void __do_softirq(void)
180 account_system_vtime(current); 200 account_system_vtime(current);
181 201
182 __local_bh_disable((unsigned long)__builtin_return_address(0)); 202 __local_bh_disable((unsigned long)__builtin_return_address(0));
183 trace_softirq_enter(); 203 lockdep_softirq_enter();
184 204
185 cpu = smp_processor_id(); 205 cpu = smp_processor_id();
186restart: 206restart:
@@ -195,12 +215,14 @@ restart:
195 if (pending & 1) { 215 if (pending & 1) {
196 int prev_count = preempt_count(); 216 int prev_count = preempt_count();
197 217
218 trace_softirq_entry(h, softirq_vec);
198 h->action(h); 219 h->action(h);
199 220 trace_softirq_exit(h, softirq_vec);
200 if (unlikely(prev_count != preempt_count())) { 221 if (unlikely(prev_count != preempt_count())) {
201 printk(KERN_ERR "huh, entered softirq %td %p" 222 printk(KERN_ERR "huh, entered softirq %td %s %p"
202 "with preempt_count %08x," 223 "with preempt_count %08x,"
203 " exited with %08x?\n", h - softirq_vec, 224 " exited with %08x?\n", h - softirq_vec,
225 softirq_to_name[h - softirq_vec],
204 h->action, prev_count, preempt_count()); 226 h->action, prev_count, preempt_count());
205 preempt_count() = prev_count; 227 preempt_count() = prev_count;
206 } 228 }
@@ -220,7 +242,7 @@ restart:
220 if (pending) 242 if (pending)
221 wakeup_softirqd(); 243 wakeup_softirqd();
222 244
223 trace_softirq_exit(); 245 lockdep_softirq_exit();
224 246
225 account_system_vtime(current); 247 account_system_vtime(current);
226 _local_bh_enable(); 248 _local_bh_enable();
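
softirq_to_name[] gives both the new softirq trace output and the "huh, entered softirq" warning a readable vector name. The softirq_entry/softirq_exit probes receive the action pointer together with the base of softirq_vec, so the vector index falls out of pointer arithmetic; a hedged probe sketch, assuming softirq_to_name[] is visible to the probe:

#include <linux/interrupt.h>
#include <trace/irq.h>

static void probe_softirq_entry(struct softirq_action *h,
				struct softirq_action *vec)
{
	pr_debug("softirq %td (%s) entered\n", h - vec,
		 softirq_to_name[h - vec]);
}

/* register_trace_softirq_entry(probe_softirq_entry); */
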
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 34e707e5ab87..8a4d72931042 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -9,6 +9,9 @@ config USER_STACKTRACE_SUPPORT
9config NOP_TRACER 9config NOP_TRACER
10 bool 10 bool
11 11
12config HAVE_FTRACE_NMI_ENTER
13 bool
14
12config HAVE_FUNCTION_TRACER 15config HAVE_FUNCTION_TRACER
13 bool 16 bool
14 17
@@ -31,12 +34,20 @@ config HAVE_FTRACE_MCOUNT_RECORD
31config HAVE_HW_BRANCH_TRACER 34config HAVE_HW_BRANCH_TRACER
32 bool 35 bool
33 36
37config HAVE_FTRACE_SYSCALLS
38 bool
39
34config TRACER_MAX_TRACE 40config TRACER_MAX_TRACE
35 bool 41 bool
36 42
37config RING_BUFFER 43config RING_BUFFER
38 bool 44 bool
39 45
46config FTRACE_NMI_ENTER
47 bool
48 depends on HAVE_FTRACE_NMI_ENTER
49 default y
50
40config TRACING 51config TRACING
41 bool 52 bool
42 select DEBUG_FS 53 select DEBUG_FS
@@ -44,13 +55,29 @@ config TRACING
44 select STACKTRACE if STACKTRACE_SUPPORT 55 select STACKTRACE if STACKTRACE_SUPPORT
45 select TRACEPOINTS 56 select TRACEPOINTS
46 select NOP_TRACER 57 select NOP_TRACER
58 select BINARY_PRINTF
59
60#
61# Minimum requirements an architecture has to meet for us to
62# be able to offer generic tracing facilities:
63#
64config TRACING_SUPPORT
65 bool
 66	# PPC32 has no irqflags tracing support, but it can use most of the
 67	# tracers anyway; they were tested to build and work. Note that new
 68	# exceptions to this list aren't welcome; better to implement
 69	# irqflags tracing for your architecture.
70 depends on TRACE_IRQFLAGS_SUPPORT || PPC32
71 depends on STACKTRACE_SUPPORT
72 default y
73
74if TRACING_SUPPORT
47 75
48menu "Tracers" 76menu "Tracers"
49 77
50config FUNCTION_TRACER 78config FUNCTION_TRACER
51 bool "Kernel Function Tracer" 79 bool "Kernel Function Tracer"
52 depends on HAVE_FUNCTION_TRACER 80 depends on HAVE_FUNCTION_TRACER
53 depends on DEBUG_KERNEL
54 select FRAME_POINTER 81 select FRAME_POINTER
55 select KALLSYMS 82 select KALLSYMS
56 select TRACING 83 select TRACING
@@ -83,7 +110,6 @@ config IRQSOFF_TRACER
83 default n 110 default n
84 depends on TRACE_IRQFLAGS_SUPPORT 111 depends on TRACE_IRQFLAGS_SUPPORT
85 depends on GENERIC_TIME 112 depends on GENERIC_TIME
86 depends on DEBUG_KERNEL
87 select TRACE_IRQFLAGS 113 select TRACE_IRQFLAGS
88 select TRACING 114 select TRACING
89 select TRACER_MAX_TRACE 115 select TRACER_MAX_TRACE
@@ -106,7 +132,6 @@ config PREEMPT_TRACER
106 default n 132 default n
107 depends on GENERIC_TIME 133 depends on GENERIC_TIME
108 depends on PREEMPT 134 depends on PREEMPT
109 depends on DEBUG_KERNEL
110 select TRACING 135 select TRACING
111 select TRACER_MAX_TRACE 136 select TRACER_MAX_TRACE
112 help 137 help
@@ -127,13 +152,13 @@ config SYSPROF_TRACER
127 bool "Sysprof Tracer" 152 bool "Sysprof Tracer"
128 depends on X86 153 depends on X86
129 select TRACING 154 select TRACING
155 select CONTEXT_SWITCH_TRACER
130 help 156 help
131 This tracer provides the trace needed by the 'Sysprof' userspace 157 This tracer provides the trace needed by the 'Sysprof' userspace
132 tool. 158 tool.
133 159
134config SCHED_TRACER 160config SCHED_TRACER
135 bool "Scheduling Latency Tracer" 161 bool "Scheduling Latency Tracer"
136 depends on DEBUG_KERNEL
137 select TRACING 162 select TRACING
138 select CONTEXT_SWITCH_TRACER 163 select CONTEXT_SWITCH_TRACER
139 select TRACER_MAX_TRACE 164 select TRACER_MAX_TRACE
@@ -143,16 +168,30 @@ config SCHED_TRACER
143 168
144config CONTEXT_SWITCH_TRACER 169config CONTEXT_SWITCH_TRACER
145 bool "Trace process context switches" 170 bool "Trace process context switches"
146 depends on DEBUG_KERNEL
147 select TRACING 171 select TRACING
148 select MARKERS 172 select MARKERS
149 help 173 help
150 This tracer gets called from the context switch and records 174 This tracer gets called from the context switch and records
151 all switching of tasks. 175 all switching of tasks.
152 176
177config EVENT_TRACER
178 bool "Trace various events in the kernel"
179 select TRACING
180 help
 181	  This tracer hooks into various trace points in the kernel,
 182	  allowing the user to pick and choose which trace points they
 183	  want to trace.
184
185config FTRACE_SYSCALLS
186 bool "Trace syscalls"
187 depends on HAVE_FTRACE_SYSCALLS
188 select TRACING
189 select KALLSYMS
190 help
191 Basic tracer to catch the syscall entry and exit events.
192
153config BOOT_TRACER 193config BOOT_TRACER
154 bool "Trace boot initcalls" 194 bool "Trace boot initcalls"
155 depends on DEBUG_KERNEL
156 select TRACING 195 select TRACING
157 select CONTEXT_SWITCH_TRACER 196 select CONTEXT_SWITCH_TRACER
158 help 197 help
@@ -165,13 +204,11 @@ config BOOT_TRACER
165 representation of the delays during initcalls - but the raw 204 representation of the delays during initcalls - but the raw
166 /debug/tracing/trace text output is readable too. 205 /debug/tracing/trace text output is readable too.
167 206
168 ( Note that tracing self tests can't be enabled if this tracer is 207 You must pass in ftrace=initcall to the kernel command line
169 selected, because the self-tests are an initcall as well and that 208 to enable this on bootup.
170 would invalidate the boot trace. )
171 209
172config TRACE_BRANCH_PROFILING 210config TRACE_BRANCH_PROFILING
173 bool "Trace likely/unlikely profiler" 211 bool "Trace likely/unlikely profiler"
174 depends on DEBUG_KERNEL
175 select TRACING 212 select TRACING
176 help 213 help
 177	  This tracer profiles all the likely and unlikely macros 214	  This tracer profiles all the likely and unlikely macros
@@ -224,7 +261,6 @@ config BRANCH_TRACER
224 261
225config POWER_TRACER 262config POWER_TRACER
226 bool "Trace power consumption behavior" 263 bool "Trace power consumption behavior"
227 depends on DEBUG_KERNEL
228 depends on X86 264 depends on X86
229 select TRACING 265 select TRACING
230 help 266 help
@@ -236,7 +272,6 @@ config POWER_TRACER
236config STACK_TRACER 272config STACK_TRACER
237 bool "Trace max stack" 273 bool "Trace max stack"
238 depends on HAVE_FUNCTION_TRACER 274 depends on HAVE_FUNCTION_TRACER
239 depends on DEBUG_KERNEL
240 select FUNCTION_TRACER 275 select FUNCTION_TRACER
241 select STACKTRACE 276 select STACKTRACE
242 select KALLSYMS 277 select KALLSYMS
@@ -266,11 +301,66 @@ config HW_BRANCH_TRACER
266 This tracer records all branches on the system in a circular 301 This tracer records all branches on the system in a circular
267 buffer giving access to the last N branches for each cpu. 302 buffer giving access to the last N branches for each cpu.
268 303
304config KMEMTRACE
305 bool "Trace SLAB allocations"
306 select TRACING
307 help
308 kmemtrace provides tracing for slab allocator functions, such as
 309	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
310 data is then fed to the userspace application in order to analyse
311 allocation hotspots, internal fragmentation and so on, making it
312 possible to see how well an allocator performs, as well as debug
313 and profile kernel code.
314
 315	  This requires a userspace application to use. See
316 Documentation/vm/kmemtrace.txt for more information.
317
318 Saying Y will make the kernel somewhat larger and slower. However,
319 if you disable kmemtrace at run-time or boot-time, the performance
320 impact is minimal (depending on the arch the kernel is built for).
321
322 If unsure, say N.
323
324config WORKQUEUE_TRACER
325 bool "Trace workqueues"
326 select TRACING
327 help
 328	  The workqueue tracer provides statistical information
 329	  about each cpu workqueue thread, such as the number of
 330	  works inserted and executed since its creation. It can help
 331	  to evaluate the amount of work each of them has to perform.
 332	  For example, it can help a developer decide whether to use
 333	  a per-cpu workqueue instead of a single-threaded one.
334
335config BLK_DEV_IO_TRACE
336 bool "Support for tracing block io actions"
337 depends on SYSFS
338 depends on BLOCK
339 select RELAY
340 select DEBUG_FS
341 select TRACEPOINTS
342 select TRACING
343 select STACKTRACE
344 help
345 Say Y here if you want to be able to trace the block layer actions
346 on a given queue. Tracing allows you to see any traffic happening
347 on a block device queue. For more information (and the userspace
348 support tools needed), fetch the blktrace tools from:
349
350 git://git.kernel.dk/blktrace.git
351
 352	  Tracing is also possible using the ftrace interface, e.g.:
353
354 echo 1 > /sys/block/sda/sda1/trace/enable
355 echo blk > /sys/kernel/debug/tracing/current_tracer
356 cat /sys/kernel/debug/tracing/trace_pipe
357
358 If unsure, say N.
359
269config DYNAMIC_FTRACE 360config DYNAMIC_FTRACE
270 bool "enable/disable ftrace tracepoints dynamically" 361 bool "enable/disable ftrace tracepoints dynamically"
271 depends on FUNCTION_TRACER 362 depends on FUNCTION_TRACER
272 depends on HAVE_DYNAMIC_FTRACE 363 depends on HAVE_DYNAMIC_FTRACE
273 depends on DEBUG_KERNEL
274 default y 364 default y
275 help 365 help
276 This option will modify all the calls to ftrace dynamically 366 This option will modify all the calls to ftrace dynamically
@@ -296,7 +386,7 @@ config FTRACE_SELFTEST
296 386
297config FTRACE_STARTUP_TEST 387config FTRACE_STARTUP_TEST
298 bool "Perform a startup test on ftrace" 388 bool "Perform a startup test on ftrace"
299 depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER 389 depends on TRACING
300 select FTRACE_SELFTEST 390 select FTRACE_SELFTEST
301 help 391 help
302 This option performs a series of startup tests on ftrace. On bootup 392 This option performs a series of startup tests on ftrace. On bootup
@@ -306,7 +396,7 @@ config FTRACE_STARTUP_TEST
306 396
307config MMIOTRACE 397config MMIOTRACE
308 bool "Memory mapped IO tracing" 398 bool "Memory mapped IO tracing"
309 depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI 399 depends on HAVE_MMIOTRACE_SUPPORT && PCI
310 select TRACING 400 select TRACING
311 help 401 help
312 Mmiotrace traces Memory Mapped I/O access and is meant for 402 Mmiotrace traces Memory Mapped I/O access and is meant for
@@ -328,3 +418,6 @@ config MMIOTRACE_TEST
328 Say N, unless you absolutely know what you are doing. 418 Say N, unless you absolutely know what you are doing.
329 419
330endmenu 420endmenu
421
422endif # TRACING_SUPPORT
423
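
With the DEBUG_KERNEL dependencies dropped and every tracer now sitting behind the new TRACING_SUPPORT guard, enabling tracing is just a matter of switching the individual options on. A purely illustrative .config fragment using the symbols introduced above (TRACING and TRACING_SUPPORT have no prompt and are normally selected or defaulted, but show up like this in a generated .config; the exact selection is an example):

# Example .config fragment (illustrative selection)
CONFIG_TRACING_SUPPORT=y
CONFIG_TRACING=y
CONFIG_FUNCTION_TRACER=y
CONFIG_EVENT_TRACER=y
CONFIG_KMEMTRACE=y
CONFIG_WORKQUEUE_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
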
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 349d5a93653f..2630f5121ec1 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -19,6 +19,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
19obj-$(CONFIG_RING_BUFFER) += ring_buffer.o 19obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
20 20
21obj-$(CONFIG_TRACING) += trace.o 21obj-$(CONFIG_TRACING) += trace.o
22obj-$(CONFIG_TRACING) += trace_clock.o
23obj-$(CONFIG_TRACING) += trace_output.o
24obj-$(CONFIG_TRACING) += trace_stat.o
25obj-$(CONFIG_TRACING) += trace_printk.o
22obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 26obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
23obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o 27obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
24obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o 28obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
@@ -33,5 +37,14 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
33obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o 37obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
34obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o 38obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
35obj-$(CONFIG_POWER_TRACER) += trace_power.o 39obj-$(CONFIG_POWER_TRACER) += trace_power.o
40obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
41obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
42obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
43obj-$(CONFIG_EVENT_TRACER) += trace_events.o
44obj-$(CONFIG_EVENT_TRACER) += events.o
45obj-$(CONFIG_EVENT_TRACER) += trace_export.o
46obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
47obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
48obj-$(CONFIG_EVENT_TRACER) += trace_events_filter.o
36 49
37libftrace-y := ftrace.o 50libftrace-y := ftrace.o
diff --git a/block/blktrace.c b/kernel/trace/blktrace.c
index 028120a0965a..947c5b3f90c4 100644
--- a/block/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -24,15 +24,32 @@
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/time.h> 25#include <linux/time.h>
26#include <trace/block.h> 26#include <trace/block.h>
27#include <asm/uaccess.h> 27#include <linux/uaccess.h>
28#include "trace_output.h"
28 29
29static unsigned int blktrace_seq __read_mostly = 1; 30static unsigned int blktrace_seq __read_mostly = 1;
30 31
32static struct trace_array *blk_tr;
33static bool blk_tracer_enabled __read_mostly;
34
35/* Select an alternative, minimalistic output than the original one */
36#define TRACE_BLK_OPT_CLASSIC 0x1
37
38static struct tracer_opt blk_tracer_opts[] = {
39 /* Default disable the minimalistic output */
40 { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
41 { }
42};
43
44static struct tracer_flags blk_tracer_flags = {
45 .val = 0,
46 .opts = blk_tracer_opts,
47};
48
31/* Global reference count of probes */ 49/* Global reference count of probes */
32static DEFINE_MUTEX(blk_probe_mutex);
33static atomic_t blk_probes_ref = ATOMIC_INIT(0); 50static atomic_t blk_probes_ref = ATOMIC_INIT(0);
34 51
35static int blk_register_tracepoints(void); 52static void blk_register_tracepoints(void);
36static void blk_unregister_tracepoints(void); 53static void blk_unregister_tracepoints(void);
37 54
38/* 55/*
@@ -42,19 +59,39 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
42 const void *data, size_t len) 59 const void *data, size_t len)
43{ 60{
44 struct blk_io_trace *t; 61 struct blk_io_trace *t;
62 struct ring_buffer_event *event = NULL;
63 int pc = 0;
64 int cpu = smp_processor_id();
65 bool blk_tracer = blk_tracer_enabled;
66
67 if (blk_tracer) {
68 pc = preempt_count();
69 event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
70 sizeof(*t) + len,
71 0, pc);
72 if (!event)
73 return;
74 t = ring_buffer_event_data(event);
75 goto record_it;
76 }
77
78 if (!bt->rchan)
79 return;
45 80
46 t = relay_reserve(bt->rchan, sizeof(*t) + len); 81 t = relay_reserve(bt->rchan, sizeof(*t) + len);
47 if (t) { 82 if (t) {
48 const int cpu = smp_processor_id();
49
50 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 83 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
51 t->time = ktime_to_ns(ktime_get()); 84 t->time = ktime_to_ns(ktime_get());
85record_it:
52 t->device = bt->dev; 86 t->device = bt->dev;
53 t->action = action; 87 t->action = action;
54 t->pid = pid; 88 t->pid = pid;
55 t->cpu = cpu; 89 t->cpu = cpu;
56 t->pdu_len = len; 90 t->pdu_len = len;
57 memcpy((void *) t + sizeof(*t), data, len); 91 memcpy((void *) t + sizeof(*t), data, len);
92
93 if (blk_tracer)
94 trace_buffer_unlock_commit(blk_tr, event, 0, pc);
58 } 95 }
59} 96}
60 97
@@ -90,6 +127,10 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
90 unsigned long flags; 127 unsigned long flags;
91 char *buf; 128 char *buf;
92 129
130 if (unlikely(bt->trace_state != Blktrace_running &&
131 !blk_tracer_enabled))
132 return;
133
93 local_irq_save(flags); 134 local_irq_save(flags);
94 buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); 135 buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
95 va_start(args, fmt); 136 va_start(args, fmt);
@@ -117,11 +158,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
117/* 158/*
118 * Data direction bit lookup 159 * Data direction bit lookup
119 */ 160 */
120static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) }; 161static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
162 BLK_TC_ACT(BLK_TC_WRITE) };
121 163
122/* The ilog2() calls fall out because they're constant */ 164/* The ilog2() calls fall out because they're constant */
123#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \ 165#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
124 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) ) 166 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
125 167
126/* 168/*
127 * The worker for the various blk_add_trace*() types. Fills out a 169 * The worker for the various blk_add_trace*() types. Fills out a
@@ -131,13 +173,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
131 int rw, u32 what, int error, int pdu_len, void *pdu_data) 173 int rw, u32 what, int error, int pdu_len, void *pdu_data)
132{ 174{
133 struct task_struct *tsk = current; 175 struct task_struct *tsk = current;
176 struct ring_buffer_event *event = NULL;
134 struct blk_io_trace *t; 177 struct blk_io_trace *t;
135 unsigned long flags; 178 unsigned long flags = 0;
136 unsigned long *sequence; 179 unsigned long *sequence;
137 pid_t pid; 180 pid_t pid;
138 int cpu; 181 int cpu, pc = 0;
182 bool blk_tracer = blk_tracer_enabled;
139 183
140 if (unlikely(bt->trace_state != Blktrace_running)) 184 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
141 return; 185 return;
142 186
143 what |= ddir_act[rw & WRITE]; 187 what |= ddir_act[rw & WRITE];
@@ -150,6 +194,20 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
150 pid = tsk->pid; 194 pid = tsk->pid;
151 if (unlikely(act_log_check(bt, what, sector, pid))) 195 if (unlikely(act_log_check(bt, what, sector, pid)))
152 return; 196 return;
197 cpu = raw_smp_processor_id();
198
199 if (blk_tracer) {
200 tracing_record_cmdline(current);
201
202 pc = preempt_count();
203 event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
204 sizeof(*t) + pdu_len,
205 0, pc);
206 if (!event)
207 return;
208 t = ring_buffer_event_data(event);
209 goto record_it;
210 }
153 211
154 /* 212 /*
155 * A word about the locking here - we disable interrupts to reserve 213 * A word about the locking here - we disable interrupts to reserve
@@ -163,23 +221,35 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
163 221
164 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len); 222 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
165 if (t) { 223 if (t) {
166 cpu = smp_processor_id();
167 sequence = per_cpu_ptr(bt->sequence, cpu); 224 sequence = per_cpu_ptr(bt->sequence, cpu);
168 225
169 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 226 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
170 t->sequence = ++(*sequence); 227 t->sequence = ++(*sequence);
171 t->time = ktime_to_ns(ktime_get()); 228 t->time = ktime_to_ns(ktime_get());
229record_it:
230 /*
231 * These two are not needed in ftrace as they are in the
232 * generic trace_entry, filled by tracing_generic_entry_update,
233 * but for the trace_event->bin() synthesizer benefit we do it
234 * here too.
235 */
236 t->cpu = cpu;
237 t->pid = pid;
238
172 t->sector = sector; 239 t->sector = sector;
173 t->bytes = bytes; 240 t->bytes = bytes;
174 t->action = what; 241 t->action = what;
175 t->pid = pid;
176 t->device = bt->dev; 242 t->device = bt->dev;
177 t->cpu = cpu;
178 t->error = error; 243 t->error = error;
179 t->pdu_len = pdu_len; 244 t->pdu_len = pdu_len;
180 245
181 if (pdu_len) 246 if (pdu_len)
182 memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); 247 memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
248
249 if (blk_tracer) {
250 trace_buffer_unlock_commit(blk_tr, event, 0, pc);
251 return;
252 }
183 } 253 }
184 254
185 local_irq_restore(flags); 255 local_irq_restore(flags);
@@ -188,7 +258,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
188static struct dentry *blk_tree_root; 258static struct dentry *blk_tree_root;
189static DEFINE_MUTEX(blk_tree_mutex); 259static DEFINE_MUTEX(blk_tree_mutex);
190 260
191static void blk_trace_cleanup(struct blk_trace *bt) 261static void blk_trace_free(struct blk_trace *bt)
192{ 262{
193 debugfs_remove(bt->msg_file); 263 debugfs_remove(bt->msg_file);
194 debugfs_remove(bt->dropped_file); 264 debugfs_remove(bt->dropped_file);
@@ -196,10 +266,13 @@ static void blk_trace_cleanup(struct blk_trace *bt)
196 free_percpu(bt->sequence); 266 free_percpu(bt->sequence);
197 free_percpu(bt->msg_data); 267 free_percpu(bt->msg_data);
198 kfree(bt); 268 kfree(bt);
199 mutex_lock(&blk_probe_mutex); 269}
270
271static void blk_trace_cleanup(struct blk_trace *bt)
272{
273 blk_trace_free(bt);
200 if (atomic_dec_and_test(&blk_probes_ref)) 274 if (atomic_dec_and_test(&blk_probes_ref))
201 blk_unregister_tracepoints(); 275 blk_unregister_tracepoints();
202 mutex_unlock(&blk_probe_mutex);
203} 276}
204 277
205int blk_trace_remove(struct request_queue *q) 278int blk_trace_remove(struct request_queue *q)
@@ -210,8 +283,7 @@ int blk_trace_remove(struct request_queue *q)
210 if (!bt) 283 if (!bt)
211 return -EINVAL; 284 return -EINVAL;
212 285
213 if (bt->trace_state == Blktrace_setup || 286 if (bt->trace_state != Blktrace_running)
214 bt->trace_state == Blktrace_stopped)
215 blk_trace_cleanup(bt); 287 blk_trace_cleanup(bt);
216 288
217 return 0; 289 return 0;
@@ -354,11 +426,11 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
354 if (buts->name[i] == '/') 426 if (buts->name[i] == '/')
355 buts->name[i] = '_'; 427 buts->name[i] = '_';
356 428
357 ret = -ENOMEM;
358 bt = kzalloc(sizeof(*bt), GFP_KERNEL); 429 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
359 if (!bt) 430 if (!bt)
360 goto err; 431 return -ENOMEM;
361 432
433 ret = -ENOMEM;
362 bt->sequence = alloc_percpu(unsigned long); 434 bt->sequence = alloc_percpu(unsigned long);
363 if (!bt->sequence) 435 if (!bt->sequence)
364 goto err; 436 goto err;
@@ -369,11 +441,15 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
369 441
370 ret = -ENOENT; 442 ret = -ENOENT;
371 443
444 mutex_lock(&blk_tree_mutex);
372 if (!blk_tree_root) { 445 if (!blk_tree_root) {
373 blk_tree_root = debugfs_create_dir("block", NULL); 446 blk_tree_root = debugfs_create_dir("block", NULL);
374 if (!blk_tree_root) 447 if (!blk_tree_root) {
375 return -ENOMEM; 448 mutex_unlock(&blk_tree_mutex);
449 goto err;
450 }
376 } 451 }
452 mutex_unlock(&blk_tree_mutex);
377 453
378 dir = debugfs_create_dir(buts->name, blk_tree_root); 454 dir = debugfs_create_dir(buts->name, blk_tree_root);
379 455
@@ -385,7 +461,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
385 atomic_set(&bt->dropped, 0); 461 atomic_set(&bt->dropped, 0);
386 462
387 ret = -EIO; 463 ret = -EIO;
388 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops); 464 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
465 &blk_dropped_fops);
389 if (!bt->dropped_file) 466 if (!bt->dropped_file)
390 goto err; 467 goto err;
391 468
@@ -410,14 +487,6 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
410 bt->pid = buts->pid; 487 bt->pid = buts->pid;
411 bt->trace_state = Blktrace_setup; 488 bt->trace_state = Blktrace_setup;
412 489
413 mutex_lock(&blk_probe_mutex);
414 if (atomic_add_return(1, &blk_probes_ref) == 1) {
415 ret = blk_register_tracepoints();
416 if (ret)
417 goto probe_err;
418 }
419 mutex_unlock(&blk_probe_mutex);
420
421 ret = -EBUSY; 490 ret = -EBUSY;
422 old_bt = xchg(&q->blk_trace, bt); 491 old_bt = xchg(&q->blk_trace, bt);
423 if (old_bt) { 492 if (old_bt) {
@@ -425,22 +494,12 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
425 goto err; 494 goto err;
426 } 495 }
427 496
497 if (atomic_inc_return(&blk_probes_ref) == 1)
498 blk_register_tracepoints();
499
428 return 0; 500 return 0;
429probe_err:
430 atomic_dec(&blk_probes_ref);
431 mutex_unlock(&blk_probe_mutex);
432err: 501err:
433 if (bt) { 502 blk_trace_free(bt);
434 if (bt->msg_file)
435 debugfs_remove(bt->msg_file);
436 if (bt->dropped_file)
437 debugfs_remove(bt->dropped_file);
438 free_percpu(bt->sequence);
439 free_percpu(bt->msg_data);
440 if (bt->rchan)
441 relay_close(bt->rchan);
442 kfree(bt);
443 }
444 return ret; 503 return ret;
445} 504}
446 505
@@ -467,10 +526,10 @@ EXPORT_SYMBOL_GPL(blk_trace_setup);
467 526
468int blk_trace_startstop(struct request_queue *q, int start) 527int blk_trace_startstop(struct request_queue *q, int start)
469{ 528{
470 struct blk_trace *bt;
471 int ret; 529 int ret;
530 struct blk_trace *bt = q->blk_trace;
472 531
473 if ((bt = q->blk_trace) == NULL) 532 if (bt == NULL)
474 return -EINVAL; 533 return -EINVAL;
475 534
476 /* 535 /*
@@ -503,7 +562,7 @@ EXPORT_SYMBOL_GPL(blk_trace_startstop);
503/** 562/**
504 * blk_trace_ioctl: - handle the ioctls associated with tracing 563 * blk_trace_ioctl: - handle the ioctls associated with tracing
505 * @bdev: the block device 564 * @bdev: the block device
506 * @cmd: the ioctl cmd 565 * @cmd: the ioctl cmd
507 * @arg: the argument data, if any 566 * @arg: the argument data, if any
508 * 567 *
509 **/ 568 **/
@@ -606,12 +665,14 @@ static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
606 blk_add_trace_rq(q, rq, BLK_TA_ISSUE); 665 blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
607} 666}
608 667
609static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq) 668static void blk_add_trace_rq_requeue(struct request_queue *q,
669 struct request *rq)
610{ 670{
611 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); 671 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
612} 672}
613 673
614static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq) 674static void blk_add_trace_rq_complete(struct request_queue *q,
675 struct request *rq)
615{ 676{
616 blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); 677 blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
617} 678}
@@ -648,12 +709,14 @@ static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
648 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE); 709 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
649} 710}
650 711
651static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio) 712static void blk_add_trace_bio_backmerge(struct request_queue *q,
713 struct bio *bio)
652{ 714{
653 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); 715 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
654} 716}
655 717
656static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio) 718static void blk_add_trace_bio_frontmerge(struct request_queue *q,
719 struct bio *bio)
657{ 720{
658 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); 721 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
659} 722}
@@ -663,7 +726,8 @@ static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
663 blk_add_trace_bio(q, bio, BLK_TA_QUEUE); 726 blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
664} 727}
665 728
666static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw) 729static void blk_add_trace_getrq(struct request_queue *q,
730 struct bio *bio, int rw)
667{ 731{
668 if (bio) 732 if (bio)
669 blk_add_trace_bio(q, bio, BLK_TA_GETRQ); 733 blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
@@ -676,7 +740,8 @@ static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw
676} 740}
677 741
678 742
679static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw) 743static void blk_add_trace_sleeprq(struct request_queue *q,
744 struct bio *bio, int rw)
680{ 745{
681 if (bio) 746 if (bio)
682 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ); 747 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
@@ -684,7 +749,8 @@ static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int
684 struct blk_trace *bt = q->blk_trace; 749 struct blk_trace *bt = q->blk_trace;
685 750
686 if (bt) 751 if (bt)
687 __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL); 752 __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
753 0, 0, NULL);
688 } 754 }
689} 755}
690 756
@@ -795,7 +861,7 @@ void blk_add_driver_data(struct request_queue *q,
795} 861}
796EXPORT_SYMBOL_GPL(blk_add_driver_data); 862EXPORT_SYMBOL_GPL(blk_add_driver_data);
797 863
798static int blk_register_tracepoints(void) 864static void blk_register_tracepoints(void)
799{ 865{
800 int ret; 866 int ret;
801 867
@@ -833,7 +899,6 @@ static int blk_register_tracepoints(void)
833 WARN_ON(ret); 899 WARN_ON(ret);
834 ret = register_trace_block_remap(blk_add_trace_remap); 900 ret = register_trace_block_remap(blk_add_trace_remap);
835 WARN_ON(ret); 901 WARN_ON(ret);
836 return 0;
837} 902}
838 903
839static void blk_unregister_tracepoints(void) 904static void blk_unregister_tracepoints(void)
@@ -858,3 +923,627 @@ static void blk_unregister_tracepoints(void)
858 923
859 tracepoint_synchronize_unregister(); 924 tracepoint_synchronize_unregister();
860} 925}
926
927/*
928 * struct blk_io_tracer formatting routines
929 */
930
931static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
932{
933 int i = 0;
934 int tc = t->action >> BLK_TC_SHIFT;
935
936 if (t->action == BLK_TN_MESSAGE) {
937 rwbs[i++] = 'N';
938 goto out;
939 }
940
941 if (tc & BLK_TC_DISCARD)
942 rwbs[i++] = 'D';
943 else if (tc & BLK_TC_WRITE)
944 rwbs[i++] = 'W';
945 else if (t->bytes)
946 rwbs[i++] = 'R';
947 else
948 rwbs[i++] = 'N';
949
950 if (tc & BLK_TC_AHEAD)
951 rwbs[i++] = 'A';
952 if (tc & BLK_TC_BARRIER)
953 rwbs[i++] = 'B';
954 if (tc & BLK_TC_SYNC)
955 rwbs[i++] = 'S';
956 if (tc & BLK_TC_META)
957 rwbs[i++] = 'M';
958out:
959 rwbs[i] = '\0';
960}
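
fill_rwbs() condenses the action word's type bits into the short flags string printed by the formatters below: 'N' for a message note, otherwise one of D/W/R followed by the A/B/S/M modifiers. As an illustration only (fill_rwbs() is static to this file), a synchronous write comes out as "WS":

	char rwbs[6];
	struct blk_io_trace t = {
		.action	= BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_SYNC),
		.bytes	= 4096,
	};

	fill_rwbs(rwbs, &t);	/* rwbs == "WS" */
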
961
962static inline
963const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
964{
965 return (const struct blk_io_trace *)ent;
966}
967
968static inline const void *pdu_start(const struct trace_entry *ent)
969{
970 return te_blk_io_trace(ent) + 1;
971}
972
973static inline u32 t_sec(const struct trace_entry *ent)
974{
975 return te_blk_io_trace(ent)->bytes >> 9;
976}
977
978static inline unsigned long long t_sector(const struct trace_entry *ent)
979{
980 return te_blk_io_trace(ent)->sector;
981}
982
983static inline __u16 t_error(const struct trace_entry *ent)
984{
985 return te_blk_io_trace(ent)->error;
986}
987
988static __u64 get_pdu_int(const struct trace_entry *ent)
989{
990 const __u64 *val = pdu_start(ent);
991 return be64_to_cpu(*val);
992}
993
994static void get_pdu_remap(const struct trace_entry *ent,
995 struct blk_io_trace_remap *r)
996{
997 const struct blk_io_trace_remap *__r = pdu_start(ent);
998 __u64 sector = __r->sector;
999
1000 r->device = be32_to_cpu(__r->device);
1001 r->device_from = be32_to_cpu(__r->device_from);
1002 r->sector = be64_to_cpu(sector);
1003}
1004
1005typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
1006
1007static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
1008{
1009 char rwbs[6];
1010 unsigned long long ts = iter->ts;
1011 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1012 unsigned secs = (unsigned long)ts;
1013 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1014
1015 fill_rwbs(rwbs, t);
1016
1017 return trace_seq_printf(&iter->seq,
1018 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1019 MAJOR(t->device), MINOR(t->device), iter->cpu,
1020 secs, nsec_rem, iter->ent->pid, act, rwbs);
1021}
1022
1023static int blk_log_action(struct trace_iterator *iter, const char *act)
1024{
1025 char rwbs[6];
1026 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1027
1028 fill_rwbs(rwbs, t);
1029 return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1030 MAJOR(t->device), MINOR(t->device), act, rwbs);
1031}
1032
1033static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
1034{
1035 char cmd[TASK_COMM_LEN];
1036
1037 trace_find_cmdline(ent->pid, cmd);
1038
1039 if (t_sec(ent))
1040 return trace_seq_printf(s, "%llu + %u [%s]\n",
1041 t_sector(ent), t_sec(ent), cmd);
1042 return trace_seq_printf(s, "[%s]\n", cmd);
1043}
1044
1045static int blk_log_with_error(struct trace_seq *s,
1046 const struct trace_entry *ent)
1047{
1048 if (t_sec(ent))
1049 return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
1050 t_sec(ent), t_error(ent));
1051 return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
1052}
1053
1054static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
1055{
1056 struct blk_io_trace_remap r = { .device = 0, };
1057
1058 get_pdu_remap(ent, &r);
1059 return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1060 t_sector(ent),
1061 t_sec(ent), MAJOR(r.device), MINOR(r.device),
1062 (unsigned long long)r.sector);
1063}
1064
1065static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
1066{
1067 char cmd[TASK_COMM_LEN];
1068
1069 trace_find_cmdline(ent->pid, cmd);
1070
1071 return trace_seq_printf(s, "[%s]\n", cmd);
1072}
1073
1074static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
1075{
1076 char cmd[TASK_COMM_LEN];
1077
1078 trace_find_cmdline(ent->pid, cmd);
1079
1080 return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
1081}
1082
1083static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
1084{
1085 char cmd[TASK_COMM_LEN];
1086
1087 trace_find_cmdline(ent->pid, cmd);
1088
1089 return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1090 get_pdu_int(ent), cmd);
1091}
1092
1093static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
1094{
1095 int ret;
1096 const struct blk_io_trace *t = te_blk_io_trace(ent);
1097
1098 ret = trace_seq_putmem(s, t + 1, t->pdu_len);
1099 if (ret)
1100 return trace_seq_putc(s, '\n');
1101 return ret;
1102}
1103
1104/*
1105 * struct tracer operations
1106 */
1107
1108static void blk_tracer_print_header(struct seq_file *m)
1109{
1110 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1111 return;
1112 seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n"
1113 "# | | | | | |\n");
1114}
1115
1116static void blk_tracer_start(struct trace_array *tr)
1117{
1118 blk_tracer_enabled = true;
1119 trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1120}
1121
1122static int blk_tracer_init(struct trace_array *tr)
1123{
1124 blk_tr = tr;
1125 blk_tracer_start(tr);
1126 return 0;
1127}
1128
1129static void blk_tracer_stop(struct trace_array *tr)
1130{
1131 blk_tracer_enabled = false;
1132 trace_flags |= TRACE_ITER_CONTEXT_INFO;
1133}
1134
1135static void blk_tracer_reset(struct trace_array *tr)
1136{
1137 blk_tracer_stop(tr);
1138}
1139
1140static const struct {
1141 const char *act[2];
1142 int (*print)(struct trace_seq *s, const struct trace_entry *ent);
1143} what2act[] = {
1144 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
1145 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
1146 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
1147 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
1148 [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic },
1149 [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error },
1150 [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic },
1151 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
1152 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
1153 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
1154 [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1155 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
1156 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
1157 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
1158 [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap },
1159};
1160
1161static enum print_line_t print_one_line(struct trace_iterator *iter,
1162 bool classic)
1163{
1164 struct trace_seq *s = &iter->seq;
1165 const struct blk_io_trace *t;
1166 u16 what;
1167 int ret;
1168 bool long_act;
1169 blk_log_action_t *log_action;
1170
1171 t = te_blk_io_trace(iter->ent);
1172 what = t->action & ((1 << BLK_TC_SHIFT) - 1);
1173 long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
1174 log_action = classic ? &blk_log_action_classic : &blk_log_action;
1175
1176 if (t->action == BLK_TN_MESSAGE) {
1177 ret = log_action(iter, long_act ? "message" : "m");
1178 if (ret)
1179 ret = blk_log_msg(s, iter->ent);
1180 goto out;
1181 }
1182
1183 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1184 ret = trace_seq_printf(s, "Bad pc action %x\n", what);
1185 else {
1186 ret = log_action(iter, what2act[what].act[long_act]);
1187 if (ret)
1188 ret = what2act[what].print(s, iter->ent);
1189 }
1190out:
1191 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1192}
1193
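print_one_line() relies on the action word carrying two things at once: the bits below BLK_TC_SHIFT hold the action code used to index what2act[], and the bits at or above it hold the category flags that fill_rwbs() renders. A tiny sketch of that split, with an assumed shift value standing in for BLK_TC_SHIFT:

#include <stdio.h>

#define DEMO_TC_SHIFT	16	/* assumption for illustration only */

int main(void)
{
	/* some category bits in the high half, some action code in the low half */
	unsigned int action   = (0x2u << DEMO_TC_SHIFT) | 0x5u;
	unsigned int what     = action & ((1u << DEMO_TC_SHIFT) - 1);
	unsigned int category = action >> DEMO_TC_SHIFT;

	printf("what=%u (indexes what2act[]) category=0x%x\n", what, category);
	return 0;
}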
1194static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1195 int flags)
1196{
1197 if (!trace_print_context(iter))
1198 return TRACE_TYPE_PARTIAL_LINE;
1199
1200 return print_one_line(iter, false);
1201}
1202
1203static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1204{
1205 struct trace_seq *s = &iter->seq;
1206 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1207 const int offset = offsetof(struct blk_io_trace, sector);
1208 struct blk_io_trace old = {
1209 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1210 .time = iter->ts,
1211 };
1212
1213 if (!trace_seq_putmem(s, &old, offset))
1214 return 0;
1215 return trace_seq_putmem(s, &t->sector,
1216 sizeof(old) - offset + t->pdu_len);
1217}
1218
1219static enum print_line_t
1220blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
1221{
1222 return blk_trace_synthesize_old_trace(iter) ?
1223 TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
1224}
1225
1226static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1227{
1228 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1229 return TRACE_TYPE_UNHANDLED;
1230
1231 return print_one_line(iter, true);
1232}
1233
1234static struct tracer blk_tracer __read_mostly = {
1235 .name = "blk",
1236 .init = blk_tracer_init,
1237 .reset = blk_tracer_reset,
1238 .start = blk_tracer_start,
1239 .stop = blk_tracer_stop,
1240 .print_header = blk_tracer_print_header,
1241 .print_line = blk_tracer_print_line,
1242 .flags = &blk_tracer_flags,
1243};
1244
1245static struct trace_event trace_blk_event = {
1246 .type = TRACE_BLK,
1247 .trace = blk_trace_event_print,
1248 .binary = blk_trace_event_print_binary,
1249};
1250
1251static int __init init_blk_tracer(void)
1252{
1253 if (!register_ftrace_event(&trace_blk_event)) {
1254 pr_warning("Warning: could not register block events\n");
1255 return 1;
1256 }
1257
1258 if (register_tracer(&blk_tracer) != 0) {
1259 pr_warning("Warning: could not register the block tracer\n");
1260 unregister_ftrace_event(&trace_blk_event);
1261 return 1;
1262 }
1263
1264 return 0;
1265}
1266
1267device_initcall(init_blk_tracer);
1268
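Once init_blk_tracer() has registered both the trace event and the tracer, block events can be consumed through the regular ftrace interface instead of the blktrace ioctls: selecting the tracer named "blk" is enough. A hedged userspace sketch (the debugfs mount point is assumed to be /sys/kernel/debug):

#include <stdio.h>

int main(void)
{
	/* Path assumes debugfs is mounted at /sys/kernel/debug. */
	FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");

	if (!f) {
		perror("current_tracer");
		return 1;
	}
	fputs("blk\n", f);	/* the tracer registered above as .name = "blk" */
	fclose(f);

	/* Formatted events then show up in .../tracing/trace. */
	return 0;
}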
1269static int blk_trace_remove_queue(struct request_queue *q)
1270{
1271 struct blk_trace *bt;
1272
1273 bt = xchg(&q->blk_trace, NULL);
1274 if (bt == NULL)
1275 return -EINVAL;
1276
1277 if (atomic_dec_and_test(&blk_probes_ref))
1278 blk_unregister_tracepoints();
1279
1280 blk_trace_free(bt);
1281 return 0;
1282}
1283
1284/*
1285 * Setup everything required to start tracing
1286 */
1287static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
1288{
1289 struct blk_trace *old_bt, *bt = NULL;
1290 int ret = -ENOMEM;
1291
1292 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1293 if (!bt)
1294 return -ENOMEM;
1295
1296 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1297 if (!bt->msg_data)
1298 goto free_bt;
1299
1300 bt->dev = dev;
1301 bt->act_mask = (u16)-1;
1302 bt->end_lba = -1ULL;
1303
1304 old_bt = xchg(&q->blk_trace, bt);
1305 if (old_bt != NULL) {
1306 (void)xchg(&q->blk_trace, old_bt);
1307 ret = -EBUSY;
1308 goto free_bt;
1309 }
1310
1311 if (atomic_inc_return(&blk_probes_ref) == 1)
1312 blk_register_tracepoints();
1313 return 0;
1314
1315free_bt:
1316 blk_trace_free(bt);
1317 return ret;
1318}
1319
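blk_trace_setup_queue() installs the new blk_trace with xchg() so that only one tracer can ever be attached to a queue: a racing second setup sees a non-NULL old_bt, puts it back and fails with -EBUSY, and only the first successful attach bumps blk_probes_ref to lazily register the tracepoints. The same install-once idiom, reduced to a runnable C11 sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_trace { int dummy; };

static _Atomic(struct demo_trace *) queue_trace;
static atomic_int probes_ref;

static int demo_setup(void)
{
	struct demo_trace *bt = calloc(1, sizeof(*bt));
	struct demo_trace *old;

	if (!bt)
		return -1;

	old = atomic_exchange(&queue_trace, bt);
	if (old) {
		/* someone beat us to it: restore the old pointer and back out */
		atomic_exchange(&queue_trace, old);
		free(bt);
		return -1;	/* -EBUSY in the kernel version */
	}

	if (atomic_fetch_add(&probes_ref, 1) == 0)
		printf("first user: register the tracepoints here\n");
	return 0;
}

int main(void)
{
	int first = demo_setup();
	int second = demo_setup();

	printf("%d %d\n", first, second);	/* 0 -1 */
	return 0;
}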
1320/*
1321 * sysfs interface to enable and configure tracing
1322 */
1323
1324static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1325 struct device_attribute *attr,
1326 char *buf);
1327static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1328 struct device_attribute *attr,
1329 const char *buf, size_t count);
1330#define BLK_TRACE_DEVICE_ATTR(_name) \
1331 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1332 sysfs_blk_trace_attr_show, \
1333 sysfs_blk_trace_attr_store)
1334
1335static BLK_TRACE_DEVICE_ATTR(enable);
1336static BLK_TRACE_DEVICE_ATTR(act_mask);
1337static BLK_TRACE_DEVICE_ATTR(pid);
1338static BLK_TRACE_DEVICE_ATTR(start_lba);
1339static BLK_TRACE_DEVICE_ATTR(end_lba);
1340
1341static struct attribute *blk_trace_attrs[] = {
1342 &dev_attr_enable.attr,
1343 &dev_attr_act_mask.attr,
1344 &dev_attr_pid.attr,
1345 &dev_attr_start_lba.attr,
1346 &dev_attr_end_lba.attr,
1347 NULL
1348};
1349
1350struct attribute_group blk_trace_attr_group = {
1351 .name = "trace",
1352 .attrs = blk_trace_attrs,
1353};
1354
1355static const struct {
1356 int mask;
1357 const char *str;
1358} mask_maps[] = {
1359 { BLK_TC_READ, "read" },
1360 { BLK_TC_WRITE, "write" },
1361 { BLK_TC_BARRIER, "barrier" },
1362 { BLK_TC_SYNC, "sync" },
1363 { BLK_TC_QUEUE, "queue" },
1364 { BLK_TC_REQUEUE, "requeue" },
1365 { BLK_TC_ISSUE, "issue" },
1366 { BLK_TC_COMPLETE, "complete" },
1367 { BLK_TC_FS, "fs" },
1368 { BLK_TC_PC, "pc" },
1369 { BLK_TC_AHEAD, "ahead" },
1370 { BLK_TC_META, "meta" },
1371 { BLK_TC_DISCARD, "discard" },
1372 { BLK_TC_DRV_DATA, "drv_data" },
1373};
1374
1375static int blk_trace_str2mask(const char *str)
1376{
1377 int i;
1378 int mask = 0;
1379 char *s, *token;
1380
1381 s = kstrdup(str, GFP_KERNEL);
1382 if (s == NULL)
1383 return -ENOMEM;
1384 s = strstrip(s);
1385
1386 while (1) {
1387 token = strsep(&s, ",");
1388 if (token == NULL)
1389 break;
1390
1391 if (*token == '\0')
1392 continue;
1393
1394 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1395 if (strcasecmp(token, mask_maps[i].str) == 0) {
1396 mask |= mask_maps[i].mask;
1397 break;
1398 }
1399 }
1400 if (i == ARRAY_SIZE(mask_maps)) {
1401 mask = -EINVAL;
1402 break;
1403 }
1404 }
1405 kfree(s);
1406
1407 return mask;
1408}
1409
1410static ssize_t blk_trace_mask2str(char *buf, int mask)
1411{
1412 int i;
1413 char *p = buf;
1414
1415 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1416 if (mask & mask_maps[i].mask) {
1417 p += sprintf(p, "%s%s",
1418 (p == buf) ? "" : ",", mask_maps[i].str);
1419 }
1420 }
1421 *p++ = '\n';
1422
1423 return p - buf;
1424}
1425
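blk_trace_str2mask() and blk_trace_mask2str() translate between the comma-separated category names accepted by the act_mask attribute and the bitmask kept in blk_trace->act_mask, using the mask_maps[] table in both directions. The parsing half, as a self-contained sketch with invented bit values standing in for BLK_TC_*:

#define _DEFAULT_SOURCE	/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strcasecmp() */

static const struct { int mask; const char *str; } demo_maps[] = {
	{ 1 << 0, "read" },	/* bit values are assumptions, not BLK_TC_* */
	{ 1 << 1, "write" },
	{ 1 << 3, "sync" },
};

static int demo_str2mask(char *s)
{
	int mask = 0;
	char *tok;
	size_t i;

	while ((tok = strsep(&s, ",")) != NULL) {
		if (*tok == '\0')
			continue;
		for (i = 0; i < sizeof(demo_maps) / sizeof(demo_maps[0]); i++)
			if (strcasecmp(tok, demo_maps[i].str) == 0)
				break;
		if (i == sizeof(demo_maps) / sizeof(demo_maps[0]))
			return -1;	/* unknown name, like -EINVAL above */
		mask |= demo_maps[i].mask;
	}
	return mask;
}

int main(void)
{
	char buf[] = "read,sync";

	printf("0x%x\n", (unsigned)demo_str2mask(buf));	/* 0x9 with the bits above */
	return 0;
}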
1426static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1427{
1428 if (bdev->bd_disk == NULL)
1429 return NULL;
1430
1431 return bdev_get_queue(bdev);
1432}
1433
1434static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1435 struct device_attribute *attr,
1436 char *buf)
1437{
1438 struct hd_struct *p = dev_to_part(dev);
1439 struct request_queue *q;
1440 struct block_device *bdev;
1441 ssize_t ret = -ENXIO;
1442
1443 lock_kernel();
1444 bdev = bdget(part_devt(p));
1445 if (bdev == NULL)
1446 goto out_unlock_kernel;
1447
1448 q = blk_trace_get_queue(bdev);
1449 if (q == NULL)
1450 goto out_bdput;
1451
1452 mutex_lock(&bdev->bd_mutex);
1453
1454 if (attr == &dev_attr_enable) {
1455 ret = sprintf(buf, "%u\n", !!q->blk_trace);
1456 goto out_unlock_bdev;
1457 }
1458
1459 if (q->blk_trace == NULL)
1460 ret = sprintf(buf, "disabled\n");
1461 else if (attr == &dev_attr_act_mask)
1462 ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
1463 else if (attr == &dev_attr_pid)
1464 ret = sprintf(buf, "%u\n", q->blk_trace->pid);
1465 else if (attr == &dev_attr_start_lba)
1466 ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
1467 else if (attr == &dev_attr_end_lba)
1468 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1469
1470out_unlock_bdev:
1471 mutex_unlock(&bdev->bd_mutex);
1472out_bdput:
1473 bdput(bdev);
1474out_unlock_kernel:
1475 unlock_kernel();
1476 return ret;
1477}
1478
1479static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1480 struct device_attribute *attr,
1481 const char *buf, size_t count)
1482{
1483 struct block_device *bdev;
1484 struct request_queue *q;
1485 struct hd_struct *p;
1486 u64 value;
1487 ssize_t ret = -EINVAL;
1488
1489 if (count == 0)
1490 goto out;
1491
1492 if (attr == &dev_attr_act_mask) {
1493 if (sscanf(buf, "%llx", &value) != 1) {
1494 /* Assume it is a list of trace category names */
1495 ret = blk_trace_str2mask(buf);
1496 if (ret < 0)
1497 goto out;
1498 value = ret;
1499 }
1500 } else if (sscanf(buf, "%llu", &value) != 1)
1501 goto out;
1502
1503 ret = -ENXIO;
1504
1505 lock_kernel();
1506 p = dev_to_part(dev);
1507 bdev = bdget(part_devt(p));
1508 if (bdev == NULL)
1509 goto out_unlock_kernel;
1510
1511 q = blk_trace_get_queue(bdev);
1512 if (q == NULL)
1513 goto out_bdput;
1514
1515 mutex_lock(&bdev->bd_mutex);
1516
1517 if (attr == &dev_attr_enable) {
1518 if (value)
1519 ret = blk_trace_setup_queue(q, bdev->bd_dev);
1520 else
1521 ret = blk_trace_remove_queue(q);
1522 goto out_unlock_bdev;
1523 }
1524
1525 ret = 0;
1526 if (q->blk_trace == NULL)
1527 ret = blk_trace_setup_queue(q, bdev->bd_dev);
1528
1529 if (ret == 0) {
1530 if (attr == &dev_attr_act_mask)
1531 q->blk_trace->act_mask = value;
1532 else if (attr == &dev_attr_pid)
1533 q->blk_trace->pid = value;
1534 else if (attr == &dev_attr_start_lba)
1535 q->blk_trace->start_lba = value;
1536 else if (attr == &dev_attr_end_lba)
1537 q->blk_trace->end_lba = value;
1538 }
1539
1540out_unlock_bdev:
1541 mutex_unlock(&bdev->bd_mutex);
1542out_bdput:
1543 bdput(bdev);
1544out_unlock_kernel:
1545 unlock_kernel();
1546out:
1547 return ret ? ret : count;
1548}
1549
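With the attribute group named "trace" attached to each partition, the whole facility can be driven by plain file writes; the store handler above accepts either a hex mask or a category-name list for act_mask. A hedged example of doing that from C (the exact sysfs paths are an assumption about how the partition devices are laid out, here for sda1):

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Assumed layout: the "trace" group lands under /sys/block/<disk>/<part>/. */
	write_str("/sys/block/sda/sda1/trace/act_mask", "read,write,sync\n");
	write_str("/sys/block/sda/sda1/trace/enable", "1\n");
	return 0;
}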
diff --git a/kernel/trace/events.c b/kernel/trace/events.c
new file mode 100644
index 000000000000..246f2aa6dc46
--- /dev/null
+++ b/kernel/trace/events.c
@@ -0,0 +1,14 @@
1/*
2 * This is the place to register all trace points as events.
3 */
4
5#include <linux/stringify.h>
6
7#include <trace/trace_events.h>
8
9#include "trace_output.h"
10
11#include "trace_events_stage_1.h"
12#include "trace_events_stage_2.h"
13#include "trace_events_stage_3.h"
14
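events.c itself contains no logic: it is just the single translation unit that pulls in the stage_1/2/3 headers, which (roughly speaking) expand the same event descriptions into different pieces of code on each include. The general multi-include trick behind that kind of staging can be shown with ordinary C macros; this is an illustration of the technique only, not the kernel's actual stage macros:

#include <stdio.h>

/* One shared description list, expanded differently per "stage". */
#define EVENT_LIST(X) \
	X(sched_switch)   \
	X(sched_wakeup)

/* Stage 1: emit an enum of event ids. */
#define AS_ENUM(name) EV_##name,
enum demo_event { EVENT_LIST(AS_ENUM) EV_MAX };

/* Stage 2: emit a matching name table from the same list. */
#define AS_STRING(name) #name,
static const char *demo_event_names[] = { EVENT_LIST(AS_STRING) };

int main(void)
{
	for (int i = 0; i < EV_MAX; i++)
		printf("%d %s\n", i, demo_event_names[i]);
	return 0;
}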
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fdf913dfc7e8..f1ed080406c3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,9 @@
27#include <linux/sysctl.h> 27#include <linux/sysctl.h>
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/hash.h>
31
32#include <trace/sched.h>
30 33
31#include <asm/ftrace.h> 34#include <asm/ftrace.h>
32 35
@@ -44,14 +47,14 @@
44 ftrace_kill(); \ 47 ftrace_kill(); \
45 } while (0) 48 } while (0)
46 49
50/* hash bits for specific function selection */
51#define FTRACE_HASH_BITS 7
52#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
53
47/* ftrace_enabled is a method to turn ftrace on or off */ 54/* ftrace_enabled is a method to turn ftrace on or off */
48int ftrace_enabled __read_mostly; 55int ftrace_enabled __read_mostly;
49static int last_ftrace_enabled; 56static int last_ftrace_enabled;
50 57
51/* set when tracing only a pid */
52struct pid *ftrace_pid_trace;
53static struct pid * const ftrace_swapper_pid = &init_struct_pid;
54
55/* Quick disabling of function tracer. */ 58/* Quick disabling of function tracer. */
56int function_trace_stop; 59int function_trace_stop;
57 60
@@ -61,9 +64,7 @@ int function_trace_stop;
61 */ 64 */
62static int ftrace_disabled __read_mostly; 65static int ftrace_disabled __read_mostly;
63 66
64static DEFINE_SPINLOCK(ftrace_lock); 67static DEFINE_MUTEX(ftrace_lock);
65static DEFINE_MUTEX(ftrace_sysctl_lock);
66static DEFINE_MUTEX(ftrace_start_lock);
67 68
68static struct ftrace_ops ftrace_list_end __read_mostly = 69static struct ftrace_ops ftrace_list_end __read_mostly =
69{ 70{
@@ -134,9 +135,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
134 135
135static int __register_ftrace_function(struct ftrace_ops *ops) 136static int __register_ftrace_function(struct ftrace_ops *ops)
136{ 137{
137 /* should not be called from interrupt context */
138 spin_lock(&ftrace_lock);
139
140 ops->next = ftrace_list; 138 ops->next = ftrace_list;
141 /* 139 /*
142 * We are entering ops into the ftrace_list but another 140 * We are entering ops into the ftrace_list but another
@@ -172,18 +170,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
172#endif 170#endif
173 } 171 }
174 172
175 spin_unlock(&ftrace_lock);
176
177 return 0; 173 return 0;
178} 174}
179 175
180static int __unregister_ftrace_function(struct ftrace_ops *ops) 176static int __unregister_ftrace_function(struct ftrace_ops *ops)
181{ 177{
182 struct ftrace_ops **p; 178 struct ftrace_ops **p;
183 int ret = 0;
184
185 /* should not be called from interrupt context */
186 spin_lock(&ftrace_lock);
187 179
188 /* 180 /*
189 * If we are removing the last function, then simply point 181 * If we are removing the last function, then simply point
@@ -192,17 +184,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
192 if (ftrace_list == ops && ops->next == &ftrace_list_end) { 184 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
193 ftrace_trace_function = ftrace_stub; 185 ftrace_trace_function = ftrace_stub;
194 ftrace_list = &ftrace_list_end; 186 ftrace_list = &ftrace_list_end;
195 goto out; 187 return 0;
196 } 188 }
197 189
198 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) 190 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
199 if (*p == ops) 191 if (*p == ops)
200 break; 192 break;
201 193
202 if (*p != ops) { 194 if (*p != ops)
203 ret = -1; 195 return -1;
204 goto out;
205 }
206 196
207 *p = (*p)->next; 197 *p = (*p)->next;
208 198
@@ -223,21 +213,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
223 } 213 }
224 } 214 }
225 215
226 out: 216 return 0;
227 spin_unlock(&ftrace_lock);
228
229 return ret;
230} 217}
231 218
232static void ftrace_update_pid_func(void) 219static void ftrace_update_pid_func(void)
233{ 220{
234 ftrace_func_t func; 221 ftrace_func_t func;
235 222
236 /* should not be called from interrupt context */
237 spin_lock(&ftrace_lock);
238
239 if (ftrace_trace_function == ftrace_stub) 223 if (ftrace_trace_function == ftrace_stub)
240 goto out; 224 return;
241 225
242 func = ftrace_trace_function; 226 func = ftrace_trace_function;
243 227
@@ -254,23 +238,29 @@ static void ftrace_update_pid_func(void)
254#else 238#else
255 __ftrace_trace_function = func; 239 __ftrace_trace_function = func;
256#endif 240#endif
257
258 out:
259 spin_unlock(&ftrace_lock);
260} 241}
261 242
243/* set when tracing only a pid */
244struct pid *ftrace_pid_trace;
245static struct pid * const ftrace_swapper_pid = &init_struct_pid;
246
262#ifdef CONFIG_DYNAMIC_FTRACE 247#ifdef CONFIG_DYNAMIC_FTRACE
248
263#ifndef CONFIG_FTRACE_MCOUNT_RECORD 249#ifndef CONFIG_FTRACE_MCOUNT_RECORD
264# error Dynamic ftrace depends on MCOUNT_RECORD 250# error Dynamic ftrace depends on MCOUNT_RECORD
265#endif 251#endif
266 252
267/* 253static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
268 * Since MCOUNT_ADDR may point to mcount itself, we do not want 254
269 * to get it confused by reading a reference in the code as we 255struct ftrace_func_probe {
270 * are parsing on objcopy output of text. Use a variable for 256 struct hlist_node node;
271 * it instead. 257 struct ftrace_probe_ops *ops;
272 */ 258 unsigned long flags;
273static unsigned long mcount_addr = MCOUNT_ADDR; 259 unsigned long ip;
260 void *data;
261 struct rcu_head rcu;
262};
263
274 264
275enum { 265enum {
276 FTRACE_ENABLE_CALLS = (1 << 0), 266 FTRACE_ENABLE_CALLS = (1 << 0),
@@ -284,13 +274,13 @@ enum {
284 274
285static int ftrace_filtered; 275static int ftrace_filtered;
286 276
287static LIST_HEAD(ftrace_new_addrs); 277static struct dyn_ftrace *ftrace_new_addrs;
288 278
289static DEFINE_MUTEX(ftrace_regex_lock); 279static DEFINE_MUTEX(ftrace_regex_lock);
290 280
291struct ftrace_page { 281struct ftrace_page {
292 struct ftrace_page *next; 282 struct ftrace_page *next;
293 unsigned long index; 283 int index;
294 struct dyn_ftrace records[]; 284 struct dyn_ftrace records[];
295}; 285};
296 286
@@ -305,6 +295,19 @@ static struct ftrace_page *ftrace_pages;
305 295
306static struct dyn_ftrace *ftrace_free_records; 296static struct dyn_ftrace *ftrace_free_records;
307 297
298/*
 299 * This is a double for loop. Do not use 'break' to break out of the loop,
300 * you must use a goto.
301 */
302#define do_for_each_ftrace_rec(pg, rec) \
303 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
304 int _____i; \
305 for (_____i = 0; _____i < pg->index; _____i++) { \
306 rec = &pg->records[_____i];
307
308#define while_for_each_ftrace_rec() \
309 } \
310 }
308 311
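The do_for_each_ftrace_rec()/while_for_each_ftrace_rec() pair hides the two-level walk over the ftrace_pages list and the records inside each page; as the comment warns, a bare break only leaves the inner loop, so callers use continue or goto instead. Every user later in this patch follows the same shape, roughly (non-compilable sketch of the pattern):

	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;	/* continue only skips to the next record */
		/* ... operate on rec ... */
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);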
309#ifdef CONFIG_KPROBES 312#ifdef CONFIG_KPROBES
310 313
@@ -338,7 +341,7 @@ static inline int record_frozen(struct dyn_ftrace *rec)
338 341
339static void ftrace_free_rec(struct dyn_ftrace *rec) 342static void ftrace_free_rec(struct dyn_ftrace *rec)
340{ 343{
341 rec->ip = (unsigned long)ftrace_free_records; 344 rec->freelist = ftrace_free_records;
342 ftrace_free_records = rec; 345 ftrace_free_records = rec;
343 rec->flags |= FTRACE_FL_FREE; 346 rec->flags |= FTRACE_FL_FREE;
344} 347}
@@ -349,23 +352,22 @@ void ftrace_release(void *start, unsigned long size)
349 struct ftrace_page *pg; 352 struct ftrace_page *pg;
350 unsigned long s = (unsigned long)start; 353 unsigned long s = (unsigned long)start;
351 unsigned long e = s + size; 354 unsigned long e = s + size;
352 int i;
353 355
354 if (ftrace_disabled || !start) 356 if (ftrace_disabled || !start)
355 return; 357 return;
356 358
357 /* should not be called from interrupt context */ 359 mutex_lock(&ftrace_lock);
358 spin_lock(&ftrace_lock); 360 do_for_each_ftrace_rec(pg, rec) {
359 361 if ((rec->ip >= s) && (rec->ip < e)) {
360 for (pg = ftrace_pages_start; pg; pg = pg->next) { 362 /*
361 for (i = 0; i < pg->index; i++) { 363 * rec->ip is changed in ftrace_free_rec()
 362 rec = &pg->records[i]; 364 * It should not be between s and e if record was freed.
363 365 */
364 if ((rec->ip >= s) && (rec->ip < e)) 366 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
365 ftrace_free_rec(rec); 367 ftrace_free_rec(rec);
366 } 368 }
367 } 369 } while_for_each_ftrace_rec();
368 spin_unlock(&ftrace_lock); 370 mutex_unlock(&ftrace_lock);
369} 371}
370 372
371static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) 373static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -382,7 +384,7 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
382 return NULL; 384 return NULL;
383 } 385 }
384 386
385 ftrace_free_records = (void *)rec->ip; 387 ftrace_free_records = rec->freelist;
386 memset(rec, 0, sizeof(*rec)); 388 memset(rec, 0, sizeof(*rec));
387 return rec; 389 return rec;
388 } 390 }
@@ -414,8 +416,8 @@ ftrace_record_ip(unsigned long ip)
414 return NULL; 416 return NULL;
415 417
416 rec->ip = ip; 418 rec->ip = ip;
417 419 rec->newlist = ftrace_new_addrs;
418 list_add(&rec->list, &ftrace_new_addrs); 420 ftrace_new_addrs = rec;
419 421
420 return rec; 422 return rec;
421} 423}
@@ -461,10 +463,10 @@ static void ftrace_bug(int failed, unsigned long ip)
461static int 463static int
462__ftrace_replace_code(struct dyn_ftrace *rec, int enable) 464__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
463{ 465{
464 unsigned long ip, fl;
465 unsigned long ftrace_addr; 466 unsigned long ftrace_addr;
467 unsigned long ip, fl;
466 468
467 ftrace_addr = (unsigned long)ftrace_caller; 469 ftrace_addr = (unsigned long)FTRACE_ADDR;
468 470
469 ip = rec->ip; 471 ip = rec->ip;
470 472
@@ -473,7 +475,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
473 * it is not enabled then do nothing. 475 * it is not enabled then do nothing.
474 * 476 *
475 * If this record is not to be traced and 477 * If this record is not to be traced and
476 * it is enabled then disabled it. 478 * it is enabled then disable it.
477 * 479 *
478 */ 480 */
479 if (rec->flags & FTRACE_FL_NOTRACE) { 481 if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -493,7 +495,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
493 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) 495 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
494 return 0; 496 return 0;
495 497
496 /* Record is not filtered and is not enabled do nothing */ 498 /* Record is not filtered or enabled, do nothing */
497 if (!fl) 499 if (!fl)
498 return 0; 500 return 0;
499 501
@@ -515,7 +517,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
515 517
516 } else { 518 } else {
517 519
518 /* if record is not enabled do nothing */ 520 /* if record is not enabled, do nothing */
519 if (!(rec->flags & FTRACE_FL_ENABLED)) 521 if (!(rec->flags & FTRACE_FL_ENABLED))
520 return 0; 522 return 0;
521 523
@@ -531,41 +533,41 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
531 533
532static void ftrace_replace_code(int enable) 534static void ftrace_replace_code(int enable)
533{ 535{
534 int i, failed;
535 struct dyn_ftrace *rec; 536 struct dyn_ftrace *rec;
536 struct ftrace_page *pg; 537 struct ftrace_page *pg;
538 int failed;
537 539
538 for (pg = ftrace_pages_start; pg; pg = pg->next) { 540 do_for_each_ftrace_rec(pg, rec) {
539 for (i = 0; i < pg->index; i++) { 541 /*
540 rec = &pg->records[i]; 542 * Skip over free records, records that have
541 543 * failed and not converted.
542 /* 544 */
543 * Skip over free records and records that have 545 if (rec->flags & FTRACE_FL_FREE ||
544 * failed. 546 rec->flags & FTRACE_FL_FAILED ||
545 */ 547 !(rec->flags & FTRACE_FL_CONVERTED))
546 if (rec->flags & FTRACE_FL_FREE || 548 continue;
547 rec->flags & FTRACE_FL_FAILED)
548 continue;
549 549
550 /* ignore updates to this record's mcount site */ 550 /* ignore updates to this record's mcount site */
551 if (get_kprobe((void *)rec->ip)) { 551 if (get_kprobe((void *)rec->ip)) {
552 freeze_record(rec); 552 freeze_record(rec);
553 continue; 553 continue;
554 } else { 554 } else {
555 unfreeze_record(rec); 555 unfreeze_record(rec);
556 } 556 }
557 557
558 failed = __ftrace_replace_code(rec, enable); 558 failed = __ftrace_replace_code(rec, enable);
559 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { 559 if (failed) {
560 rec->flags |= FTRACE_FL_FAILED; 560 rec->flags |= FTRACE_FL_FAILED;
561 if ((system_state == SYSTEM_BOOTING) || 561 if ((system_state == SYSTEM_BOOTING) ||
562 !core_kernel_text(rec->ip)) { 562 !core_kernel_text(rec->ip)) {
563 ftrace_free_rec(rec); 563 ftrace_free_rec(rec);
564 } else 564 } else {
565 ftrace_bug(failed, rec->ip); 565 ftrace_bug(failed, rec->ip);
566 } 566 /* Stop processing */
567 return;
568 }
567 } 569 }
568 } 570 } while_for_each_ftrace_rec();
569} 571}
570 572
571static int 573static int
@@ -576,7 +578,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
576 578
577 ip = rec->ip; 579 ip = rec->ip;
578 580
579 ret = ftrace_make_nop(mod, rec, mcount_addr); 581 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
580 if (ret) { 582 if (ret) {
581 ftrace_bug(ret, ip); 583 ftrace_bug(ret, ip);
582 rec->flags |= FTRACE_FL_FAILED; 584 rec->flags |= FTRACE_FL_FAILED;
@@ -585,6 +587,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
585 return 1; 587 return 1;
586} 588}
587 589
590/*
591 * archs can override this function if they must do something
592 * before the modifying code is performed.
593 */
594int __weak ftrace_arch_code_modify_prepare(void)
595{
596 return 0;
597}
598
599/*
600 * archs can override this function if they must do something
601 * after the modifying code is performed.
602 */
603int __weak ftrace_arch_code_modify_post_process(void)
604{
605 return 0;
606}
607
588static int __ftrace_modify_code(void *data) 608static int __ftrace_modify_code(void *data)
589{ 609{
590 int *command = data; 610 int *command = data;
@@ -607,7 +627,17 @@ static int __ftrace_modify_code(void *data)
607 627
608static void ftrace_run_update_code(int command) 628static void ftrace_run_update_code(int command)
609{ 629{
630 int ret;
631
632 ret = ftrace_arch_code_modify_prepare();
633 FTRACE_WARN_ON(ret);
634 if (ret)
635 return;
636
610 stop_machine(__ftrace_modify_code, &command, NULL); 637 stop_machine(__ftrace_modify_code, &command, NULL);
638
639 ret = ftrace_arch_code_modify_post_process();
640 FTRACE_WARN_ON(ret);
611} 641}
612 642
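ftrace_run_update_code() now brackets stop_machine() with two __weak hooks, so an architecture that has to prepare its text mapping before live patching (and undo that afterwards) only needs to provide non-weak versions of them. A hypothetical override could look like the sketch below; the two helpers called inside it are placeholders, not real arch APIs:

/* Hypothetical arch/<arch>/kernel/ftrace.c override -- sketch only. */
int ftrace_arch_code_modify_prepare(void)
{
	/* e.g. make kernel text writable before patching the call sites */
	demo_arch_text_make_writable();	/* placeholder helper */
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	demo_arch_text_make_readonly();	/* placeholder helper */
	return 0;
}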
613static ftrace_func_t saved_ftrace_func; 643static ftrace_func_t saved_ftrace_func;
@@ -631,13 +661,10 @@ static void ftrace_startup(int command)
631 if (unlikely(ftrace_disabled)) 661 if (unlikely(ftrace_disabled))
632 return; 662 return;
633 663
634 mutex_lock(&ftrace_start_lock);
635 ftrace_start_up++; 664 ftrace_start_up++;
636 command |= FTRACE_ENABLE_CALLS; 665 command |= FTRACE_ENABLE_CALLS;
637 666
638 ftrace_startup_enable(command); 667 ftrace_startup_enable(command);
639
640 mutex_unlock(&ftrace_start_lock);
641} 668}
642 669
643static void ftrace_shutdown(int command) 670static void ftrace_shutdown(int command)
@@ -645,7 +672,6 @@ static void ftrace_shutdown(int command)
645 if (unlikely(ftrace_disabled)) 672 if (unlikely(ftrace_disabled))
646 return; 673 return;
647 674
648 mutex_lock(&ftrace_start_lock);
649 ftrace_start_up--; 675 ftrace_start_up--;
650 if (!ftrace_start_up) 676 if (!ftrace_start_up)
651 command |= FTRACE_DISABLE_CALLS; 677 command |= FTRACE_DISABLE_CALLS;
@@ -656,11 +682,9 @@ static void ftrace_shutdown(int command)
656 } 682 }
657 683
658 if (!command || !ftrace_enabled) 684 if (!command || !ftrace_enabled)
659 goto out; 685 return;
660 686
661 ftrace_run_update_code(command); 687 ftrace_run_update_code(command);
662 out:
663 mutex_unlock(&ftrace_start_lock);
664} 688}
665 689
666static void ftrace_startup_sysctl(void) 690static void ftrace_startup_sysctl(void)
@@ -670,7 +694,6 @@ static void ftrace_startup_sysctl(void)
670 if (unlikely(ftrace_disabled)) 694 if (unlikely(ftrace_disabled))
671 return; 695 return;
672 696
673 mutex_lock(&ftrace_start_lock);
674 /* Force update next time */ 697 /* Force update next time */
675 saved_ftrace_func = NULL; 698 saved_ftrace_func = NULL;
676 /* ftrace_start_up is true if we want ftrace running */ 699 /* ftrace_start_up is true if we want ftrace running */
@@ -678,7 +701,6 @@ static void ftrace_startup_sysctl(void)
678 command |= FTRACE_ENABLE_CALLS; 701 command |= FTRACE_ENABLE_CALLS;
679 702
680 ftrace_run_update_code(command); 703 ftrace_run_update_code(command);
681 mutex_unlock(&ftrace_start_lock);
682} 704}
683 705
684static void ftrace_shutdown_sysctl(void) 706static void ftrace_shutdown_sysctl(void)
@@ -688,13 +710,11 @@ static void ftrace_shutdown_sysctl(void)
688 if (unlikely(ftrace_disabled)) 710 if (unlikely(ftrace_disabled))
689 return; 711 return;
690 712
691 mutex_lock(&ftrace_start_lock);
692 /* ftrace_start_up is true if ftrace is running */ 713 /* ftrace_start_up is true if ftrace is running */
693 if (ftrace_start_up) 714 if (ftrace_start_up)
694 command |= FTRACE_DISABLE_CALLS; 715 command |= FTRACE_DISABLE_CALLS;
695 716
696 ftrace_run_update_code(command); 717 ftrace_run_update_code(command);
697 mutex_unlock(&ftrace_start_lock);
698} 718}
699 719
700static cycle_t ftrace_update_time; 720static cycle_t ftrace_update_time;
@@ -703,19 +723,21 @@ unsigned long ftrace_update_tot_cnt;
703 723
704static int ftrace_update_code(struct module *mod) 724static int ftrace_update_code(struct module *mod)
705{ 725{
706 struct dyn_ftrace *p, *t; 726 struct dyn_ftrace *p;
707 cycle_t start, stop; 727 cycle_t start, stop;
708 728
709 start = ftrace_now(raw_smp_processor_id()); 729 start = ftrace_now(raw_smp_processor_id());
710 ftrace_update_cnt = 0; 730 ftrace_update_cnt = 0;
711 731
712 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) { 732 while (ftrace_new_addrs) {
713 733
714 /* If something went wrong, bail without enabling anything */ 734 /* If something went wrong, bail without enabling anything */
715 if (unlikely(ftrace_disabled)) 735 if (unlikely(ftrace_disabled))
716 return -1; 736 return -1;
717 737
718 list_del_init(&p->list); 738 p = ftrace_new_addrs;
739 ftrace_new_addrs = p->newlist;
740 p->flags = 0L;
719 741
720 /* convert record (i.e, patch mcount-call with NOP) */ 742 /* convert record (i.e, patch mcount-call with NOP) */
721 if (ftrace_code_disable(mod, p)) { 743 if (ftrace_code_disable(mod, p)) {
@@ -781,13 +803,16 @@ enum {
781 FTRACE_ITER_CONT = (1 << 1), 803 FTRACE_ITER_CONT = (1 << 1),
782 FTRACE_ITER_NOTRACE = (1 << 2), 804 FTRACE_ITER_NOTRACE = (1 << 2),
783 FTRACE_ITER_FAILURES = (1 << 3), 805 FTRACE_ITER_FAILURES = (1 << 3),
806 FTRACE_ITER_PRINTALL = (1 << 4),
807 FTRACE_ITER_HASH = (1 << 5),
784}; 808};
785 809
786#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 810#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
787 811
788struct ftrace_iterator { 812struct ftrace_iterator {
789 struct ftrace_page *pg; 813 struct ftrace_page *pg;
790 unsigned idx; 814 int hidx;
815 int idx;
791 unsigned flags; 816 unsigned flags;
792 unsigned char buffer[FTRACE_BUFF_MAX+1]; 817 unsigned char buffer[FTRACE_BUFF_MAX+1];
793 unsigned buffer_idx; 818 unsigned buffer_idx;
@@ -795,15 +820,89 @@ struct ftrace_iterator {
795}; 820};
796 821
797static void * 822static void *
823t_hash_next(struct seq_file *m, void *v, loff_t *pos)
824{
825 struct ftrace_iterator *iter = m->private;
826 struct hlist_node *hnd = v;
827 struct hlist_head *hhd;
828
829 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
830
831 (*pos)++;
832
833 retry:
834 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
835 return NULL;
836
837 hhd = &ftrace_func_hash[iter->hidx];
838
839 if (hlist_empty(hhd)) {
840 iter->hidx++;
841 hnd = NULL;
842 goto retry;
843 }
844
845 if (!hnd)
846 hnd = hhd->first;
847 else {
848 hnd = hnd->next;
849 if (!hnd) {
850 iter->hidx++;
851 goto retry;
852 }
853 }
854
855 return hnd;
856}
857
858static void *t_hash_start(struct seq_file *m, loff_t *pos)
859{
860 struct ftrace_iterator *iter = m->private;
861 void *p = NULL;
862
863 iter->flags |= FTRACE_ITER_HASH;
864
865 return t_hash_next(m, p, pos);
866}
867
868static int t_hash_show(struct seq_file *m, void *v)
869{
870 struct ftrace_func_probe *rec;
871 struct hlist_node *hnd = v;
872 char str[KSYM_SYMBOL_LEN];
873
874 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
875
876 if (rec->ops->print)
877 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
878
879 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
880 seq_printf(m, "%s:", str);
881
882 kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
883 seq_printf(m, "%s", str);
884
885 if (rec->data)
886 seq_printf(m, ":%p", rec->data);
887 seq_putc(m, '\n');
888
889 return 0;
890}
891
892static void *
798t_next(struct seq_file *m, void *v, loff_t *pos) 893t_next(struct seq_file *m, void *v, loff_t *pos)
799{ 894{
800 struct ftrace_iterator *iter = m->private; 895 struct ftrace_iterator *iter = m->private;
801 struct dyn_ftrace *rec = NULL; 896 struct dyn_ftrace *rec = NULL;
802 897
898 if (iter->flags & FTRACE_ITER_HASH)
899 return t_hash_next(m, v, pos);
900
803 (*pos)++; 901 (*pos)++;
804 902
805 /* should not be called from interrupt context */ 903 if (iter->flags & FTRACE_ITER_PRINTALL)
806 spin_lock(&ftrace_lock); 904 return NULL;
905
807 retry: 906 retry:
808 if (iter->idx >= iter->pg->index) { 907 if (iter->idx >= iter->pg->index) {
809 if (iter->pg->next) { 908 if (iter->pg->next) {
@@ -832,7 +931,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
832 goto retry; 931 goto retry;
833 } 932 }
834 } 933 }
835 spin_unlock(&ftrace_lock);
836 934
837 return rec; 935 return rec;
838} 936}
@@ -842,6 +940,23 @@ static void *t_start(struct seq_file *m, loff_t *pos)
842 struct ftrace_iterator *iter = m->private; 940 struct ftrace_iterator *iter = m->private;
843 void *p = NULL; 941 void *p = NULL;
844 942
943 mutex_lock(&ftrace_lock);
944 /*
945 * For set_ftrace_filter reading, if we have the filter
946 * off, we can short cut and just print out that all
947 * functions are enabled.
948 */
949 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
950 if (*pos > 0)
951 return t_hash_start(m, pos);
952 iter->flags |= FTRACE_ITER_PRINTALL;
953 (*pos)++;
954 return iter;
955 }
956
957 if (iter->flags & FTRACE_ITER_HASH)
958 return t_hash_start(m, pos);
959
845 if (*pos > 0) { 960 if (*pos > 0) {
846 if (iter->idx < 0) 961 if (iter->idx < 0)
847 return p; 962 return p;
@@ -851,18 +966,31 @@ static void *t_start(struct seq_file *m, loff_t *pos)
851 966
852 p = t_next(m, p, pos); 967 p = t_next(m, p, pos);
853 968
969 if (!p)
970 return t_hash_start(m, pos);
971
854 return p; 972 return p;
855} 973}
856 974
857static void t_stop(struct seq_file *m, void *p) 975static void t_stop(struct seq_file *m, void *p)
858{ 976{
977 mutex_unlock(&ftrace_lock);
859} 978}
860 979
861static int t_show(struct seq_file *m, void *v) 980static int t_show(struct seq_file *m, void *v)
862{ 981{
982 struct ftrace_iterator *iter = m->private;
863 struct dyn_ftrace *rec = v; 983 struct dyn_ftrace *rec = v;
864 char str[KSYM_SYMBOL_LEN]; 984 char str[KSYM_SYMBOL_LEN];
865 985
986 if (iter->flags & FTRACE_ITER_HASH)
987 return t_hash_show(m, v);
988
989 if (iter->flags & FTRACE_ITER_PRINTALL) {
990 seq_printf(m, "#### all functions enabled ####\n");
991 return 0;
992 }
993
866 if (!rec) 994 if (!rec)
867 return 0; 995 return 0;
868 996
@@ -941,23 +1069,16 @@ static void ftrace_filter_reset(int enable)
941 struct ftrace_page *pg; 1069 struct ftrace_page *pg;
942 struct dyn_ftrace *rec; 1070 struct dyn_ftrace *rec;
943 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1071 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
944 unsigned i;
945 1072
946 /* should not be called from interrupt context */ 1073 mutex_lock(&ftrace_lock);
947 spin_lock(&ftrace_lock);
948 if (enable) 1074 if (enable)
949 ftrace_filtered = 0; 1075 ftrace_filtered = 0;
950 pg = ftrace_pages_start; 1076 do_for_each_ftrace_rec(pg, rec) {
951 while (pg) { 1077 if (rec->flags & FTRACE_FL_FAILED)
952 for (i = 0; i < pg->index; i++) { 1078 continue;
953 rec = &pg->records[i]; 1079 rec->flags &= ~type;
954 if (rec->flags & FTRACE_FL_FAILED) 1080 } while_for_each_ftrace_rec();
955 continue; 1081 mutex_unlock(&ftrace_lock);
956 rec->flags &= ~type;
957 }
958 pg = pg->next;
959 }
960 spin_unlock(&ftrace_lock);
961} 1082}
962 1083
963static int 1084static int
@@ -1008,16 +1129,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
1008 return ftrace_regex_open(inode, file, 0); 1129 return ftrace_regex_open(inode, file, 0);
1009} 1130}
1010 1131
1011static ssize_t
1012ftrace_regex_read(struct file *file, char __user *ubuf,
1013 size_t cnt, loff_t *ppos)
1014{
1015 if (file->f_mode & FMODE_READ)
1016 return seq_read(file, ubuf, cnt, ppos);
1017 else
1018 return -EPERM;
1019}
1020
1021static loff_t 1132static loff_t
1022ftrace_regex_lseek(struct file *file, loff_t offset, int origin) 1133ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1023{ 1134{
@@ -1038,86 +1149,536 @@ enum {
1038 MATCH_END_ONLY, 1149 MATCH_END_ONLY,
1039}; 1150};
1040 1151
1041static void 1152/*
1042ftrace_match(unsigned char *buff, int len, int enable) 1153 * (static function - no need for kernel doc)
1154 *
1155 * Pass in a buffer containing a glob and this function will
1156 * set search to point to the search part of the buffer and
1157 * return the type of search it is (see enum above).
1158 * This does modify buff.
1159 *
1160 * Returns enum type.
1161 * search returns the pointer to use for comparison.
1162 * not returns 1 if buff started with a '!'
1163 * 0 otherwise.
1164 */
1165static int
1166ftrace_setup_glob(char *buff, int len, char **search, int *not)
1043{ 1167{
1044 char str[KSYM_SYMBOL_LEN];
1045 char *search = NULL;
1046 struct ftrace_page *pg;
1047 struct dyn_ftrace *rec;
1048 int type = MATCH_FULL; 1168 int type = MATCH_FULL;
1049 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1169 int i;
1050 unsigned i, match = 0, search_len = 0;
1051 int not = 0;
1052 1170
1053 if (buff[0] == '!') { 1171 if (buff[0] == '!') {
1054 not = 1; 1172 *not = 1;
1055 buff++; 1173 buff++;
1056 len--; 1174 len--;
1057 } 1175 } else
1176 *not = 0;
1177
1178 *search = buff;
1058 1179
1059 for (i = 0; i < len; i++) { 1180 for (i = 0; i < len; i++) {
1060 if (buff[i] == '*') { 1181 if (buff[i] == '*') {
1061 if (!i) { 1182 if (!i) {
1062 search = buff + i + 1; 1183 *search = buff + 1;
1063 type = MATCH_END_ONLY; 1184 type = MATCH_END_ONLY;
1064 search_len = len - (i + 1);
1065 } else { 1185 } else {
1066 if (type == MATCH_END_ONLY) { 1186 if (type == MATCH_END_ONLY)
1067 type = MATCH_MIDDLE_ONLY; 1187 type = MATCH_MIDDLE_ONLY;
1068 } else { 1188 else
1069 match = i;
1070 type = MATCH_FRONT_ONLY; 1189 type = MATCH_FRONT_ONLY;
1071 }
1072 buff[i] = 0; 1190 buff[i] = 0;
1073 break; 1191 break;
1074 } 1192 }
1075 } 1193 }
1076 } 1194 }
1077 1195
1078 /* should not be called from interrupt context */ 1196 return type;
1079 spin_lock(&ftrace_lock); 1197}
1080 if (enable) 1198
1081 ftrace_filtered = 1; 1199static int ftrace_match(char *str, char *regex, int len, int type)
1082 pg = ftrace_pages_start; 1200{
1083 while (pg) { 1201 int matched = 0;
1084 for (i = 0; i < pg->index; i++) { 1202 char *ptr;
1085 int matched = 0; 1203
1086 char *ptr; 1204 switch (type) {
1087 1205 case MATCH_FULL:
1088 rec = &pg->records[i]; 1206 if (strcmp(str, regex) == 0)
1089 if (rec->flags & FTRACE_FL_FAILED) 1207 matched = 1;
1208 break;
1209 case MATCH_FRONT_ONLY:
1210 if (strncmp(str, regex, len) == 0)
1211 matched = 1;
1212 break;
1213 case MATCH_MIDDLE_ONLY:
1214 if (strstr(str, regex))
1215 matched = 1;
1216 break;
1217 case MATCH_END_ONLY:
1218 ptr = strstr(str, regex);
1219 if (ptr && (ptr[len] == 0))
1220 matched = 1;
1221 break;
1222 }
1223
1224 return matched;
1225}
1226
1227static int
1228ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1229{
1230 char str[KSYM_SYMBOL_LEN];
1231
1232 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1233 return ftrace_match(str, regex, len, type);
1234}
1235
1236static void ftrace_match_records(char *buff, int len, int enable)
1237{
1238 unsigned int search_len;
1239 struct ftrace_page *pg;
1240 struct dyn_ftrace *rec;
1241 unsigned long flag;
1242 char *search;
1243 int type;
1244 int not;
1245
1246 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1247 type = ftrace_setup_glob(buff, len, &search, &not);
1248
1249 search_len = strlen(search);
1250
1251 mutex_lock(&ftrace_lock);
1252 do_for_each_ftrace_rec(pg, rec) {
1253
1254 if (rec->flags & FTRACE_FL_FAILED)
1255 continue;
1256
1257 if (ftrace_match_record(rec, search, search_len, type)) {
1258 if (not)
1259 rec->flags &= ~flag;
1260 else
1261 rec->flags |= flag;
1262 }
1263 /*
1264 * Only enable filtering if we have a function that
1265 * is filtered on.
1266 */
1267 if (enable && (rec->flags & FTRACE_FL_FILTER))
1268 ftrace_filtered = 1;
1269 } while_for_each_ftrace_rec();
1270 mutex_unlock(&ftrace_lock);
1271}
1272
1273static int
1274ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1275 char *regex, int len, int type)
1276{
1277 char str[KSYM_SYMBOL_LEN];
1278 char *modname;
1279
1280 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1281
1282 if (!modname || strcmp(modname, mod))
1283 return 0;
1284
1285 /* blank search means to match all funcs in the mod */
1286 if (len)
1287 return ftrace_match(str, regex, len, type);
1288 else
1289 return 1;
1290}
1291
1292static void ftrace_match_module_records(char *buff, char *mod, int enable)
1293{
1294 unsigned search_len = 0;
1295 struct ftrace_page *pg;
1296 struct dyn_ftrace *rec;
1297 int type = MATCH_FULL;
1298 char *search = buff;
1299 unsigned long flag;
1300 int not = 0;
1301
1302 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1303
1304 /* blank or '*' mean the same */
1305 if (strcmp(buff, "*") == 0)
1306 buff[0] = 0;
1307
1308 /* handle the case of 'dont filter this module' */
1309 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1310 buff[0] = 0;
1311 not = 1;
1312 }
1313
1314 if (strlen(buff)) {
1315 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1316 search_len = strlen(search);
1317 }
1318
1319 mutex_lock(&ftrace_lock);
1320 do_for_each_ftrace_rec(pg, rec) {
1321
1322 if (rec->flags & FTRACE_FL_FAILED)
1323 continue;
1324
1325 if (ftrace_match_module_record(rec, mod,
1326 search, search_len, type)) {
1327 if (not)
1328 rec->flags &= ~flag;
1329 else
1330 rec->flags |= flag;
1331 }
1332 if (enable && (rec->flags & FTRACE_FL_FILTER))
1333 ftrace_filtered = 1;
1334
1335 } while_for_each_ftrace_rec();
1336 mutex_unlock(&ftrace_lock);
1337}
1338
1339/*
1340 * We register the module command as a template to show others how
 1341 * to register a command as well.
1342 */
1343
1344static int
1345ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1346{
1347 char *mod;
1348
1349 /*
1350 * cmd == 'mod' because we only registered this func
1351 * for the 'mod' ftrace_func_command.
1352 * But if you register one func with multiple commands,
1353 * you can tell which command was used by the cmd
1354 * parameter.
1355 */
1356
1357 /* we must have a module name */
1358 if (!param)
1359 return -EINVAL;
1360
1361 mod = strsep(&param, ":");
1362 if (!strlen(mod))
1363 return -EINVAL;
1364
1365 ftrace_match_module_records(func, mod, enable);
1366 return 0;
1367}
1368
1369static struct ftrace_func_command ftrace_mod_cmd = {
1370 .name = "mod",
1371 .func = ftrace_mod_callback,
1372};
1373
1374static int __init ftrace_mod_cmd_init(void)
1375{
1376 return register_ftrace_command(&ftrace_mod_cmd);
1377}
1378device_initcall(ftrace_mod_cmd_init);
1379
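As the comment above says, the mod command is meant as a template: any subsystem can hook its own func:command:param syntax into set_ftrace_filter by registering a struct ftrace_func_command. A hypothetical registration modeled directly on it (the "example" command and its callback body are made up for illustration):

/* Sketch of a made-up "example" command for set_ftrace_filter. */
static int example_callback(char *func, char *cmd, char *param, int enable)
{
	/* func  == the glob before the first ':'
	 * cmd   == "example"
	 * param == whatever followed "example:" (may be NULL)
	 */
	pr_info("example: func=%s param=%s enable=%d\n",
		func, param ? param : "(none)", enable);
	return 0;
}

static struct ftrace_func_command example_cmd = {
	.name	= "example",
	.func	= example_callback,
};

static int __init example_cmd_init(void)
{
	return register_ftrace_command(&example_cmd);
}
device_initcall(example_cmd_init);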
1380static void
1381function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1382{
1383 struct ftrace_func_probe *entry;
1384 struct hlist_head *hhd;
1385 struct hlist_node *n;
1386 unsigned long key;
1387 int resched;
1388
1389 key = hash_long(ip, FTRACE_HASH_BITS);
1390
1391 hhd = &ftrace_func_hash[key];
1392
1393 if (hlist_empty(hhd))
1394 return;
1395
1396 /*
1397 * Disable preemption for these calls to prevent a RCU grace
1398 * period. This syncs the hash iteration and freeing of items
1399 * on the hash. rcu_read_lock is too dangerous here.
1400 */
1401 resched = ftrace_preempt_disable();
1402 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1403 if (entry->ip == ip)
1404 entry->ops->func(ip, parent_ip, &entry->data);
1405 }
1406 ftrace_preempt_enable(resched);
1407}
1408
1409static struct ftrace_ops trace_probe_ops __read_mostly =
1410{
1411 .func = function_trace_probe_call,
1412};
1413
1414static int ftrace_probe_registered;
1415
1416static void __enable_ftrace_function_probe(void)
1417{
1418 int i;
1419
1420 if (ftrace_probe_registered)
1421 return;
1422
1423 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1424 struct hlist_head *hhd = &ftrace_func_hash[i];
1425 if (hhd->first)
1426 break;
1427 }
1428 /* Nothing registered? */
1429 if (i == FTRACE_FUNC_HASHSIZE)
1430 return;
1431
1432 __register_ftrace_function(&trace_probe_ops);
1433 ftrace_startup(0);
1434 ftrace_probe_registered = 1;
1435}
1436
1437static void __disable_ftrace_function_probe(void)
1438{
1439 int i;
1440
1441 if (!ftrace_probe_registered)
1442 return;
1443
1444 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1445 struct hlist_head *hhd = &ftrace_func_hash[i];
1446 if (hhd->first)
1447 return;
1448 }
1449
1450 /* no more funcs left */
1451 __unregister_ftrace_function(&trace_probe_ops);
1452 ftrace_shutdown(0);
1453 ftrace_probe_registered = 0;
1454}
1455
1456
1457static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1458{
1459 struct ftrace_func_probe *entry =
1460 container_of(rhp, struct ftrace_func_probe, rcu);
1461
1462 if (entry->ops->free)
1463 entry->ops->free(&entry->data);
1464 kfree(entry);
1465}
1466
1467
1468int
1469register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1470 void *data)
1471{
1472 struct ftrace_func_probe *entry;
1473 struct ftrace_page *pg;
1474 struct dyn_ftrace *rec;
1475 int type, len, not;
1476 unsigned long key;
1477 int count = 0;
1478 char *search;
1479
1480 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1481 len = strlen(search);
1482
1483 /* we do not support '!' for function probes */
1484 if (WARN_ON(not))
1485 return -EINVAL;
1486
1487 mutex_lock(&ftrace_lock);
1488 do_for_each_ftrace_rec(pg, rec) {
1489
1490 if (rec->flags & FTRACE_FL_FAILED)
1491 continue;
1492
1493 if (!ftrace_match_record(rec, search, len, type))
1494 continue;
1495
1496 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1497 if (!entry) {
1498 /* If we did not process any, then return error */
1499 if (!count)
1500 count = -ENOMEM;
1501 goto out_unlock;
1502 }
1503
1504 count++;
1505
1506 entry->data = data;
1507
1508 /*
1509 * The caller might want to do something special
1510 * for each function we find. We call the callback
1511 * to give the caller an opportunity to do so.
1512 */
1513 if (ops->callback) {
1514 if (ops->callback(rec->ip, &entry->data) < 0) {
1515 /* caller does not like this func */
1516 kfree(entry);
1090 continue; 1517 continue;
1091 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1092 switch (type) {
1093 case MATCH_FULL:
1094 if (strcmp(str, buff) == 0)
1095 matched = 1;
1096 break;
1097 case MATCH_FRONT_ONLY:
1098 if (memcmp(str, buff, match) == 0)
1099 matched = 1;
1100 break;
1101 case MATCH_MIDDLE_ONLY:
1102 if (strstr(str, search))
1103 matched = 1;
1104 break;
1105 case MATCH_END_ONLY:
1106 ptr = strstr(str, search);
1107 if (ptr && (ptr[search_len] == 0))
1108 matched = 1;
1109 break;
1110 } 1518 }
1111 if (matched) { 1519 }
1112 if (not) 1520
1113 rec->flags &= ~flag; 1521 entry->ops = ops;
1114 else 1522 entry->ip = rec->ip;
1115 rec->flags |= flag; 1523
1524 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1525 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1526
1527 } while_for_each_ftrace_rec();
1528 __enable_ftrace_function_probe();
1529
1530 out_unlock:
1531 mutex_unlock(&ftrace_lock);
1532
1533 return count;
1534}
1535
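register_ftrace_function_probe() attaches a callback to every function whose name matches the glob: the ops' func runs from the function-trace hook for each hit, while the optional callback/print/free members let a caller initialize, display, and release the per-entry data slot. A hedged usage sketch with a made-up probe that just counts hits in that slot:

/* Hypothetical probe: count calls to functions matching "vfs_*". */
static void demo_probe_func(unsigned long ip, unsigned long parent_ip,
			    void **data)
{
	unsigned long *count = (unsigned long *)data;

	(*count)++;	/* reuse the pointer-sized per-entry slot as a counter */
}

static struct ftrace_probe_ops demo_probe_ops = {
	.func	= demo_probe_func,
};

static void demo_attach(void)
{
	char glob[] = "vfs_*";	/* must be writable: the glob gets parsed in place */
	int matched = register_ftrace_function_probe(glob, &demo_probe_ops, NULL);

	if (matched <= 0)
		pr_warning("no functions matched the probe glob\n");
}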
1536enum {
1537 PROBE_TEST_FUNC = 1,
1538 PROBE_TEST_DATA = 2
1539};
1540
1541static void
1542__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1543 void *data, int flags)
1544{
1545 struct ftrace_func_probe *entry;
1546 struct hlist_node *n, *tmp;
1547 char str[KSYM_SYMBOL_LEN];
1548 int type = MATCH_FULL;
1549 int i, len = 0;
1550 char *search;
1551
1552 if (glob && (strcmp(glob, "*") || !strlen(glob)))
1553 glob = NULL;
1554 else {
1555 int not;
1556
1557 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1558 len = strlen(search);
1559
1560 /* we do not support '!' for function probes */
1561 if (WARN_ON(not))
1562 return;
1563 }
1564
1565 mutex_lock(&ftrace_lock);
1566 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1567 struct hlist_head *hhd = &ftrace_func_hash[i];
1568
1569 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
1570
1571 /* break up if statements for readability */
1572 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
1573 continue;
1574
1575 if ((flags & PROBE_TEST_DATA) && entry->data != data)
1576 continue;
1577
1578 /* do this last, since it is the most expensive */
1579 if (glob) {
1580 kallsyms_lookup(entry->ip, NULL, NULL,
1581 NULL, str);
1582 if (!ftrace_match(str, glob, len, type))
1583 continue;
1116 } 1584 }
1585
1586 hlist_del(&entry->node);
1587 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1117 } 1588 }
1118 pg = pg->next;
1119 } 1589 }
1120 spin_unlock(&ftrace_lock); 1590 __disable_ftrace_function_probe();
1591 mutex_unlock(&ftrace_lock);
1592}
1593
1594void
1595unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1596 void *data)
1597{
1598 __unregister_ftrace_function_probe(glob, ops, data,
1599 PROBE_TEST_FUNC | PROBE_TEST_DATA);
1600}
1601
1602void
1603unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
1604{
1605 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
1606}
1607
1608void unregister_ftrace_function_probe_all(char *glob)
1609{
1610 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
1611}
1612
1613static LIST_HEAD(ftrace_commands);
1614static DEFINE_MUTEX(ftrace_cmd_mutex);
1615
1616int register_ftrace_command(struct ftrace_func_command *cmd)
1617{
1618 struct ftrace_func_command *p;
1619 int ret = 0;
1620
1621 mutex_lock(&ftrace_cmd_mutex);
1622 list_for_each_entry(p, &ftrace_commands, list) {
1623 if (strcmp(cmd->name, p->name) == 0) {
1624 ret = -EBUSY;
1625 goto out_unlock;
1626 }
1627 }
1628 list_add(&cmd->list, &ftrace_commands);
1629 out_unlock:
1630 mutex_unlock(&ftrace_cmd_mutex);
1631
1632 return ret;
1633}
1634
1635int unregister_ftrace_command(struct ftrace_func_command *cmd)
1636{
1637 struct ftrace_func_command *p, *n;
1638 int ret = -ENODEV;
1639
1640 mutex_lock(&ftrace_cmd_mutex);
1641 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1642 if (strcmp(cmd->name, p->name) == 0) {
1643 ret = 0;
1644 list_del_init(&p->list);
1645 goto out_unlock;
1646 }
1647 }
1648 out_unlock:
1649 mutex_unlock(&ftrace_cmd_mutex);
1650
1651 return ret;
1652}
1653
1654static int ftrace_process_regex(char *buff, int len, int enable)
1655{
1656 char *func, *command, *next = buff;
1657 struct ftrace_func_command *p;
1658 int ret = -EINVAL;
1659
1660 func = strsep(&next, ":");
1661
1662 if (!next) {
1663 ftrace_match_records(func, len, enable);
1664 return 0;
1665 }
1666
1667 /* command found */
1668
1669 command = strsep(&next, ":");
1670
1671 mutex_lock(&ftrace_cmd_mutex);
1672 list_for_each_entry(p, &ftrace_commands, list) {
1673 if (strcmp(p->name, command) == 0) {
1674 ret = p->func(func, command, next, enable);
1675 goto out_unlock;
1676 }
1677 }
1678 out_unlock:
1679 mutex_unlock(&ftrace_cmd_mutex);
1680
1681 return ret;
1121} 1682}
1122 1683
1123static ssize_t 1684static ssize_t
@@ -1187,7 +1748,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
1187 if (isspace(ch)) { 1748 if (isspace(ch)) {
1188 iter->filtered++; 1749 iter->filtered++;
1189 iter->buffer[iter->buffer_idx] = 0; 1750 iter->buffer[iter->buffer_idx] = 0;
1190 ftrace_match(iter->buffer, iter->buffer_idx, enable); 1751 ret = ftrace_process_regex(iter->buffer,
1752 iter->buffer_idx, enable);
1753 if (ret)
1754 goto out;
1191 iter->buffer_idx = 0; 1755 iter->buffer_idx = 0;
1192 } else 1756 } else
1193 iter->flags |= FTRACE_ITER_CONT; 1757 iter->flags |= FTRACE_ITER_CONT;
@@ -1226,7 +1790,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1226 if (reset) 1790 if (reset)
1227 ftrace_filter_reset(enable); 1791 ftrace_filter_reset(enable);
1228 if (buf) 1792 if (buf)
1229 ftrace_match(buf, len, enable); 1793 ftrace_match_records(buf, len, enable);
1230 mutex_unlock(&ftrace_regex_lock); 1794 mutex_unlock(&ftrace_regex_lock);
1231} 1795}
1232 1796
@@ -1276,15 +1840,13 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1276 if (iter->buffer_idx) { 1840 if (iter->buffer_idx) {
1277 iter->filtered++; 1841 iter->filtered++;
1278 iter->buffer[iter->buffer_idx] = 0; 1842 iter->buffer[iter->buffer_idx] = 0;
1279 ftrace_match(iter->buffer, iter->buffer_idx, enable); 1843 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
1280 } 1844 }
1281 1845
1282 mutex_lock(&ftrace_sysctl_lock); 1846 mutex_lock(&ftrace_lock);
1283 mutex_lock(&ftrace_start_lock);
1284 if (ftrace_start_up && ftrace_enabled) 1847 if (ftrace_start_up && ftrace_enabled)
1285 ftrace_run_update_code(FTRACE_ENABLE_CALLS); 1848 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1286 mutex_unlock(&ftrace_start_lock); 1849 mutex_unlock(&ftrace_lock);
1287 mutex_unlock(&ftrace_sysctl_lock);
1288 1850
1289 kfree(iter); 1851 kfree(iter);
1290 mutex_unlock(&ftrace_regex_lock); 1852 mutex_unlock(&ftrace_regex_lock);
@@ -1303,31 +1865,31 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
1303 return ftrace_regex_release(inode, file, 0); 1865 return ftrace_regex_release(inode, file, 0);
1304} 1866}
1305 1867
1306static struct file_operations ftrace_avail_fops = { 1868static const struct file_operations ftrace_avail_fops = {
1307 .open = ftrace_avail_open, 1869 .open = ftrace_avail_open,
1308 .read = seq_read, 1870 .read = seq_read,
1309 .llseek = seq_lseek, 1871 .llseek = seq_lseek,
1310 .release = ftrace_avail_release, 1872 .release = ftrace_avail_release,
1311}; 1873};
1312 1874
1313static struct file_operations ftrace_failures_fops = { 1875static const struct file_operations ftrace_failures_fops = {
1314 .open = ftrace_failures_open, 1876 .open = ftrace_failures_open,
1315 .read = seq_read, 1877 .read = seq_read,
1316 .llseek = seq_lseek, 1878 .llseek = seq_lseek,
1317 .release = ftrace_avail_release, 1879 .release = ftrace_avail_release,
1318}; 1880};
1319 1881
1320static struct file_operations ftrace_filter_fops = { 1882static const struct file_operations ftrace_filter_fops = {
1321 .open = ftrace_filter_open, 1883 .open = ftrace_filter_open,
1322 .read = ftrace_regex_read, 1884 .read = seq_read,
1323 .write = ftrace_filter_write, 1885 .write = ftrace_filter_write,
1324 .llseek = ftrace_regex_lseek, 1886 .llseek = ftrace_regex_lseek,
1325 .release = ftrace_filter_release, 1887 .release = ftrace_filter_release,
1326}; 1888};
1327 1889
1328static struct file_operations ftrace_notrace_fops = { 1890static const struct file_operations ftrace_notrace_fops = {
1329 .open = ftrace_notrace_open, 1891 .open = ftrace_notrace_open,
1330 .read = ftrace_regex_read, 1892 .read = seq_read,
1331 .write = ftrace_notrace_write, 1893 .write = ftrace_notrace_write,
1332 .llseek = ftrace_regex_lseek, 1894 .llseek = ftrace_regex_lseek,
1333 .release = ftrace_notrace_release, 1895 .release = ftrace_notrace_release,
@@ -1360,6 +1922,10 @@ static void *g_start(struct seq_file *m, loff_t *pos)
1360 1922
1361 mutex_lock(&graph_lock); 1923 mutex_lock(&graph_lock);
1362 1924
 1925 /* Nothing to filter; tell g_show to report that all functions are enabled */
1926 if (!ftrace_graph_count && !*pos)
1927 return (void *)1;
1928
1363 p = g_next(m, p, pos); 1929 p = g_next(m, p, pos);
1364 1930
1365 return p; 1931 return p;
@@ -1378,6 +1944,11 @@ static int g_show(struct seq_file *m, void *v)
1378 if (!ptr) 1944 if (!ptr)
1379 return 0; 1945 return 0;
1380 1946
1947 if (ptr == (unsigned long *)1) {
1948 seq_printf(m, "#### all functions enabled ####\n");
1949 return 0;
1950 }
1951
1381 kallsyms_lookup(*ptr, NULL, NULL, NULL, str); 1952 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1382 1953
1383 seq_printf(m, "%s\n", str); 1954 seq_printf(m, "%s\n", str);
@@ -1420,53 +1991,53 @@ ftrace_graph_open(struct inode *inode, struct file *file)
1420 return ret; 1991 return ret;
1421} 1992}
1422 1993
1423static ssize_t
1424ftrace_graph_read(struct file *file, char __user *ubuf,
1425 size_t cnt, loff_t *ppos)
1426{
1427 if (file->f_mode & FMODE_READ)
1428 return seq_read(file, ubuf, cnt, ppos);
1429 else
1430 return -EPERM;
1431}
1432
1433static int 1994static int
1434ftrace_set_func(unsigned long *array, int idx, char *buffer) 1995ftrace_set_func(unsigned long *array, int *idx, char *buffer)
1435{ 1996{
1436 char str[KSYM_SYMBOL_LEN];
1437 struct dyn_ftrace *rec; 1997 struct dyn_ftrace *rec;
1438 struct ftrace_page *pg; 1998 struct ftrace_page *pg;
1999 int search_len;
1439 int found = 0; 2000 int found = 0;
1440 int i, j; 2001 int type, not;
2002 char *search;
2003 bool exists;
2004 int i;
1441 2005
1442 if (ftrace_disabled) 2006 if (ftrace_disabled)
1443 return -ENODEV; 2007 return -ENODEV;
1444 2008
1445 /* should not be called from interrupt context */ 2009 /* decode regex */
1446 spin_lock(&ftrace_lock); 2010 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2011 if (not)
2012 return -EINVAL;
2013
2014 search_len = strlen(search);
1447 2015
1448 for (pg = ftrace_pages_start; pg; pg = pg->next) { 2016 mutex_lock(&ftrace_lock);
1449 for (i = 0; i < pg->index; i++) { 2017 do_for_each_ftrace_rec(pg, rec) {
1450 rec = &pg->records[i];
1451 2018
1452 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) 2019 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
1453 continue; 2020 break;
2021
2022 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2023 continue;
1454 2024
1455 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 2025 if (ftrace_match_record(rec, search, search_len, type)) {
1456 if (strcmp(str, buffer) == 0) { 2026 /* ensure it is not already in the array */
2027 exists = false;
2028 for (i = 0; i < *idx; i++)
2029 if (array[i] == rec->ip) {
2030 exists = true;
2031 break;
2032 }
2033 if (!exists) {
2034 array[(*idx)++] = rec->ip;
1457 found = 1; 2035 found = 1;
1458 for (j = 0; j < idx; j++)
1459 if (array[j] == rec->ip) {
1460 found = 0;
1461 break;
1462 }
1463 if (found)
1464 array[idx] = rec->ip;
1465 break;
1466 } 2036 }
1467 } 2037 }
1468 } 2038 } while_for_each_ftrace_rec();
1469 spin_unlock(&ftrace_lock); 2039
2040 mutex_unlock(&ftrace_lock);
1470 2041
1471 return found ? 0 : -EINVAL; 2042 return found ? 0 : -EINVAL;
1472} 2043}
@@ -1534,13 +2105,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
1534 } 2105 }
1535 buffer[index] = 0; 2106 buffer[index] = 0;
1536 2107
1537 /* we allow only one at a time */ 2108 /* we allow only one expression at a time */
1538 ret = ftrace_set_func(array, ftrace_graph_count, buffer); 2109 ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
1539 if (ret) 2110 if (ret)
1540 goto out; 2111 goto out;
1541 2112
1542 ftrace_graph_count++;
1543
1544 file->f_pos += read; 2113 file->f_pos += read;
1545 2114
1546 ret = read; 2115 ret = read;
@@ -1552,7 +2121,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
1552 2121
1553static const struct file_operations ftrace_graph_fops = { 2122static const struct file_operations ftrace_graph_fops = {
1554 .open = ftrace_graph_open, 2123 .open = ftrace_graph_open,
1555 .read = ftrace_graph_read, 2124 .read = seq_read,
1556 .write = ftrace_graph_write, 2125 .write = ftrace_graph_write,
1557}; 2126};
1558#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 2127#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -1604,7 +2173,7 @@ static int ftrace_convert_nops(struct module *mod,
1604 unsigned long addr; 2173 unsigned long addr;
1605 unsigned long flags; 2174 unsigned long flags;
1606 2175
1607 mutex_lock(&ftrace_start_lock); 2176 mutex_lock(&ftrace_lock);
1608 p = start; 2177 p = start;
1609 while (p < end) { 2178 while (p < end) {
1610 addr = ftrace_call_adjust(*p++); 2179 addr = ftrace_call_adjust(*p++);
@@ -1623,7 +2192,7 @@ static int ftrace_convert_nops(struct module *mod,
1623 local_irq_save(flags); 2192 local_irq_save(flags);
1624 ftrace_update_code(mod); 2193 ftrace_update_code(mod);
1625 local_irq_restore(flags); 2194 local_irq_restore(flags);
1626 mutex_unlock(&ftrace_start_lock); 2195 mutex_unlock(&ftrace_lock);
1627 2196
1628 return 0; 2197 return 0;
1629} 2198}
@@ -1700,7 +2269,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
1700 if (ftrace_pid_trace == ftrace_swapper_pid) 2269 if (ftrace_pid_trace == ftrace_swapper_pid)
1701 r = sprintf(buf, "swapper tasks\n"); 2270 r = sprintf(buf, "swapper tasks\n");
1702 else if (ftrace_pid_trace) 2271 else if (ftrace_pid_trace)
1703 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace)); 2272 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
1704 else 2273 else
1705 r = sprintf(buf, "no pid\n"); 2274 r = sprintf(buf, "no pid\n");
1706 2275
@@ -1796,7 +2365,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
1796 if (ret < 0) 2365 if (ret < 0)
1797 return ret; 2366 return ret;
1798 2367
1799 mutex_lock(&ftrace_start_lock); 2368 mutex_lock(&ftrace_lock);
1800 if (val < 0) { 2369 if (val < 0) {
1801 /* disable pid tracing */ 2370 /* disable pid tracing */
1802 if (!ftrace_pid_trace) 2371 if (!ftrace_pid_trace)
@@ -1835,12 +2404,12 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
1835 ftrace_startup_enable(0); 2404 ftrace_startup_enable(0);
1836 2405
1837 out: 2406 out:
1838 mutex_unlock(&ftrace_start_lock); 2407 mutex_unlock(&ftrace_lock);
1839 2408
1840 return cnt; 2409 return cnt;
1841} 2410}
1842 2411
1843static struct file_operations ftrace_pid_fops = { 2412static const struct file_operations ftrace_pid_fops = {
1844 .read = ftrace_pid_read, 2413 .read = ftrace_pid_read,
1845 .write = ftrace_pid_write, 2414 .write = ftrace_pid_write,
1846}; 2415};
@@ -1863,7 +2432,6 @@ static __init int ftrace_init_debugfs(void)
1863 "'set_ftrace_pid' entry\n"); 2432 "'set_ftrace_pid' entry\n");
1864 return 0; 2433 return 0;
1865} 2434}
1866
1867fs_initcall(ftrace_init_debugfs); 2435fs_initcall(ftrace_init_debugfs);
1868 2436
1869/** 2437/**
@@ -1898,17 +2466,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
1898 if (unlikely(ftrace_disabled)) 2466 if (unlikely(ftrace_disabled))
1899 return -1; 2467 return -1;
1900 2468
1901 mutex_lock(&ftrace_sysctl_lock); 2469 mutex_lock(&ftrace_lock);
1902 2470
1903 ret = __register_ftrace_function(ops); 2471 ret = __register_ftrace_function(ops);
1904 ftrace_startup(0); 2472 ftrace_startup(0);
1905 2473
1906 mutex_unlock(&ftrace_sysctl_lock); 2474 mutex_unlock(&ftrace_lock);
1907 return ret; 2475 return ret;
1908} 2476}
1909 2477
1910/** 2478/**
1911 * unregister_ftrace_function - unresgister a function for profiling. 2479 * unregister_ftrace_function - unregister a function for profiling.
1912 * @ops - ops structure that holds the function to unregister 2480 * @ops - ops structure that holds the function to unregister
1913 * 2481 *
1914 * Unregister a function that was added to be called by ftrace profiling. 2482 * Unregister a function that was added to be called by ftrace profiling.
@@ -1917,10 +2485,10 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
1917{ 2485{
1918 int ret; 2486 int ret;
1919 2487
1920 mutex_lock(&ftrace_sysctl_lock); 2488 mutex_lock(&ftrace_lock);
1921 ret = __unregister_ftrace_function(ops); 2489 ret = __unregister_ftrace_function(ops);
1922 ftrace_shutdown(0); 2490 ftrace_shutdown(0);
1923 mutex_unlock(&ftrace_sysctl_lock); 2491 mutex_unlock(&ftrace_lock);
1924 2492
1925 return ret; 2493 return ret;
1926} 2494}
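For orientation, a minimal sketch of the registration path these hunks now serialize under ftrace_lock. The two-argument callback prototype (ip, parent_ip) is assumed from this kernel generation rather than shown in the diff, and the names are illustrative.

#include <linux/ftrace.h>

/* Runs on every traced function entry while the ops is registered. */
static void notrace my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	/* Must be fast and reentrant; it can fire from almost any context. */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_callback,
};

static int my_tracer_start(void)
{
	/* Takes ftrace_lock, adds the ops, then enables the call sites. */
	return register_ftrace_function(&my_ops);
}

static void my_tracer_stop(void)
{
	/* Removes the ops; ftrace_shutdown() disables tracing if it was the last user. */
	unregister_ftrace_function(&my_ops);
}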
@@ -1935,7 +2503,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
1935 if (unlikely(ftrace_disabled)) 2503 if (unlikely(ftrace_disabled))
1936 return -ENODEV; 2504 return -ENODEV;
1937 2505
1938 mutex_lock(&ftrace_sysctl_lock); 2506 mutex_lock(&ftrace_lock);
1939 2507
1940 ret = proc_dointvec(table, write, file, buffer, lenp, ppos); 2508 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1941 2509
@@ -1964,7 +2532,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
1964 } 2532 }
1965 2533
1966 out: 2534 out:
1967 mutex_unlock(&ftrace_sysctl_lock); 2535 mutex_unlock(&ftrace_lock);
1968 return ret; 2536 return ret;
1969} 2537}
1970 2538
@@ -2029,6 +2597,38 @@ free:
2029 return ret; 2597 return ret;
2030} 2598}
2031 2599
2600static void
2601ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
2602 struct task_struct *next)
2603{
2604 unsigned long long timestamp;
2605 int index;
2606
2607 /*
 2608 * Does the user want to count the time a function was asleep?
2609 * If so, do not update the time stamps.
2610 */
2611 if (trace_flags & TRACE_ITER_SLEEP_TIME)
2612 return;
2613
2614 timestamp = trace_clock_local();
2615
2616 prev->ftrace_timestamp = timestamp;
2617
2618 /* only process tasks that we timestamped */
2619 if (!next->ftrace_timestamp)
2620 return;
2621
2622 /*
2623 * Update all the counters in next to make up for the
2624 * time next was sleeping.
2625 */
2626 timestamp -= next->ftrace_timestamp;
2627
2628 for (index = next->curr_ret_stack; index >= 0; index--)
2629 next->ret_stack[index].calltime += timestamp;
2630}
2631
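To make the arithmetic in ftrace_graph_probe_sched_switch() concrete, here is a small stand-alone sketch of the same adjustment (plain C, deliberately simplified types): the graph tracer reports rettime - calltime, so pushing every still-open frame's calltime forward by the slept interval removes the sleep from the reported duration.

#include <stdio.h>

struct ret_entry {
	unsigned long long calltime;	/* when the function was entered */
};

/* Shift every open frame's start time forward by the slept interval. */
static void discount_sleep(struct ret_entry *stack, int curr,
			   unsigned long long slept)
{
	int i;

	for (i = curr; i >= 0; i--)
		stack[i].calltime += slept;
}

int main(void)
{
	struct ret_entry stack[2] = { { 100 }, { 150 } };
	unsigned long long now = 400, slept = 200;

	discount_sleep(stack, 1, slept);
	/* Reported duration excludes the 200 units of sleep: 400 - 350 = 50. */
	printf("%llu\n", now - stack[1].calltime);
	return 0;
}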
2032/* Allocate a return stack for each task */ 2632/* Allocate a return stack for each task */
2033static int start_graph_tracing(void) 2633static int start_graph_tracing(void)
2034{ 2634{
@@ -2050,6 +2650,13 @@ static int start_graph_tracing(void)
2050 ret = alloc_retstack_tasklist(ret_stack_list); 2650 ret = alloc_retstack_tasklist(ret_stack_list);
2051 } while (ret == -EAGAIN); 2651 } while (ret == -EAGAIN);
2052 2652
2653 if (!ret) {
2654 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
2655 if (ret)
2656 pr_info("ftrace_graph: Couldn't activate tracepoint"
2657 " probe to kernel_sched_switch\n");
2658 }
2659
2053 kfree(ret_stack_list); 2660 kfree(ret_stack_list);
2054 return ret; 2661 return ret;
2055} 2662}
@@ -2080,7 +2687,13 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2080{ 2687{
2081 int ret = 0; 2688 int ret = 0;
2082 2689
2083 mutex_lock(&ftrace_sysctl_lock); 2690 mutex_lock(&ftrace_lock);
2691
2692 /* we currently allow only one tracer registered at a time */
2693 if (atomic_read(&ftrace_graph_active)) {
2694 ret = -EBUSY;
2695 goto out;
2696 }
2084 2697
2085 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; 2698 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2086 register_pm_notifier(&ftrace_suspend_notifier); 2699 register_pm_notifier(&ftrace_suspend_notifier);
@@ -2098,21 +2711,26 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2098 ftrace_startup(FTRACE_START_FUNC_RET); 2711 ftrace_startup(FTRACE_START_FUNC_RET);
2099 2712
2100out: 2713out:
2101 mutex_unlock(&ftrace_sysctl_lock); 2714 mutex_unlock(&ftrace_lock);
2102 return ret; 2715 return ret;
2103} 2716}
2104 2717
2105void unregister_ftrace_graph(void) 2718void unregister_ftrace_graph(void)
2106{ 2719{
2107 mutex_lock(&ftrace_sysctl_lock); 2720 mutex_lock(&ftrace_lock);
2721
2722 if (!unlikely(atomic_read(&ftrace_graph_active)))
2723 goto out;
2108 2724
2109 atomic_dec(&ftrace_graph_active); 2725 atomic_dec(&ftrace_graph_active);
2726 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
2110 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 2727 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2111 ftrace_graph_entry = ftrace_graph_entry_stub; 2728 ftrace_graph_entry = ftrace_graph_entry_stub;
2112 ftrace_shutdown(FTRACE_STOP_FUNC_RET); 2729 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2113 unregister_pm_notifier(&ftrace_suspend_notifier); 2730 unregister_pm_notifier(&ftrace_suspend_notifier);
2114 2731
2115 mutex_unlock(&ftrace_sysctl_lock); 2732 out:
2733 mutex_unlock(&ftrace_lock);
2116} 2734}
2117 2735
2118/* Allocate a return stack for newly created task */ 2736/* Allocate a return stack for newly created task */
@@ -2127,6 +2745,7 @@ void ftrace_graph_init_task(struct task_struct *t)
2127 t->curr_ret_stack = -1; 2745 t->curr_ret_stack = -1;
2128 atomic_set(&t->tracing_graph_pause, 0); 2746 atomic_set(&t->tracing_graph_pause, 0);
2129 atomic_set(&t->trace_overrun, 0); 2747 atomic_set(&t->trace_overrun, 0);
2748 t->ftrace_timestamp = 0;
2130 } else 2749 } else
2131 t->ret_stack = NULL; 2750 t->ret_stack = NULL;
2132} 2751}
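Wrapping up the ftrace.c changes, a hedged sketch of what a graph-tracer client looks like under the new single-tracer rule in register_ftrace_graph(). The ftrace_graph_ent/ftrace_graph_ret callback prototypes are assumed from this kernel generation; only the -EBUSY behaviour and unregister_ftrace_graph() are visible in the hunks above.

#include <linux/ftrace.h>
#include <linux/kernel.h>

/* Called when a traced function is entered; return nonzero to trace its return too. */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;
}

/* Called when the traced function returns. */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* rettime - calltime; sleep is discounted when sleep-time accounting is off. */
}

static int my_graph_start(void)
{
	int ret;

	/* Only one graph tracer may be active at a time; -EBUSY means someone beat us. */
	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
	if (ret)
		pr_info("graph tracer already in use (%d)\n", ret);
	return ret;
}

static void my_graph_stop(void)
{
	unregister_ftrace_graph();
}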
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
new file mode 100644
index 000000000000..ae201b3eda89
--- /dev/null
+++ b/kernel/trace/kmemtrace.c
@@ -0,0 +1,339 @@
1/*
2 * Memory allocator tracing
3 *
4 * Copyright (C) 2008 Eduard - Gabriel Munteanu
5 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
6 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
7 */
8
9#include <linux/dcache.h>
10#include <linux/debugfs.h>
11#include <linux/fs.h>
12#include <linux/seq_file.h>
13#include <trace/kmemtrace.h>
14
15#include "trace.h"
16#include "trace_output.h"
17
 18/* Select an alternative, minimalistic output instead of the original one */
19#define TRACE_KMEM_OPT_MINIMAL 0x1
20
21static struct tracer_opt kmem_opts[] = {
 22 /* Disable the minimalistic output by default */
23 { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
24 { }
25};
26
27static struct tracer_flags kmem_tracer_flags = {
28 .val = 0,
29 .opts = kmem_opts
30};
31
32
33static bool kmem_tracing_enabled __read_mostly;
34static struct trace_array *kmemtrace_array;
35
36static int kmem_trace_init(struct trace_array *tr)
37{
38 int cpu;
39 kmemtrace_array = tr;
40
41 for_each_cpu_mask(cpu, cpu_possible_map)
42 tracing_reset(tr, cpu);
43
44 kmem_tracing_enabled = true;
45
46 return 0;
47}
48
49static void kmem_trace_reset(struct trace_array *tr)
50{
51 kmem_tracing_enabled = false;
52}
53
54static void kmemtrace_headers(struct seq_file *s)
55{
56 /* Don't need headers for the original kmemtrace output */
57 if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
58 return;
59
60 seq_printf(s, "#\n");
61 seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS "
62 " POINTER NODE CALLER\n");
63 seq_printf(s, "# FREE | | | | "
64 " | | | |\n");
65 seq_printf(s, "# |\n\n");
66}
67
68/*
 69 * The following two functions give the original output from kmemtrace,
 70 * or something close to it; they may still be missing a few details
71 */
72static enum print_line_t
73kmemtrace_print_alloc_original(struct trace_iterator *iter,
74 struct kmemtrace_alloc_entry *entry)
75{
76 struct trace_seq *s = &iter->seq;
77 int ret;
78
79 /* Taken from the old linux/kmemtrace.h */
80 ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
81 "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
82 entry->type_id, entry->call_site, (unsigned long) entry->ptr,
83 (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
84 (unsigned long) entry->gfp_flags, entry->node);
85
86 if (!ret)
87 return TRACE_TYPE_PARTIAL_LINE;
88
89 return TRACE_TYPE_HANDLED;
90}
91
92static enum print_line_t
93kmemtrace_print_free_original(struct trace_iterator *iter,
94 struct kmemtrace_free_entry *entry)
95{
96 struct trace_seq *s = &iter->seq;
97 int ret;
98
99 /* Taken from the old linux/kmemtrace.h */
100 ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
101 entry->type_id, entry->call_site, (unsigned long) entry->ptr);
102
103 if (!ret)
104 return TRACE_TYPE_PARTIAL_LINE;
105
106 return TRACE_TYPE_HANDLED;
107}
108
109
 110/* The following two functions provide a more minimalistic output */
111static enum print_line_t
112kmemtrace_print_alloc_compress(struct trace_iterator *iter,
113 struct kmemtrace_alloc_entry *entry)
114{
115 struct trace_seq *s = &iter->seq;
116 int ret;
117
118 /* Alloc entry */
119 ret = trace_seq_printf(s, " + ");
120 if (!ret)
121 return TRACE_TYPE_PARTIAL_LINE;
122
123 /* Type */
124 switch (entry->type_id) {
125 case KMEMTRACE_TYPE_KMALLOC:
126 ret = trace_seq_printf(s, "K ");
127 break;
128 case KMEMTRACE_TYPE_CACHE:
129 ret = trace_seq_printf(s, "C ");
130 break;
131 case KMEMTRACE_TYPE_PAGES:
132 ret = trace_seq_printf(s, "P ");
133 break;
134 default:
135 ret = trace_seq_printf(s, "? ");
136 }
137
138 if (!ret)
139 return TRACE_TYPE_PARTIAL_LINE;
140
141 /* Requested */
142 ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
143 if (!ret)
144 return TRACE_TYPE_PARTIAL_LINE;
145
146 /* Allocated */
147 ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
148 if (!ret)
149 return TRACE_TYPE_PARTIAL_LINE;
150
151 /* Flags
 152 * TODO: would be better to print the names of the GFP flags
153 */
154 ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
155 if (!ret)
156 return TRACE_TYPE_PARTIAL_LINE;
157
158 /* Pointer to allocated */
159 ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
160 if (!ret)
161 return TRACE_TYPE_PARTIAL_LINE;
162
163 /* Node */
164 ret = trace_seq_printf(s, "%4d ", entry->node);
165 if (!ret)
166 return TRACE_TYPE_PARTIAL_LINE;
167
168 /* Call site */
169 ret = seq_print_ip_sym(s, entry->call_site, 0);
170 if (!ret)
171 return TRACE_TYPE_PARTIAL_LINE;
172
173 if (!trace_seq_printf(s, "\n"))
174 return TRACE_TYPE_PARTIAL_LINE;
175
176 return TRACE_TYPE_HANDLED;
177}
178
179static enum print_line_t
180kmemtrace_print_free_compress(struct trace_iterator *iter,
181 struct kmemtrace_free_entry *entry)
182{
183 struct trace_seq *s = &iter->seq;
184 int ret;
185
186 /* Free entry */
187 ret = trace_seq_printf(s, " - ");
188 if (!ret)
189 return TRACE_TYPE_PARTIAL_LINE;
190
191 /* Type */
192 switch (entry->type_id) {
193 case KMEMTRACE_TYPE_KMALLOC:
194 ret = trace_seq_printf(s, "K ");
195 break;
196 case KMEMTRACE_TYPE_CACHE:
197 ret = trace_seq_printf(s, "C ");
198 break;
199 case KMEMTRACE_TYPE_PAGES:
200 ret = trace_seq_printf(s, "P ");
201 break;
202 default:
203 ret = trace_seq_printf(s, "? ");
204 }
205
206 if (!ret)
207 return TRACE_TYPE_PARTIAL_LINE;
208
209 /* Skip requested/allocated/flags */
210 ret = trace_seq_printf(s, " ");
211 if (!ret)
212 return TRACE_TYPE_PARTIAL_LINE;
213
214 /* Pointer to allocated */
215 ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
216 if (!ret)
217 return TRACE_TYPE_PARTIAL_LINE;
218
219 /* Skip node */
220 ret = trace_seq_printf(s, " ");
221 if (!ret)
222 return TRACE_TYPE_PARTIAL_LINE;
223
224 /* Call site */
225 ret = seq_print_ip_sym(s, entry->call_site, 0);
226 if (!ret)
227 return TRACE_TYPE_PARTIAL_LINE;
228
229 if (!trace_seq_printf(s, "\n"))
230 return TRACE_TYPE_PARTIAL_LINE;
231
232 return TRACE_TYPE_HANDLED;
233}
234
235static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
236{
237 struct trace_entry *entry = iter->ent;
238
239 switch (entry->type) {
240 case TRACE_KMEM_ALLOC: {
241 struct kmemtrace_alloc_entry *field;
242 trace_assign_type(field, entry);
243 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
244 return kmemtrace_print_alloc_compress(iter, field);
245 else
246 return kmemtrace_print_alloc_original(iter, field);
247 }
248
249 case TRACE_KMEM_FREE: {
250 struct kmemtrace_free_entry *field;
251 trace_assign_type(field, entry);
252 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
253 return kmemtrace_print_free_compress(iter, field);
254 else
255 return kmemtrace_print_free_original(iter, field);
256 }
257
258 default:
259 return TRACE_TYPE_UNHANDLED;
260 }
261}
262
263/* Trace allocations */
264void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
265 unsigned long call_site,
266 const void *ptr,
267 size_t bytes_req,
268 size_t bytes_alloc,
269 gfp_t gfp_flags,
270 int node)
271{
272 struct ring_buffer_event *event;
273 struct kmemtrace_alloc_entry *entry;
274 struct trace_array *tr = kmemtrace_array;
275
276 if (!kmem_tracing_enabled)
277 return;
278
279 event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
280 sizeof(*entry), 0, 0);
281 if (!event)
282 return;
283 entry = ring_buffer_event_data(event);
284
285 entry->call_site = call_site;
286 entry->ptr = ptr;
287 entry->bytes_req = bytes_req;
288 entry->bytes_alloc = bytes_alloc;
289 entry->gfp_flags = gfp_flags;
290 entry->node = node;
291
292 trace_buffer_unlock_commit(tr, event, 0, 0);
293}
294EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
295
296void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
297 unsigned long call_site,
298 const void *ptr)
299{
300 struct ring_buffer_event *event;
301 struct kmemtrace_free_entry *entry;
302 struct trace_array *tr = kmemtrace_array;
303
304 if (!kmem_tracing_enabled)
305 return;
306
307 event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
308 sizeof(*entry), 0, 0);
309 if (!event)
310 return;
311 entry = ring_buffer_event_data(event);
312 entry->type_id = type_id;
313 entry->call_site = call_site;
314 entry->ptr = ptr;
315
316 trace_buffer_unlock_commit(tr, event, 0, 0);
317}
318EXPORT_SYMBOL(kmemtrace_mark_free);
319
320static struct tracer kmem_tracer __read_mostly = {
321 .name = "kmemtrace",
322 .init = kmem_trace_init,
323 .reset = kmem_trace_reset,
324 .print_line = kmemtrace_print_line,
325 .print_header = kmemtrace_headers,
326 .flags = &kmem_tracer_flags
327};
328
329void kmemtrace_init(void)
330{
331 /* earliest opportunity to start kmem tracing */
332}
333
334static int __init init_kmem_tracer(void)
335{
336 return register_tracer(&kmem_tracer);
337}
338
339device_initcall(init_kmem_tracer);
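To show where these exported hooks sit in practice, a hedged sketch of an allocator wrapper follows. The hook signatures and KMEMTRACE_TYPE_KMALLOC match this file; kmalloc_traced(), the use of _RET_IP_ as the call site, ksize() for bytes_alloc, and -1 for an untracked node are illustrative assumptions.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <trace/kmemtrace.h>

/* Hypothetical wrapper showing where the tracing hooks would sit. */
static void *kmalloc_traced(size_t size, gfp_t flags)
{
	void *ptr = kmalloc(size, flags);

	/* -1: NUMA node not tracked in this sketch. */
	if (ptr)
		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
					  _RET_IP_, ptr,
					  size, ksize(ptr),
					  flags, -1);
	return ptr;
}

static void kfree_traced(const void *ptr)
{
	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr);
	kfree(ptr);
}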
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bd38c5cfd8ad..960cbf44c844 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,21 +4,92 @@
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> 4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */ 5 */
6#include <linux/ring_buffer.h> 6#include <linux/ring_buffer.h>
7#include <linux/trace_clock.h>
8#include <linux/ftrace_irq.h>
7#include <linux/spinlock.h> 9#include <linux/spinlock.h>
8#include <linux/debugfs.h> 10#include <linux/debugfs.h>
9#include <linux/uaccess.h> 11#include <linux/uaccess.h>
12#include <linux/hardirq.h>
10#include <linux/module.h> 13#include <linux/module.h>
11#include <linux/percpu.h> 14#include <linux/percpu.h>
12#include <linux/mutex.h> 15#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h> 16#include <linux/init.h>
15#include <linux/hash.h> 17#include <linux/hash.h>
16#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/cpu.h>
17#include <linux/fs.h> 20#include <linux/fs.h>
18 21
19#include "trace.h" 22#include "trace.h"
20 23
21/* 24/*
25 * The ring buffer is made up of a list of pages. A separate list of pages is
26 * allocated for each CPU. A writer may only write to a buffer that is
27 * associated with the CPU it is currently executing on. A reader may read
28 * from any per cpu buffer.
29 *
30 * The reader is special. For each per cpu buffer, the reader has its own
31 * reader page. When a reader has read the entire reader page, this reader
32 * page is swapped with another page in the ring buffer.
33 *
 34 * Now, as long as the writer is off the reader page, the reader can do
 35 * whatever it wants with that page. The writer will never write to that page
36 * again (as long as it is out of the ring buffer).
37 *
38 * Here's some silly ASCII art.
39 *
40 * +------+
41 * |reader| RING BUFFER
42 * |page |
43 * +------+ +---+ +---+ +---+
44 * | |-->| |-->| |
45 * +---+ +---+ +---+
46 * ^ |
47 * | |
48 * +---------------+
49 *
50 *
51 * +------+
52 * |reader| RING BUFFER
53 * |page |------------------v
54 * +------+ +---+ +---+ +---+
55 * | |-->| |-->| |
56 * +---+ +---+ +---+
57 * ^ |
58 * | |
59 * +---------------+
60 *
61 *
62 * +------+
63 * |reader| RING BUFFER
64 * |page |------------------v
65 * +------+ +---+ +---+ +---+
66 * ^ | |-->| |-->| |
67 * | +---+ +---+ +---+
68 * | |
69 * | |
70 * +------------------------------+
71 *
72 *
73 * +------+
74 * |buffer| RING BUFFER
75 * |page |------------------v
76 * +------+ +---+ +---+ +---+
77 * ^ | | | |-->| |
78 * | New +---+ +---+ +---+
79 * | Reader------^ |
80 * | page |
81 * +------------------------------+
82 *
83 *
84 * After we make this swap, the reader can hand this page off to the splice
85 * code and be done with it. It can even allocate a new page if it needs to
86 * and swap that into the ring buffer.
87 *
88 * We will be using cmpxchg soon to make all this lockless.
89 *
90 */
91
92/*
22 * A fast way to enable or disable all ring buffers is to 93 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers 94 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to. 95 * prevents all ring buffers from being recorded to.
@@ -57,7 +128,9 @@ enum {
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, 128 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
58}; 129};
59 130
60static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 131static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
132
133#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
61 134
62/** 135/**
63 * tracing_on - enable all tracing buffers 136 * tracing_on - enable all tracing buffers
@@ -89,59 +162,92 @@ EXPORT_SYMBOL_GPL(tracing_off);
89 * tracing_off_permanent - permanently disable ring buffers 162 * tracing_off_permanent - permanently disable ring buffers
90 * 163 *
91 * This function, once called, will disable all ring buffers 164 * This function, once called, will disable all ring buffers
92 * permanenty. 165 * permanently.
93 */ 166 */
94void tracing_off_permanent(void) 167void tracing_off_permanent(void)
95{ 168{
96 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 169 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
97} 170}
98 171
172/**
173 * tracing_is_on - show state of ring buffers enabled
174 */
175int tracing_is_on(void)
176{
177 return ring_buffer_flags == RB_BUFFERS_ON;
178}
179EXPORT_SYMBOL_GPL(tracing_is_on);
180
99#include "trace.h" 181#include "trace.h"
100 182
101/* Up this if you want to test the TIME_EXTENTS and normalization */ 183#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
102#define DEBUG_SHIFT 0 184#define RB_ALIGNMENT 4U
185#define RB_MAX_SMALL_DATA 28
186
187enum {
188 RB_LEN_TIME_EXTEND = 8,
189 RB_LEN_TIME_STAMP = 16,
190};
103 191
104/* FIXME!!! */ 192static inline int rb_null_event(struct ring_buffer_event *event)
105u64 ring_buffer_time_stamp(int cpu)
106{ 193{
107 u64 time; 194 return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
195}
108 196
109 preempt_disable_notrace(); 197static inline int rb_discarded_event(struct ring_buffer_event *event)
110 /* shift to debug/test normalization and TIME_EXTENTS */ 198{
111 time = sched_clock() << DEBUG_SHIFT; 199 return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
112 preempt_enable_no_resched_notrace(); 200}
113 201
114 return time; 202static void rb_event_set_padding(struct ring_buffer_event *event)
203{
204 event->type = RINGBUF_TYPE_PADDING;
205 event->time_delta = 0;
115} 206}
116EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
117 207
118void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) 208/**
209 * ring_buffer_event_discard - discard an event in the ring buffer
210 * @buffer: the ring buffer
211 * @event: the event to discard
212 *
 213 * Sometimes an event that is in the ring buffer needs to be ignored.
214 * This function lets the user discard an event in the ring buffer
215 * and then that event will not be read later.
216 *
217 * Note, it is up to the user to be careful with this, and protect
218 * against races. If the user discards an event that has been consumed
219 * it is possible that it could corrupt the ring buffer.
220 */
221void ring_buffer_event_discard(struct ring_buffer_event *event)
119{ 222{
120 /* Just stupid testing the normalize function and deltas */ 223 event->type = RINGBUF_TYPE_PADDING;
121 *ts >>= DEBUG_SHIFT; 224 /* time delta must be non zero */
225 if (!event->time_delta)
226 event->time_delta = 1;
122} 227}
123EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
124 228
125#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) 229static unsigned
126#define RB_ALIGNMENT_SHIFT 2 230rb_event_data_length(struct ring_buffer_event *event)
127#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT) 231{
128#define RB_MAX_SMALL_DATA 28 232 unsigned length;
129 233
130enum { 234 if (event->len)
131 RB_LEN_TIME_EXTEND = 8, 235 length = event->len * RB_ALIGNMENT;
132 RB_LEN_TIME_STAMP = 16, 236 else
133}; 237 length = event->array[0];
238 return length + RB_EVNT_HDR_SIZE;
239}
134 240
135/* inline for ring buffer fast paths */ 241/* inline for ring buffer fast paths */
136static inline unsigned 242static unsigned
137rb_event_length(struct ring_buffer_event *event) 243rb_event_length(struct ring_buffer_event *event)
138{ 244{
139 unsigned length;
140
141 switch (event->type) { 245 switch (event->type) {
142 case RINGBUF_TYPE_PADDING: 246 case RINGBUF_TYPE_PADDING:
143 /* undefined */ 247 if (rb_null_event(event))
144 return -1; 248 /* undefined */
249 return -1;
250 return rb_event_data_length(event);
145 251
146 case RINGBUF_TYPE_TIME_EXTEND: 252 case RINGBUF_TYPE_TIME_EXTEND:
147 return RB_LEN_TIME_EXTEND; 253 return RB_LEN_TIME_EXTEND;
@@ -150,11 +256,7 @@ rb_event_length(struct ring_buffer_event *event)
150 return RB_LEN_TIME_STAMP; 256 return RB_LEN_TIME_STAMP;
151 257
152 case RINGBUF_TYPE_DATA: 258 case RINGBUF_TYPE_DATA:
153 if (event->len) 259 return rb_event_data_length(event);
154 length = event->len << RB_ALIGNMENT_SHIFT;
155 else
156 length = event->array[0];
157 return length + RB_EVNT_HDR_SIZE;
158 default: 260 default:
159 BUG(); 261 BUG();
160 } 262 }
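A quick sketch of how the new discard path might be used by a writer: reserve an event, change your mind, and mark it as padding with ring_buffer_event_discard() so readers skip it (see rb_discarded_event() above). The two-argument reserve/commit prototypes come from a later hunk in this same patch, and committing the discarded slot anyway is an assumption about the intended usage; my_payload and the value test are placeholders.

#include <linux/ring_buffer.h>

struct my_payload {
	unsigned long value;
};

static void write_maybe(struct ring_buffer *my_buffer, unsigned long value)
{
	struct ring_buffer_event *event;
	struct my_payload *payload;

	event = ring_buffer_lock_reserve(my_buffer, sizeof(*payload));
	if (!event)
		return;

	payload = ring_buffer_event_data(event);
	payload->value = value;

	/* Change of heart: turn the reserved slot into padding. */
	if (value == 0)
		ring_buffer_event_discard(event);

	ring_buffer_unlock_commit(my_buffer, event);
}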
@@ -179,7 +281,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
179EXPORT_SYMBOL_GPL(ring_buffer_event_length); 281EXPORT_SYMBOL_GPL(ring_buffer_event_length);
180 282
181/* inline for ring buffer fast paths */ 283/* inline for ring buffer fast paths */
182static inline void * 284static void *
183rb_event_data(struct ring_buffer_event *event) 285rb_event_data(struct ring_buffer_event *event)
184{ 286{
185 BUG_ON(event->type != RINGBUF_TYPE_DATA); 287 BUG_ON(event->type != RINGBUF_TYPE_DATA);
@@ -209,7 +311,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
209 311
210struct buffer_data_page { 312struct buffer_data_page {
211 u64 time_stamp; /* page time stamp */ 313 u64 time_stamp; /* page time stamp */
212 local_t commit; /* write commited index */ 314 local_t commit; /* write committed index */
213 unsigned char data[]; /* data of buffer page */ 315 unsigned char data[]; /* data of buffer page */
214}; 316};
215 317
@@ -225,14 +327,25 @@ static void rb_init_page(struct buffer_data_page *bpage)
225 local_set(&bpage->commit, 0); 327 local_set(&bpage->commit, 0);
226} 328}
227 329
330/**
331 * ring_buffer_page_len - the size of data on the page.
332 * @page: The page to read
333 *
334 * Returns the amount of data on the page, including buffer page header.
335 */
336size_t ring_buffer_page_len(void *page)
337{
338 return local_read(&((struct buffer_data_page *)page)->commit)
339 + BUF_PAGE_HDR_SIZE;
340}
341
228/* 342/*
229 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 343 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
230 * this issue out. 344 * this issue out.
231 */ 345 */
232static inline void free_buffer_page(struct buffer_page *bpage) 346static void free_buffer_page(struct buffer_page *bpage)
233{ 347{
234 if (bpage->page) 348 free_page((unsigned long)bpage->page);
235 free_page((unsigned long)bpage->page);
236 kfree(bpage); 349 kfree(bpage);
237} 350}
238 351
@@ -246,7 +359,7 @@ static inline int test_time_stamp(u64 delta)
246 return 0; 359 return 0;
247} 360}
248 361
249#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data)) 362#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
250 363
251/* 364/*
252 * head_page == tail_page && head == tail then buffer is empty. 365 * head_page == tail_page && head == tail then buffer is empty.
@@ -260,7 +373,7 @@ struct ring_buffer_per_cpu {
260 struct list_head pages; 373 struct list_head pages;
261 struct buffer_page *head_page; /* read from head */ 374 struct buffer_page *head_page; /* read from head */
262 struct buffer_page *tail_page; /* write to tail */ 375 struct buffer_page *tail_page; /* write to tail */
263 struct buffer_page *commit_page; /* commited pages */ 376 struct buffer_page *commit_page; /* committed pages */
264 struct buffer_page *reader_page; 377 struct buffer_page *reader_page;
265 unsigned long overrun; 378 unsigned long overrun;
266 unsigned long entries; 379 unsigned long entries;
@@ -273,12 +386,17 @@ struct ring_buffer {
273 unsigned pages; 386 unsigned pages;
274 unsigned flags; 387 unsigned flags;
275 int cpus; 388 int cpus;
276 cpumask_var_t cpumask;
277 atomic_t record_disabled; 389 atomic_t record_disabled;
390 cpumask_var_t cpumask;
278 391
279 struct mutex mutex; 392 struct mutex mutex;
280 393
281 struct ring_buffer_per_cpu **buffers; 394 struct ring_buffer_per_cpu **buffers;
395
396#ifdef CONFIG_HOTPLUG_CPU
397 struct notifier_block cpu_notify;
398#endif
399 u64 (*clock)(void);
282}; 400};
283 401
284struct ring_buffer_iter { 402struct ring_buffer_iter {
@@ -299,11 +417,35 @@ struct ring_buffer_iter {
299 _____ret; \ 417 _____ret; \
300 }) 418 })
301 419
420/* Up this if you want to test the TIME_EXTENTS and normalization */
421#define DEBUG_SHIFT 0
422
423u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
424{
425 u64 time;
426
427 preempt_disable_notrace();
428 /* shift to debug/test normalization and TIME_EXTENTS */
429 time = buffer->clock() << DEBUG_SHIFT;
430 preempt_enable_no_resched_notrace();
431
432 return time;
433}
434EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
435
436void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
437 int cpu, u64 *ts)
438{
439 /* Just stupid testing the normalize function and deltas */
440 *ts >>= DEBUG_SHIFT;
441}
442EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
443
302/** 444/**
303 * check_pages - integrity check of buffer pages 445 * check_pages - integrity check of buffer pages
304 * @cpu_buffer: CPU buffer with pages to test 446 * @cpu_buffer: CPU buffer with pages to test
305 * 447 *
306 * As a safty measure we check to make sure the data pages have not 448 * As a safety measure we check to make sure the data pages have not
307 * been corrupted. 449 * been corrupted.
308 */ 450 */
309static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 451static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
@@ -421,7 +563,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
421 struct list_head *head = &cpu_buffer->pages; 563 struct list_head *head = &cpu_buffer->pages;
422 struct buffer_page *bpage, *tmp; 564 struct buffer_page *bpage, *tmp;
423 565
424 list_del_init(&cpu_buffer->reader_page->list);
425 free_buffer_page(cpu_buffer->reader_page); 566 free_buffer_page(cpu_buffer->reader_page);
426 567
427 list_for_each_entry_safe(bpage, tmp, head, list) { 568 list_for_each_entry_safe(bpage, tmp, head, list) {
@@ -437,6 +578,11 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
437 */ 578 */
438extern int ring_buffer_page_too_big(void); 579extern int ring_buffer_page_too_big(void);
439 580
581#ifdef CONFIG_HOTPLUG_CPU
582static int rb_cpu_notify(struct notifier_block *self,
583 unsigned long action, void *hcpu);
584#endif
585
440/** 586/**
441 * ring_buffer_alloc - allocate a new ring_buffer 587 * ring_buffer_alloc - allocate a new ring_buffer
442 * @size: the size in bytes per cpu that is needed. 588 * @size: the size in bytes per cpu that is needed.
@@ -469,12 +615,23 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
469 615
470 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 616 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
471 buffer->flags = flags; 617 buffer->flags = flags;
618 buffer->clock = trace_clock_local;
472 619
473 /* need at least two pages */ 620 /* need at least two pages */
474 if (buffer->pages == 1) 621 if (buffer->pages == 1)
475 buffer->pages++; 622 buffer->pages++;
476 623
624 /*
625 * In case of non-hotplug cpu, if the ring-buffer is allocated
626 * in early initcall, it will not be notified of secondary cpus.
 627 * In that case, we need to allocate for all possible cpus.
628 */
629#ifdef CONFIG_HOTPLUG_CPU
630 get_online_cpus();
631 cpumask_copy(buffer->cpumask, cpu_online_mask);
632#else
477 cpumask_copy(buffer->cpumask, cpu_possible_mask); 633 cpumask_copy(buffer->cpumask, cpu_possible_mask);
634#endif
478 buffer->cpus = nr_cpu_ids; 635 buffer->cpus = nr_cpu_ids;
479 636
480 bsize = sizeof(void *) * nr_cpu_ids; 637 bsize = sizeof(void *) * nr_cpu_ids;
@@ -490,6 +647,13 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
490 goto fail_free_buffers; 647 goto fail_free_buffers;
491 } 648 }
492 649
650#ifdef CONFIG_HOTPLUG_CPU
651 buffer->cpu_notify.notifier_call = rb_cpu_notify;
652 buffer->cpu_notify.priority = 0;
653 register_cpu_notifier(&buffer->cpu_notify);
654#endif
655
656 put_online_cpus();
493 mutex_init(&buffer->mutex); 657 mutex_init(&buffer->mutex);
494 658
495 return buffer; 659 return buffer;
@@ -503,6 +667,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
503 667
504 fail_free_cpumask: 668 fail_free_cpumask:
505 free_cpumask_var(buffer->cpumask); 669 free_cpumask_var(buffer->cpumask);
670 put_online_cpus();
506 671
507 fail_free_buffer: 672 fail_free_buffer:
508 kfree(buffer); 673 kfree(buffer);
@@ -519,15 +684,29 @@ ring_buffer_free(struct ring_buffer *buffer)
519{ 684{
520 int cpu; 685 int cpu;
521 686
687 get_online_cpus();
688
689#ifdef CONFIG_HOTPLUG_CPU
690 unregister_cpu_notifier(&buffer->cpu_notify);
691#endif
692
522 for_each_buffer_cpu(buffer, cpu) 693 for_each_buffer_cpu(buffer, cpu)
523 rb_free_cpu_buffer(buffer->buffers[cpu]); 694 rb_free_cpu_buffer(buffer->buffers[cpu]);
524 695
696 put_online_cpus();
697
525 free_cpumask_var(buffer->cpumask); 698 free_cpumask_var(buffer->cpumask);
526 699
527 kfree(buffer); 700 kfree(buffer);
528} 701}
529EXPORT_SYMBOL_GPL(ring_buffer_free); 702EXPORT_SYMBOL_GPL(ring_buffer_free);
530 703
704void ring_buffer_set_clock(struct ring_buffer *buffer,
705 u64 (*clock)(void))
706{
707 buffer->clock = clock;
708}
709
531static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 710static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
532 711
533static void 712static void
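A small sketch of the new per-buffer clock hook added above: ring_buffer_alloc() defaults to trace_clock_local, and ring_buffer_set_clock() accepts any u64 (*)(void) counter. The jiffies-based my_coarse_clock() below is purely illustrative.

#include <linux/jiffies.h>
#include <linux/ring_buffer.h>

/* A deliberately coarse clock: nanoseconds derived from jiffies. */
static u64 my_coarse_clock(void)
{
	return (u64)jiffies_to_usecs(jiffies) * 1000ULL;
}

static struct ring_buffer *make_buffer(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return NULL;

	/* Every subsequent ring_buffer_time_stamp() call uses this clock. */
	ring_buffer_set_clock(buffer, my_coarse_clock);
	return buffer;
}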
@@ -627,16 +806,15 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
627 return size; 806 return size;
628 807
629 mutex_lock(&buffer->mutex); 808 mutex_lock(&buffer->mutex);
809 get_online_cpus();
630 810
631 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 811 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
632 812
633 if (size < buffer_size) { 813 if (size < buffer_size) {
634 814
635 /* easy case, just free pages */ 815 /* easy case, just free pages */
636 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) { 816 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
637 mutex_unlock(&buffer->mutex); 817 goto out_fail;
638 return -1;
639 }
640 818
641 rm_pages = buffer->pages - nr_pages; 819 rm_pages = buffer->pages - nr_pages;
642 820
@@ -655,10 +833,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
655 * add these pages to the cpu_buffers. Otherwise we just free 833 * add these pages to the cpu_buffers. Otherwise we just free
656 * them all and return -ENOMEM; 834 * them all and return -ENOMEM;
657 */ 835 */
658 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) { 836 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
659 mutex_unlock(&buffer->mutex); 837 goto out_fail;
660 return -1;
661 }
662 838
663 new_pages = nr_pages - buffer->pages; 839 new_pages = nr_pages - buffer->pages;
664 840
@@ -683,13 +859,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
683 rb_insert_pages(cpu_buffer, &pages, new_pages); 859 rb_insert_pages(cpu_buffer, &pages, new_pages);
684 } 860 }
685 861
686 if (RB_WARN_ON(buffer, !list_empty(&pages))) { 862 if (RB_WARN_ON(buffer, !list_empty(&pages)))
687 mutex_unlock(&buffer->mutex); 863 goto out_fail;
688 return -1;
689 }
690 864
691 out: 865 out:
692 buffer->pages = nr_pages; 866 buffer->pages = nr_pages;
867 put_online_cpus();
693 mutex_unlock(&buffer->mutex); 868 mutex_unlock(&buffer->mutex);
694 869
695 return size; 870 return size;
@@ -699,15 +874,20 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
699 list_del_init(&bpage->list); 874 list_del_init(&bpage->list);
700 free_buffer_page(bpage); 875 free_buffer_page(bpage);
701 } 876 }
877 put_online_cpus();
702 mutex_unlock(&buffer->mutex); 878 mutex_unlock(&buffer->mutex);
703 return -ENOMEM; 879 return -ENOMEM;
704}
705EXPORT_SYMBOL_GPL(ring_buffer_resize);
706 880
707static inline int rb_null_event(struct ring_buffer_event *event) 881 /*
708{ 882 * Something went totally wrong, and we are too paranoid
709 return event->type == RINGBUF_TYPE_PADDING; 883 * to even clean up the mess.
884 */
885 out_fail:
886 put_online_cpus();
887 mutex_unlock(&buffer->mutex);
888 return -1;
710} 889}
890EXPORT_SYMBOL_GPL(ring_buffer_resize);
711 891
712static inline void * 892static inline void *
713__rb_data_page_index(struct buffer_data_page *bpage, unsigned index) 893__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
@@ -811,7 +991,7 @@ rb_event_index(struct ring_buffer_event *event)
811 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); 991 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
812} 992}
813 993
814static inline int 994static int
815rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 995rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
816 struct ring_buffer_event *event) 996 struct ring_buffer_event *event)
817{ 997{
@@ -825,7 +1005,7 @@ rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
825 rb_commit_index(cpu_buffer) == index; 1005 rb_commit_index(cpu_buffer) == index;
826} 1006}
827 1007
828static inline void 1008static void
829rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, 1009rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
830 struct ring_buffer_event *event) 1010 struct ring_buffer_event *event)
831{ 1011{
@@ -850,7 +1030,7 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
850 local_set(&cpu_buffer->commit_page->page->commit, index); 1030 local_set(&cpu_buffer->commit_page->page->commit, index);
851} 1031}
852 1032
853static inline void 1033static void
854rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 1034rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
855{ 1035{
856 /* 1036 /*
@@ -896,7 +1076,7 @@ static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
896 cpu_buffer->reader_page->read = 0; 1076 cpu_buffer->reader_page->read = 0;
897} 1077}
898 1078
899static inline void rb_inc_iter(struct ring_buffer_iter *iter) 1079static void rb_inc_iter(struct ring_buffer_iter *iter)
900{ 1080{
901 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1081 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
902 1082
@@ -926,7 +1106,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
926 * and with this, we can determine what to place into the 1106 * and with this, we can determine what to place into the
927 * data field. 1107 * data field.
928 */ 1108 */
929static inline void 1109static void
930rb_update_event(struct ring_buffer_event *event, 1110rb_update_event(struct ring_buffer_event *event,
931 unsigned type, unsigned length) 1111 unsigned type, unsigned length)
932{ 1112{
@@ -938,15 +1118,11 @@ rb_update_event(struct ring_buffer_event *event,
938 break; 1118 break;
939 1119
940 case RINGBUF_TYPE_TIME_EXTEND: 1120 case RINGBUF_TYPE_TIME_EXTEND:
941 event->len = 1121 event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
942 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
943 >> RB_ALIGNMENT_SHIFT;
944 break; 1122 break;
945 1123
946 case RINGBUF_TYPE_TIME_STAMP: 1124 case RINGBUF_TYPE_TIME_STAMP:
947 event->len = 1125 event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
948 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
949 >> RB_ALIGNMENT_SHIFT;
950 break; 1126 break;
951 1127
952 case RINGBUF_TYPE_DATA: 1128 case RINGBUF_TYPE_DATA:
@@ -955,16 +1131,14 @@ rb_update_event(struct ring_buffer_event *event,
955 event->len = 0; 1131 event->len = 0;
956 event->array[0] = length; 1132 event->array[0] = length;
957 } else 1133 } else
958 event->len = 1134 event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
959 (length + (RB_ALIGNMENT-1))
960 >> RB_ALIGNMENT_SHIFT;
961 break; 1135 break;
962 default: 1136 default:
963 BUG(); 1137 BUG();
964 } 1138 }
965} 1139}
966 1140
967static inline unsigned rb_calculate_event_length(unsigned length) 1141static unsigned rb_calculate_event_length(unsigned length)
968{ 1142{
969 struct ring_buffer_event event; /* Used only for sizeof array */ 1143 struct ring_buffer_event event; /* Used only for sizeof array */
970 1144
@@ -990,6 +1164,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
990 struct ring_buffer *buffer = cpu_buffer->buffer; 1164 struct ring_buffer *buffer = cpu_buffer->buffer;
991 struct ring_buffer_event *event; 1165 struct ring_buffer_event *event;
992 unsigned long flags; 1166 unsigned long flags;
1167 bool lock_taken = false;
993 1168
994 commit_page = cpu_buffer->commit_page; 1169 commit_page = cpu_buffer->commit_page;
995 /* we just need to protect against interrupts */ 1170 /* we just need to protect against interrupts */
@@ -1003,7 +1178,30 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1003 struct buffer_page *next_page = tail_page; 1178 struct buffer_page *next_page = tail_page;
1004 1179
1005 local_irq_save(flags); 1180 local_irq_save(flags);
1006 __raw_spin_lock(&cpu_buffer->lock); 1181 /*
1182 * Since the write to the buffer is still not
1183 * fully lockless, we must be careful with NMIs.
1184 * The locks in the writers are taken when a write
1185 * crosses to a new page. The locks protect against
1186 * races with the readers (this will soon be fixed
1187 * with a lockless solution).
1188 *
 1189 * Because we cannot protect against NMIs, and we
1190 * want to keep traces reentrant, we need to manage
1191 * what happens when we are in an NMI.
1192 *
1193 * NMIs can happen after we take the lock.
1194 * If we are in an NMI, only take the lock
1195 * if it is not already taken. Otherwise
1196 * simply fail.
1197 */
1198 if (unlikely(in_nmi())) {
1199 if (!__raw_spin_trylock(&cpu_buffer->lock))
1200 goto out_reset;
1201 } else
1202 __raw_spin_lock(&cpu_buffer->lock);
1203
1204 lock_taken = true;
1007 1205
1008 rb_inc_page(cpu_buffer, &next_page); 1206 rb_inc_page(cpu_buffer, &next_page);
1009 1207
@@ -1012,7 +1210,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1012 1210
1013 /* we grabbed the lock before incrementing */ 1211 /* we grabbed the lock before incrementing */
1014 if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) 1212 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1015 goto out_unlock; 1213 goto out_reset;
1016 1214
1017 /* 1215 /*
1018 * If for some reason, we had an interrupt storm that made 1216 * If for some reason, we had an interrupt storm that made
@@ -1021,12 +1219,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1021 */ 1219 */
1022 if (unlikely(next_page == commit_page)) { 1220 if (unlikely(next_page == commit_page)) {
1023 WARN_ON_ONCE(1); 1221 WARN_ON_ONCE(1);
1024 goto out_unlock; 1222 goto out_reset;
1025 } 1223 }
1026 1224
1027 if (next_page == head_page) { 1225 if (next_page == head_page) {
1028 if (!(buffer->flags & RB_FL_OVERWRITE)) 1226 if (!(buffer->flags & RB_FL_OVERWRITE))
1029 goto out_unlock; 1227 goto out_reset;
1030 1228
1031 /* tail_page has not moved yet? */ 1229 /* tail_page has not moved yet? */
1032 if (tail_page == cpu_buffer->tail_page) { 1230 if (tail_page == cpu_buffer->tail_page) {
@@ -1050,7 +1248,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1050 cpu_buffer->tail_page = next_page; 1248 cpu_buffer->tail_page = next_page;
1051 1249
1052 /* reread the time stamp */ 1250 /* reread the time stamp */
1053 *ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1251 *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
1054 cpu_buffer->tail_page->page->time_stamp = *ts; 1252 cpu_buffer->tail_page->page->time_stamp = *ts;
1055 } 1253 }
1056 1254
@@ -1060,7 +1258,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1060 if (tail < BUF_PAGE_SIZE) { 1258 if (tail < BUF_PAGE_SIZE) {
1061 /* Mark the rest of the page with padding */ 1259 /* Mark the rest of the page with padding */
1062 event = __rb_page_index(tail_page, tail); 1260 event = __rb_page_index(tail_page, tail);
1063 event->type = RINGBUF_TYPE_PADDING; 1261 rb_event_set_padding(event);
1064 } 1262 }
1065 1263
1066 if (tail <= BUF_PAGE_SIZE) 1264 if (tail <= BUF_PAGE_SIZE)
@@ -1100,12 +1298,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1100 1298
1101 return event; 1299 return event;
1102 1300
1103 out_unlock: 1301 out_reset:
1104 /* reset write */ 1302 /* reset write */
1105 if (tail <= BUF_PAGE_SIZE) 1303 if (tail <= BUF_PAGE_SIZE)
1106 local_set(&tail_page->write, tail); 1304 local_set(&tail_page->write, tail);
1107 1305
1108 __raw_spin_unlock(&cpu_buffer->lock); 1306 if (likely(lock_taken))
1307 __raw_spin_unlock(&cpu_buffer->lock);
1109 local_irq_restore(flags); 1308 local_irq_restore(flags);
1110 return NULL; 1309 return NULL;
1111} 1310}
@@ -1192,7 +1391,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1192 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 1391 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1193 return NULL; 1392 return NULL;
1194 1393
1195 ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1394 ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1196 1395
1197 /* 1396 /*
1198 * Only the first commit can update the timestamp. 1397 * Only the first commit can update the timestamp.
@@ -1265,7 +1464,6 @@ static DEFINE_PER_CPU(int, rb_need_resched);
1265 * ring_buffer_lock_reserve - reserve a part of the buffer 1464 * ring_buffer_lock_reserve - reserve a part of the buffer
1266 * @buffer: the ring buffer to reserve from 1465 * @buffer: the ring buffer to reserve from
1267 * @length: the length of the data to reserve (excluding event header) 1466 * @length: the length of the data to reserve (excluding event header)
1268 * @flags: a pointer to save the interrupt flags
1269 * 1467 *
1270 * Returns a reseverd event on the ring buffer to copy directly to. 1468 * Returns a reseverd event on the ring buffer to copy directly to.
1271 * The user of this interface will need to get the body to write into 1469 * The user of this interface will need to get the body to write into
@@ -1278,9 +1476,7 @@ static DEFINE_PER_CPU(int, rb_need_resched);
1278 * If NULL is returned, then nothing has been allocated or locked. 1476 * If NULL is returned, then nothing has been allocated or locked.
1279 */ 1477 */
1280struct ring_buffer_event * 1478struct ring_buffer_event *
1281ring_buffer_lock_reserve(struct ring_buffer *buffer, 1479ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1282 unsigned long length,
1283 unsigned long *flags)
1284{ 1480{
1285 struct ring_buffer_per_cpu *cpu_buffer; 1481 struct ring_buffer_per_cpu *cpu_buffer;
1286 struct ring_buffer_event *event; 1482 struct ring_buffer_event *event;
@@ -1347,15 +1543,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1347 * ring_buffer_unlock_commit - commit a reserved 1543 * ring_buffer_unlock_commit - commit a reserved
1348 * @buffer: The buffer to commit to 1544 * @buffer: The buffer to commit to
1349 * @event: The event pointer to commit. 1545 * @event: The event pointer to commit.
1350 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1351 * 1546 *
1352 * This commits the data to the ring buffer, and releases any locks held. 1547 * This commits the data to the ring buffer, and releases any locks held.
1353 * 1548 *
1354 * Must be paired with ring_buffer_lock_reserve. 1549 * Must be paired with ring_buffer_lock_reserve.
1355 */ 1550 */
1356int ring_buffer_unlock_commit(struct ring_buffer *buffer, 1551int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1357 struct ring_buffer_event *event, 1552 struct ring_buffer_event *event)
1358 unsigned long flags)
1359{ 1553{
1360 struct ring_buffer_per_cpu *cpu_buffer; 1554 struct ring_buffer_per_cpu *cpu_buffer;
1361 int cpu = raw_smp_processor_id(); 1555 int cpu = raw_smp_processor_id();
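Since the flags parameter is gone from both reserve and commit, every caller shrinks to roughly the shape below; struct sample_event and the surrounding names are hypothetical.

#include <linux/errno.h>
#include <linux/ring_buffer.h>

struct sample_event {
	unsigned long ip;
	unsigned long value;
};

static int record_sample(struct ring_buffer *buffer,
			 unsigned long ip, unsigned long value)
{
	struct ring_buffer_event *event;
	struct sample_event *entry;

	/* No flags pointer any more with the new prototype. */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->value = value;

	/* Likewise, commit no longer takes the saved flags. */
	return ring_buffer_unlock_commit(buffer, event);
}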
@@ -1438,7 +1632,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1438} 1632}
1439EXPORT_SYMBOL_GPL(ring_buffer_write); 1633EXPORT_SYMBOL_GPL(ring_buffer_write);
1440 1634
1441static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 1635static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1442{ 1636{
1443 struct buffer_page *reader = cpu_buffer->reader_page; 1637 struct buffer_page *reader = cpu_buffer->reader_page;
1444 struct buffer_page *head = cpu_buffer->head_page; 1638 struct buffer_page *head = cpu_buffer->head_page;
@@ -1528,12 +1722,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1528unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) 1722unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1529{ 1723{
1530 struct ring_buffer_per_cpu *cpu_buffer; 1724 struct ring_buffer_per_cpu *cpu_buffer;
1725 unsigned long ret;
1531 1726
1532 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1727 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1533 return 0; 1728 return 0;
1534 1729
1535 cpu_buffer = buffer->buffers[cpu]; 1730 cpu_buffer = buffer->buffers[cpu];
1536 return cpu_buffer->entries; 1731 ret = cpu_buffer->entries;
1732
1733 return ret;
1537} 1734}
1538EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 1735EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1539 1736
@@ -1545,12 +1742,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1545unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) 1742unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1546{ 1743{
1547 struct ring_buffer_per_cpu *cpu_buffer; 1744 struct ring_buffer_per_cpu *cpu_buffer;
1745 unsigned long ret;
1548 1746
1549 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1747 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1550 return 0; 1748 return 0;
1551 1749
1552 cpu_buffer = buffer->buffers[cpu]; 1750 cpu_buffer = buffer->buffers[cpu];
1553 return cpu_buffer->overrun; 1751 ret = cpu_buffer->overrun;
1752
1753 return ret;
1554} 1754}
1555EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 1755EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1556 1756
@@ -1627,9 +1827,14 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
1627 */ 1827 */
1628void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 1828void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1629{ 1829{
1630 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1830 struct ring_buffer_per_cpu *cpu_buffer;
1631 unsigned long flags; 1831 unsigned long flags;
1632 1832
1833 if (!iter)
1834 return;
1835
1836 cpu_buffer = iter->cpu_buffer;
1837
1633 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 1838 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1634 rb_iter_reset(iter); 1839 rb_iter_reset(iter);
1635 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 1840 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -1803,7 +2008,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1803 2008
1804 event = rb_reader_event(cpu_buffer); 2009 event = rb_reader_event(cpu_buffer);
1805 2010
1806 if (event->type == RINGBUF_TYPE_DATA) 2011 if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
1807 cpu_buffer->entries--; 2012 cpu_buffer->entries--;
1808 2013
1809 rb_update_read_stamp(cpu_buffer, event); 2014 rb_update_read_stamp(cpu_buffer, event);
@@ -1864,9 +2069,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1864 struct buffer_page *reader; 2069 struct buffer_page *reader;
1865 int nr_loops = 0; 2070 int nr_loops = 0;
1866 2071
1867 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1868 return NULL;
1869
1870 cpu_buffer = buffer->buffers[cpu]; 2072 cpu_buffer = buffer->buffers[cpu];
1871 2073
1872 again: 2074 again:
@@ -1889,9 +2091,18 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1889 2091
1890 switch (event->type) { 2092 switch (event->type) {
1891 case RINGBUF_TYPE_PADDING: 2093 case RINGBUF_TYPE_PADDING:
1892 RB_WARN_ON(cpu_buffer, 1); 2094 if (rb_null_event(event))
2095 RB_WARN_ON(cpu_buffer, 1);
2096 /*
 2097	 * Because the writer could be discarding every
 2098	 * event it creates (which would probably be bad),
 2099	 * going back to "again" here could mean we never
 2100	 * catch up, which would trigger the warn-on or
 2101	 * lock up the box. Return the padding; the caller
 2102	 * will release its locks and try again.
2103 */
1893 rb_advance_reader(cpu_buffer); 2104 rb_advance_reader(cpu_buffer);
1894 return NULL; 2105 return event;
1895 2106
1896 case RINGBUF_TYPE_TIME_EXTEND: 2107 case RINGBUF_TYPE_TIME_EXTEND:
1897 /* Internal data, OK to advance */ 2108 /* Internal data, OK to advance */
@@ -1906,7 +2117,8 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1906 case RINGBUF_TYPE_DATA: 2117 case RINGBUF_TYPE_DATA:
1907 if (ts) { 2118 if (ts) {
1908 *ts = cpu_buffer->read_stamp + event->time_delta; 2119 *ts = cpu_buffer->read_stamp + event->time_delta;
1909 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); 2120 ring_buffer_normalize_time_stamp(buffer,
2121 cpu_buffer->cpu, ts);
1910 } 2122 }
1911 return event; 2123 return event;
1912 2124
@@ -1951,8 +2163,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1951 2163
1952 switch (event->type) { 2164 switch (event->type) {
1953 case RINGBUF_TYPE_PADDING: 2165 case RINGBUF_TYPE_PADDING:
1954 rb_inc_iter(iter); 2166 if (rb_null_event(event)) {
1955 goto again; 2167 rb_inc_iter(iter);
2168 goto again;
2169 }
2170 rb_advance_iter(iter);
2171 return event;
1956 2172
1957 case RINGBUF_TYPE_TIME_EXTEND: 2173 case RINGBUF_TYPE_TIME_EXTEND:
1958 /* Internal data, OK to advance */ 2174 /* Internal data, OK to advance */
@@ -1967,7 +2183,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1967 case RINGBUF_TYPE_DATA: 2183 case RINGBUF_TYPE_DATA:
1968 if (ts) { 2184 if (ts) {
1969 *ts = iter->read_stamp + event->time_delta; 2185 *ts = iter->read_stamp + event->time_delta;
1970 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); 2186 ring_buffer_normalize_time_stamp(buffer,
2187 cpu_buffer->cpu, ts);
1971 } 2188 }
1972 return event; 2189 return event;
1973 2190
@@ -1995,10 +2212,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1995 struct ring_buffer_event *event; 2212 struct ring_buffer_event *event;
1996 unsigned long flags; 2213 unsigned long flags;
1997 2214
2215 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2216 return NULL;
2217
2218 again:
1998 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2219 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1999 event = rb_buffer_peek(buffer, cpu, ts); 2220 event = rb_buffer_peek(buffer, cpu, ts);
2000 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2221 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2001 2222
2223 if (event && event->type == RINGBUF_TYPE_PADDING) {
2224 cpu_relax();
2225 goto again;
2226 }
2227
2002 return event; 2228 return event;
2003} 2229}
2004 2230
@@ -2017,10 +2243,16 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2017 struct ring_buffer_event *event; 2243 struct ring_buffer_event *event;
2018 unsigned long flags; 2244 unsigned long flags;
2019 2245
2246 again:
2020 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2247 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2021 event = rb_iter_peek(iter, ts); 2248 event = rb_iter_peek(iter, ts);
2022 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2249 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2023 2250
2251 if (event && event->type == RINGBUF_TYPE_PADDING) {
2252 cpu_relax();
2253 goto again;
2254 }
2255
2024 return event; 2256 return event;
2025} 2257}
2026 2258
@@ -2035,24 +2267,37 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2035struct ring_buffer_event * 2267struct ring_buffer_event *
2036ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) 2268ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2037{ 2269{
2038 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2270 struct ring_buffer_per_cpu *cpu_buffer;
2039 struct ring_buffer_event *event; 2271 struct ring_buffer_event *event = NULL;
2040 unsigned long flags; 2272 unsigned long flags;
2041 2273
2274 again:
2275 /* might be called in atomic */
2276 preempt_disable();
2277
2042 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2278 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2043 return NULL; 2279 goto out;
2044 2280
2281 cpu_buffer = buffer->buffers[cpu];
2045 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2282 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2046 2283
2047 event = rb_buffer_peek(buffer, cpu, ts); 2284 event = rb_buffer_peek(buffer, cpu, ts);
2048 if (!event) 2285 if (!event)
2049 goto out; 2286 goto out_unlock;
2050 2287
2051 rb_advance_reader(cpu_buffer); 2288 rb_advance_reader(cpu_buffer);
2052 2289
2053 out: 2290 out_unlock:
2054 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2291 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2055 2292
2293 out:
2294 preempt_enable();
2295
2296 if (event && event->type == RINGBUF_TYPE_PADDING) {
2297 cpu_relax();
2298 goto again;
2299 }
2300
2056 return event; 2301 return event;
2057} 2302}
2058EXPORT_SYMBOL_GPL(ring_buffer_consume); 2303EXPORT_SYMBOL_GPL(ring_buffer_consume);
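(Because padding events are now consumed and retried inside ring_buffer_consume() itself -- the cpu_relax()/goto again path above -- a consumer only ever sees real data or NULL. A minimal sketch of a drain loop under that assumption; my_handle_record() is a hypothetical callback:

#include <linux/ring_buffer.h>

extern void my_handle_record(void *data, u64 ts);	/* hypothetical */

static void my_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	/* each call removes one event from this CPU's buffer */
	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		my_handle_record(ring_buffer_event_data(event), ts);
}
)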
@@ -2131,6 +2376,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2131 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2376 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2132 unsigned long flags; 2377 unsigned long flags;
2133 2378
2379 again:
2134 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2380 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2135 event = rb_iter_peek(iter, ts); 2381 event = rb_iter_peek(iter, ts);
2136 if (!event) 2382 if (!event)
@@ -2140,6 +2386,11 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2140 out: 2386 out:
2141 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2387 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2142 2388
2389 if (event && event->type == RINGBUF_TYPE_PADDING) {
2390 cpu_relax();
2391 goto again;
2392 }
2393
2143 return event; 2394 return event;
2144} 2395}
2145EXPORT_SYMBOL_GPL(ring_buffer_read); 2396EXPORT_SYMBOL_GPL(ring_buffer_read);
@@ -2232,6 +2483,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
2232 if (!rb_per_cpu_empty(cpu_buffer)) 2483 if (!rb_per_cpu_empty(cpu_buffer))
2233 return 0; 2484 return 0;
2234 } 2485 }
2486
2235 return 1; 2487 return 1;
2236} 2488}
2237EXPORT_SYMBOL_GPL(ring_buffer_empty); 2489EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -2244,12 +2496,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
2244int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) 2496int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2245{ 2497{
2246 struct ring_buffer_per_cpu *cpu_buffer; 2498 struct ring_buffer_per_cpu *cpu_buffer;
2499 int ret;
2247 2500
2248 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2501 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2249 return 1; 2502 return 1;
2250 2503
2251 cpu_buffer = buffer->buffers[cpu]; 2504 cpu_buffer = buffer->buffers[cpu];
2252 return rb_per_cpu_empty(cpu_buffer); 2505 ret = rb_per_cpu_empty(cpu_buffer);
2506
2507
2508 return ret;
2253} 2509}
2254EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 2510EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2255 2511
@@ -2268,18 +2524,36 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2268{ 2524{
2269 struct ring_buffer_per_cpu *cpu_buffer_a; 2525 struct ring_buffer_per_cpu *cpu_buffer_a;
2270 struct ring_buffer_per_cpu *cpu_buffer_b; 2526 struct ring_buffer_per_cpu *cpu_buffer_b;
2527 int ret = -EINVAL;
2271 2528
2272 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 2529 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2273 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 2530 !cpumask_test_cpu(cpu, buffer_b->cpumask))
2274 return -EINVAL; 2531 goto out;
2275 2532
2276 /* At least make sure the two buffers are somewhat the same */ 2533 /* At least make sure the two buffers are somewhat the same */
2277 if (buffer_a->pages != buffer_b->pages) 2534 if (buffer_a->pages != buffer_b->pages)
2278 return -EINVAL; 2535 goto out;
2536
2537 ret = -EAGAIN;
2538
2539 if (ring_buffer_flags != RB_BUFFERS_ON)
2540 goto out;
2541
2542 if (atomic_read(&buffer_a->record_disabled))
2543 goto out;
2544
2545 if (atomic_read(&buffer_b->record_disabled))
2546 goto out;
2279 2547
2280 cpu_buffer_a = buffer_a->buffers[cpu]; 2548 cpu_buffer_a = buffer_a->buffers[cpu];
2281 cpu_buffer_b = buffer_b->buffers[cpu]; 2549 cpu_buffer_b = buffer_b->buffers[cpu];
2282 2550
2551 if (atomic_read(&cpu_buffer_a->record_disabled))
2552 goto out;
2553
2554 if (atomic_read(&cpu_buffer_b->record_disabled))
2555 goto out;
2556
2283 /* 2557 /*
2284 * We can't do a synchronize_sched here because this 2558 * We can't do a synchronize_sched here because this
2285 * function can be called in atomic context. 2559 * function can be called in atomic context.
@@ -2298,18 +2572,21 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2298 atomic_dec(&cpu_buffer_a->record_disabled); 2572 atomic_dec(&cpu_buffer_a->record_disabled);
2299 atomic_dec(&cpu_buffer_b->record_disabled); 2573 atomic_dec(&cpu_buffer_b->record_disabled);
2300 2574
2301 return 0; 2575 ret = 0;
2576out:
2577 return ret;
2302} 2578}
2303EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 2579EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
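(With the checks added above, callers of ring_buffer_swap_cpu() must now also tolerate -EAGAIN, returned when the buffers are globally off or recording is disabled on either buffer; the WARN_ON_ONCE(ret && ret != -EAGAIN) change in update_max_tr_single() in the trace.c diff below reflects that.)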
2304 2580
2305static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, 2581static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2306 struct buffer_data_page *bpage) 2582 struct buffer_data_page *bpage,
2583 unsigned int offset)
2307{ 2584{
2308 struct ring_buffer_event *event; 2585 struct ring_buffer_event *event;
2309 unsigned long head; 2586 unsigned long head;
2310 2587
2311 __raw_spin_lock(&cpu_buffer->lock); 2588 __raw_spin_lock(&cpu_buffer->lock);
2312 for (head = 0; head < local_read(&bpage->commit); 2589 for (head = offset; head < local_read(&bpage->commit);
2313 head += rb_event_length(event)) { 2590 head += rb_event_length(event)) {
2314 2591
2315 event = __rb_data_page_index(bpage, head); 2592 event = __rb_data_page_index(bpage, head);
@@ -2340,8 +2617,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2340 */ 2617 */
2341void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) 2618void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2342{ 2619{
2343 unsigned long addr;
2344 struct buffer_data_page *bpage; 2620 struct buffer_data_page *bpage;
2621 unsigned long addr;
2345 2622
2346 addr = __get_free_page(GFP_KERNEL); 2623 addr = __get_free_page(GFP_KERNEL);
2347 if (!addr) 2624 if (!addr)
@@ -2349,6 +2626,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2349 2626
2350 bpage = (void *)addr; 2627 bpage = (void *)addr;
2351 2628
2629 rb_init_page(bpage);
2630
2352 return bpage; 2631 return bpage;
2353} 2632}
2354 2633
@@ -2368,6 +2647,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2368 * ring_buffer_read_page - extract a page from the ring buffer 2647 * ring_buffer_read_page - extract a page from the ring buffer
2369 * @buffer: buffer to extract from 2648 * @buffer: buffer to extract from
2370 * @data_page: the page to use allocated from ring_buffer_alloc_read_page 2649 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2650 * @len: amount to extract
2371 * @cpu: the cpu of the buffer to extract 2651 * @cpu: the cpu of the buffer to extract
2372 * @full: should the extraction only happen when the page is full. 2652 * @full: should the extraction only happen when the page is full.
2373 * 2653 *
@@ -2377,12 +2657,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2377 * to swap with a page in the ring buffer. 2657 * to swap with a page in the ring buffer.
2378 * 2658 *
2379 * for example: 2659 * for example:
2380 * rpage = ring_buffer_alloc_page(buffer); 2660 * rpage = ring_buffer_alloc_read_page(buffer);
2381 * if (!rpage) 2661 * if (!rpage)
2382 * return error; 2662 * return error;
2383 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); 2663 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2384 * if (ret) 2664 * if (ret >= 0)
2385 * process_page(rpage); 2665 * process_page(rpage, ret);
2386 * 2666 *
2387 * When @full is set, the function will not return true unless 2667 * When @full is set, the function will not return true unless
2388 * the writer is off the reader page. 2668 * the writer is off the reader page.
@@ -2393,72 +2673,118 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2393 * responsible for that. 2673 * responsible for that.
2394 * 2674 *
2395 * Returns: 2675 * Returns:
2396 * 1 if data has been transferred 2676 * >=0 if data has been transferred, returns the offset of consumed data.
2397 * 0 if no data has been transferred. 2677 * <0 if no data has been transferred.
2398 */ 2678 */
2399int ring_buffer_read_page(struct ring_buffer *buffer, 2679int ring_buffer_read_page(struct ring_buffer *buffer,
2400 void **data_page, int cpu, int full) 2680 void **data_page, size_t len, int cpu, int full)
2401{ 2681{
2402 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2682 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2403 struct ring_buffer_event *event; 2683 struct ring_buffer_event *event;
2404 struct buffer_data_page *bpage; 2684 struct buffer_data_page *bpage;
2685 struct buffer_page *reader;
2405 unsigned long flags; 2686 unsigned long flags;
2406 int ret = 0; 2687 unsigned int commit;
2688 unsigned int read;
2689 u64 save_timestamp;
2690 int ret = -1;
2691
2692 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2693 goto out;
2694
2695 /*
2696 * If len is not big enough to hold the page header, then
 2697	 * we cannot copy anything.
2698 */
2699 if (len <= BUF_PAGE_HDR_SIZE)
2700 goto out;
2701
2702 len -= BUF_PAGE_HDR_SIZE;
2407 2703
2408 if (!data_page) 2704 if (!data_page)
2409 return 0; 2705 goto out;
2410 2706
2411 bpage = *data_page; 2707 bpage = *data_page;
2412 if (!bpage) 2708 if (!bpage)
2413 return 0; 2709 goto out;
2414 2710
2415 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2711 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2416 2712
2417 /* 2713 reader = rb_get_reader_page(cpu_buffer);
2418 * rb_buffer_peek will get the next ring buffer if 2714 if (!reader)
2419 * the current reader page is empty. 2715 goto out_unlock;
2420 */ 2716
2421 event = rb_buffer_peek(buffer, cpu, NULL); 2717 event = rb_reader_event(cpu_buffer);
2422 if (!event) 2718
2423 goto out; 2719 read = reader->read;
2720 commit = rb_page_commit(reader);
2424 2721
2425 /* check for data */
2426 if (!local_read(&cpu_buffer->reader_page->page->commit))
2427 goto out;
2428 /* 2722 /*
2429 * If the writer is already off of the read page, then simply 2723 * If this page has been partially read or
2430 * switch the read page with the given page. Otherwise 2724 * if len is not big enough to read the rest of the page or
2431 * we need to copy the data from the reader to the writer. 2725 * a writer is still on the page, then
2726 * we must copy the data from the page to the buffer.
2727 * Otherwise, we can simply swap the page with the one passed in.
2432 */ 2728 */
2433 if (cpu_buffer->reader_page == cpu_buffer->commit_page) { 2729 if (read || (len < (commit - read)) ||
2434 unsigned int read = cpu_buffer->reader_page->read; 2730 cpu_buffer->reader_page == cpu_buffer->commit_page) {
2731 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2732 unsigned int rpos = read;
2733 unsigned int pos = 0;
2734 unsigned int size;
2435 2735
2436 if (full) 2736 if (full)
2437 goto out; 2737 goto out_unlock;
2438 /* The writer is still on the reader page, we must copy */ 2738
2439 bpage = cpu_buffer->reader_page->page; 2739 if (len > (commit - read))
2440 memcpy(bpage->data, 2740 len = (commit - read);
2441 cpu_buffer->reader_page->page->data + read, 2741
2442 local_read(&bpage->commit) - read); 2742 size = rb_event_length(event);
2743
2744 if (len < size)
2745 goto out_unlock;
2443 2746
2444 /* consume what was read */ 2747 /* save the current timestamp, since the user will need it */
2445 cpu_buffer->reader_page += read; 2748 save_timestamp = cpu_buffer->read_stamp;
2446 2749
2750 /* Need to copy one event at a time */
2751 do {
2752 memcpy(bpage->data + pos, rpage->data + rpos, size);
2753
2754 len -= size;
2755
2756 rb_advance_reader(cpu_buffer);
2757 rpos = reader->read;
2758 pos += size;
2759
2760 event = rb_reader_event(cpu_buffer);
2761 size = rb_event_length(event);
2762 } while (len > size);
2763
2764 /* update bpage */
2765 local_set(&bpage->commit, pos);
2766 bpage->time_stamp = save_timestamp;
2767
2768 /* we copied everything to the beginning */
2769 read = 0;
2447 } else { 2770 } else {
2448 /* swap the pages */ 2771 /* swap the pages */
2449 rb_init_page(bpage); 2772 rb_init_page(bpage);
2450 bpage = cpu_buffer->reader_page->page; 2773 bpage = reader->page;
2451 cpu_buffer->reader_page->page = *data_page; 2774 reader->page = *data_page;
2452 cpu_buffer->reader_page->read = 0; 2775 local_set(&reader->write, 0);
2776 reader->read = 0;
2453 *data_page = bpage; 2777 *data_page = bpage;
2778
2779 /* update the entry counter */
2780 rb_remove_entries(cpu_buffer, bpage, read);
2454 } 2781 }
2455 ret = 1; 2782 ret = read;
2456 2783
2457 /* update the entry counter */ 2784 out_unlock:
2458 rb_remove_entries(cpu_buffer, bpage);
2459 out:
2460 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2785 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2461 2786
2787 out:
2462 return ret; 2788 return ret;
2463} 2789}
2464 2790
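(A slightly fuller sketch of the new read_page calling convention, assuming a hypothetical my_process_page() consumer and a full PAGE_SIZE destination: when the reader page is already complete the page is swapped out wholesale, otherwise events are copied into the supplied page, and a return value >= 0 is the read offset within the page handed back.

#include <linux/ring_buffer.h>
#include <linux/mm.h>		/* PAGE_SIZE */
#include <linux/errno.h>

extern void my_process_page(void *page, int offset);	/* hypothetical */

static int my_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer);
	if (!page)
		return -ENOMEM;

	/* full == 0: take whatever is there, even a partial page */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		my_process_page(page, ret);

	ring_buffer_free_read_page(buffer, page);
	return ret;
}
)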
@@ -2466,7 +2792,7 @@ static ssize_t
2466rb_simple_read(struct file *filp, char __user *ubuf, 2792rb_simple_read(struct file *filp, char __user *ubuf,
2467 size_t cnt, loff_t *ppos) 2793 size_t cnt, loff_t *ppos)
2468{ 2794{
2469 long *p = filp->private_data; 2795 unsigned long *p = filp->private_data;
2470 char buf[64]; 2796 char buf[64];
2471 int r; 2797 int r;
2472 2798
@@ -2482,9 +2808,9 @@ static ssize_t
2482rb_simple_write(struct file *filp, const char __user *ubuf, 2808rb_simple_write(struct file *filp, const char __user *ubuf,
2483 size_t cnt, loff_t *ppos) 2809 size_t cnt, loff_t *ppos)
2484{ 2810{
2485 long *p = filp->private_data; 2811 unsigned long *p = filp->private_data;
2486 char buf[64]; 2812 char buf[64];
2487 long val; 2813 unsigned long val;
2488 int ret; 2814 int ret;
2489 2815
2490 if (cnt >= sizeof(buf)) 2816 if (cnt >= sizeof(buf))
@@ -2509,7 +2835,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2509 return cnt; 2835 return cnt;
2510} 2836}
2511 2837
2512static struct file_operations rb_simple_fops = { 2838static const struct file_operations rb_simple_fops = {
2513 .open = tracing_open_generic, 2839 .open = tracing_open_generic,
2514 .read = rb_simple_read, 2840 .read = rb_simple_read,
2515 .write = rb_simple_write, 2841 .write = rb_simple_write,
@@ -2532,3 +2858,42 @@ static __init int rb_init_debugfs(void)
2532} 2858}
2533 2859
2534fs_initcall(rb_init_debugfs); 2860fs_initcall(rb_init_debugfs);
2861
2862#ifdef CONFIG_HOTPLUG_CPU
2863static int rb_cpu_notify(struct notifier_block *self,
2864 unsigned long action, void *hcpu)
2865{
2866 struct ring_buffer *buffer =
2867 container_of(self, struct ring_buffer, cpu_notify);
2868 long cpu = (long)hcpu;
2869
2870 switch (action) {
2871 case CPU_UP_PREPARE:
2872 case CPU_UP_PREPARE_FROZEN:
2873 if (cpu_isset(cpu, *buffer->cpumask))
2874 return NOTIFY_OK;
2875
2876 buffer->buffers[cpu] =
2877 rb_allocate_cpu_buffer(buffer, cpu);
2878 if (!buffer->buffers[cpu]) {
2879 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
2880 cpu);
2881 return NOTIFY_OK;
2882 }
2883 smp_wmb();
2884 cpu_set(cpu, *buffer->cpumask);
2885 break;
2886 case CPU_DOWN_PREPARE:
2887 case CPU_DOWN_PREPARE_FROZEN:
2888 /*
2889 * Do nothing.
2890 * If we were to free the buffer, then the user would
2891 * lose any trace that was in the buffer.
2892 */
2893 break;
2894 default:
2895 break;
2896 }
2897 return NOTIFY_OK;
2898}
2899#endif
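(This notifier only matters once it is hooked up; presumably the buffer allocation path elsewhere in this patch registers it per buffer roughly as sketched below -- an assumption, since that hunk is not shown here:

#include <linux/cpu.h>

#ifdef CONFIG_HOTPLUG_CPU
/* assumed wiring: point the per-buffer notifier block at rb_cpu_notify */
static void my_register_hotplug(struct ring_buffer *buffer)
{
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
}
#endif
)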
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 17bb88d86ac2..a0174a40c563 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -11,32 +11,33 @@
11 * Copyright (C) 2004-2006 Ingo Molnar 11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 William Lee Irwin III 12 * Copyright (C) 2004 William Lee Irwin III
13 */ 13 */
14#include <linux/ring_buffer.h>
14#include <linux/utsrelease.h> 15#include <linux/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
15#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
16#include <linux/seq_file.h> 19#include <linux/seq_file.h>
17#include <linux/notifier.h> 20#include <linux/notifier.h>
21#include <linux/irqflags.h>
18#include <linux/debugfs.h> 22#include <linux/debugfs.h>
19#include <linux/pagemap.h> 23#include <linux/pagemap.h>
20#include <linux/hardirq.h> 24#include <linux/hardirq.h>
21#include <linux/linkage.h> 25#include <linux/linkage.h>
22#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/kprobes.h>
23#include <linux/ftrace.h> 28#include <linux/ftrace.h>
24#include <linux/module.h> 29#include <linux/module.h>
25#include <linux/percpu.h> 30#include <linux/percpu.h>
31#include <linux/splice.h>
26#include <linux/kdebug.h> 32#include <linux/kdebug.h>
27#include <linux/ctype.h> 33#include <linux/ctype.h>
28#include <linux/init.h> 34#include <linux/init.h>
29#include <linux/poll.h> 35#include <linux/poll.h>
30#include <linux/gfp.h> 36#include <linux/gfp.h>
31#include <linux/fs.h> 37#include <linux/fs.h>
32#include <linux/kprobes.h>
33#include <linux/writeback.h>
34
35#include <linux/stacktrace.h>
36#include <linux/ring_buffer.h>
37#include <linux/irqflags.h>
38 38
39#include "trace.h" 39#include "trace.h"
40#include "trace_output.h"
40 41
41#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) 42#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
42 43
@@ -44,14 +45,25 @@ unsigned long __read_mostly tracing_max_latency;
44unsigned long __read_mostly tracing_thresh; 45unsigned long __read_mostly tracing_thresh;
45 46
46/* 47/*
48 * On boot up, the ring buffer is set to the minimum size, so that
49 * we do not waste memory on systems that are not using tracing.
50 */
51static int ring_buffer_expanded;
52
53/*
47 * We need to change this state when a selftest is running. 54 * We need to change this state when a selftest is running.
48 * A selftest will look into the ring-buffer to count the 55 * A selftest will look into the ring-buffer to count the
49 * entries inserted during the selftest although some concurrent 56 * entries inserted during the selftest although some concurrent
50 * insertions into the ring-buffer such as ftrace_printk could occur 57 * insertions into the ring-buffer such as trace_printk could occur
51 * at the same time, giving false positive or negative results. 58 * at the same time, giving false positive or negative results.
52 */ 59 */
53static bool __read_mostly tracing_selftest_running; 60static bool __read_mostly tracing_selftest_running;
54 61
62/*
63 * If a tracer is running, we do not want to run SELFTEST.
64 */
65static bool __read_mostly tracing_selftest_disabled;
66
55/* For tracers that don't implement custom flags */ 67/* For tracers that don't implement custom flags */
56static struct tracer_opt dummy_tracer_opt[] = { 68static struct tracer_opt dummy_tracer_opt[] = {
57 { } 69 { }
@@ -73,7 +85,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
73 * of the tracer is successful. But that is the only place that sets 85 * of the tracer is successful. But that is the only place that sets
74 * this back to zero. 86 * this back to zero.
75 */ 87 */
76int tracing_disabled = 1; 88static int tracing_disabled = 1;
77 89
78static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 90static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
79 91
@@ -91,6 +103,9 @@ static inline void ftrace_enable_cpu(void)
91 103
92static cpumask_var_t __read_mostly tracing_buffer_mask; 104static cpumask_var_t __read_mostly tracing_buffer_mask;
93 105
106/* Define which cpu buffers are currently read in trace_pipe */
107static cpumask_var_t tracing_reader_cpumask;
108
94#define for_each_tracing_cpu(cpu) \ 109#define for_each_tracing_cpu(cpu) \
95 for_each_cpu(cpu, tracing_buffer_mask) 110 for_each_cpu(cpu, tracing_buffer_mask)
96 111
@@ -109,14 +124,21 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
109 */ 124 */
110int ftrace_dump_on_oops; 125int ftrace_dump_on_oops;
111 126
112static int tracing_set_tracer(char *buf); 127static int tracing_set_tracer(const char *buf);
128
129#define BOOTUP_TRACER_SIZE 100
130static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
131static char *default_bootup_tracer;
113 132
114static int __init set_ftrace(char *str) 133static int __init set_ftrace(char *str)
115{ 134{
116 tracing_set_tracer(str); 135 strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
136 default_bootup_tracer = bootup_tracer_buf;
137 /* We are using ftrace early, expand it */
138 ring_buffer_expanded = 1;
117 return 1; 139 return 1;
118} 140}
119__setup("ftrace", set_ftrace); 141__setup("ftrace=", set_ftrace);
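(Note the behavioural change: the "ftrace=" boot parameter no longer switches tracers immediately; it records the requested name and marks the ring buffer as expanded, and register_tracer() starts that tracer once it actually registers. Booting with, for example, ftrace=function_graph on the kernel command line therefore enables that tracer during boot and, as the register_tracer() hunk further down shows, disables the startup selftests for it.)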
120 142
121static int __init set_ftrace_dump_on_oops(char *str) 143static int __init set_ftrace_dump_on_oops(char *str)
122{ 144{
@@ -133,13 +155,6 @@ ns2usecs(cycle_t nsec)
133 return nsec; 155 return nsec;
134} 156}
135 157
136cycle_t ftrace_now(int cpu)
137{
138 u64 ts = ring_buffer_time_stamp(cpu);
139 ring_buffer_normalize_time_stamp(cpu, &ts);
140 return ts;
141}
142
143/* 158/*
144 * The global_trace is the descriptor that holds the tracing 159 * The global_trace is the descriptor that holds the tracing
145 * buffers for the live tracing. For each CPU, it contains 160 * buffers for the live tracing. For each CPU, it contains
@@ -156,6 +171,20 @@ static struct trace_array global_trace;
156 171
157static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); 172static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
158 173
174cycle_t ftrace_now(int cpu)
175{
176 u64 ts;
177
178 /* Early boot up does not have a buffer yet */
179 if (!global_trace.buffer)
180 return trace_clock_local();
181
182 ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
183 ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
184
185 return ts;
186}
187
159/* 188/*
160 * The max_tr is used to snapshot the global_trace when a maximum 189 * The max_tr is used to snapshot the global_trace when a maximum
161 * latency is reached. Some tracers will use this to store a maximum 190 * latency is reached. Some tracers will use this to store a maximum
@@ -186,9 +215,6 @@ int tracing_is_enabled(void)
186 return tracer_enabled; 215 return tracer_enabled;
187} 216}
188 217
189/* function tracing enabled */
190int ftrace_function_enabled;
191
192/* 218/*
193 * trace_buf_size is the size in bytes that is allocated 219 * trace_buf_size is the size in bytes that is allocated
194 * for a buffer. Note, the number of bytes is always rounded 220 * for a buffer. Note, the number of bytes is always rounded
@@ -229,7 +255,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
229 255
230/* trace_flags holds trace_options default values */ 256/* trace_flags holds trace_options default values */
231unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | 257unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
232 TRACE_ITER_ANNOTATE; 258 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
233 259
234/** 260/**
235 * trace_wake_up - wake up tasks waiting for trace input 261 * trace_wake_up - wake up tasks waiting for trace input
@@ -280,13 +306,17 @@ static const char *trace_options[] = {
280 "block", 306 "block",
281 "stacktrace", 307 "stacktrace",
282 "sched-tree", 308 "sched-tree",
283 "ftrace_printk", 309 "trace_printk",
284 "ftrace_preempt", 310 "ftrace_preempt",
285 "branch", 311 "branch",
286 "annotate", 312 "annotate",
287 "userstacktrace", 313 "userstacktrace",
288 "sym-userobj", 314 "sym-userobj",
289 "printk-msg-only", 315 "printk-msg-only",
316 "context-info",
317 "latency-format",
318 "global-clock",
319 "sleep-time",
290 NULL 320 NULL
291}; 321};
292 322
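(The four new strings are option names exposed through the tracing "trace_options" debugfs file; each can be set by echoing its name into that file and cleared with a "no" prefix, for example echo latency-format > trace_options from the tracing directory, typically under the debugfs mount.)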
@@ -326,146 +356,37 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
326 data->rt_priority = tsk->rt_priority; 356 data->rt_priority = tsk->rt_priority;
327 357
328 /* record this task's comm */ 358 /* record this task's comm */
329 tracing_record_cmdline(current); 359 tracing_record_cmdline(tsk);
330} 360}
331 361
332/** 362ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
333 * trace_seq_printf - sequence printing of trace information
334 * @s: trace sequence descriptor
335 * @fmt: printf format string
336 *
337 * The tracer may use either sequence operations or its own
338 * copy to user routines. To simplify formating of a trace
339 * trace_seq_printf is used to store strings into a special
340 * buffer (@s). Then the output may be either used by
341 * the sequencer or pulled into another buffer.
342 */
343int
344trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
345{ 363{
346 int len = (PAGE_SIZE - 1) - s->len; 364 int len;
347 va_list ap;
348 int ret; 365 int ret;
349 366
350 if (!len) 367 if (!cnt)
351 return 0;
352
353 va_start(ap, fmt);
354 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
355 va_end(ap);
356
357 /* If we can't write it all, don't bother writing anything */
358 if (ret >= len)
359 return 0;
360
361 s->len += ret;
362
363 return len;
364}
365
366/**
367 * trace_seq_puts - trace sequence printing of simple string
368 * @s: trace sequence descriptor
369 * @str: simple string to record
370 *
371 * The tracer may use either the sequence operations or its own
372 * copy to user routines. This function records a simple string
373 * into a special buffer (@s) for later retrieval by a sequencer
374 * or other mechanism.
375 */
376static int
377trace_seq_puts(struct trace_seq *s, const char *str)
378{
379 int len = strlen(str);
380
381 if (len > ((PAGE_SIZE - 1) - s->len))
382 return 0;
383
384 memcpy(s->buffer + s->len, str, len);
385 s->len += len;
386
387 return len;
388}
389
390static int
391trace_seq_putc(struct trace_seq *s, unsigned char c)
392{
393 if (s->len >= (PAGE_SIZE - 1))
394 return 0;
395
396 s->buffer[s->len++] = c;
397
398 return 1;
399}
400
401static int
402trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
403{
404 if (len > ((PAGE_SIZE - 1) - s->len))
405 return 0; 368 return 0;
406 369
407 memcpy(s->buffer + s->len, mem, len); 370 if (s->len <= s->readpos)
408 s->len += len; 371 return -EBUSY;
409
410 return len;
411}
412
413#define MAX_MEMHEX_BYTES 8
414#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
415
416static int
417trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
418{
419 unsigned char hex[HEX_CHARS];
420 unsigned char *data = mem;
421 int i, j;
422
423#ifdef __BIG_ENDIAN
424 for (i = 0, j = 0; i < len; i++) {
425#else
426 for (i = len-1, j = 0; i >= 0; i--) {
427#endif
428 hex[j++] = hex_asc_hi(data[i]);
429 hex[j++] = hex_asc_lo(data[i]);
430 }
431 hex[j++] = ' ';
432
433 return trace_seq_putmem(s, hex, j);
434}
435
436static int
437trace_seq_path(struct trace_seq *s, struct path *path)
438{
439 unsigned char *p;
440 372
441 if (s->len >= (PAGE_SIZE - 1)) 373 len = s->len - s->readpos;
442 return 0; 374 if (cnt > len)
443 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); 375 cnt = len;
444 if (!IS_ERR(p)) { 376 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
445 p = mangle_path(s->buffer + s->len, p, "\n"); 377 if (ret == cnt)
446 if (p) { 378 return -EFAULT;
447 s->len = p - s->buffer;
448 return 1;
449 }
450 } else {
451 s->buffer[s->len++] = '?';
452 return 1;
453 }
454 379
455 return 0; 380 cnt -= ret;
456}
457 381
458static void 382 s->readpos += cnt;
459trace_seq_reset(struct trace_seq *s) 383 return cnt;
460{
461 s->len = 0;
462 s->readpos = 0;
463} 384}
464 385
465ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) 386static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
466{ 387{
467 int len; 388 int len;
468 int ret; 389 void *ret;
469 390
470 if (s->len <= s->readpos) 391 if (s->len <= s->readpos)
471 return -EBUSY; 392 return -EBUSY;
@@ -473,11 +394,11 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
473 len = s->len - s->readpos; 394 len = s->len - s->readpos;
474 if (cnt > len) 395 if (cnt > len)
475 cnt = len; 396 cnt = len;
476 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); 397 ret = memcpy(buf, s->buffer + s->readpos, cnt);
477 if (ret) 398 if (!ret)
478 return -EFAULT; 399 return -EFAULT;
479 400
480 s->readpos += len; 401 s->readpos += cnt;
481 return cnt; 402 return cnt;
482} 403}
483 404
@@ -489,7 +410,7 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
489 s->buffer[len] = 0; 410 s->buffer[len] = 0;
490 seq_puts(m, s->buffer); 411 seq_puts(m, s->buffer);
491 412
492 trace_seq_reset(s); 413 trace_seq_init(s);
493} 414}
494 415
495/** 416/**
@@ -543,7 +464,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
543 464
544 ftrace_enable_cpu(); 465 ftrace_enable_cpu();
545 466
546 WARN_ON_ONCE(ret); 467 WARN_ON_ONCE(ret && ret != -EAGAIN);
547 468
548 __update_max_tr(tr, tsk, cpu); 469 __update_max_tr(tr, tsk, cpu);
549 __raw_spin_unlock(&ftrace_max_lock); 470 __raw_spin_unlock(&ftrace_max_lock);
@@ -556,6 +477,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
556 * Register a new plugin tracer. 477 * Register a new plugin tracer.
557 */ 478 */
558int register_tracer(struct tracer *type) 479int register_tracer(struct tracer *type)
480__releases(kernel_lock)
481__acquires(kernel_lock)
559{ 482{
560 struct tracer *t; 483 struct tracer *t;
561 int len; 484 int len;
@@ -594,9 +517,12 @@ int register_tracer(struct tracer *type)
594 else 517 else
595 if (!type->flags->opts) 518 if (!type->flags->opts)
596 type->flags->opts = dummy_tracer_opt; 519 type->flags->opts = dummy_tracer_opt;
520 if (!type->wait_pipe)
521 type->wait_pipe = default_wait_pipe;
522
597 523
598#ifdef CONFIG_FTRACE_STARTUP_TEST 524#ifdef CONFIG_FTRACE_STARTUP_TEST
599 if (type->selftest) { 525 if (type->selftest && !tracing_selftest_disabled) {
600 struct tracer *saved_tracer = current_trace; 526 struct tracer *saved_tracer = current_trace;
601 struct trace_array *tr = &global_trace; 527 struct trace_array *tr = &global_trace;
602 int i; 528 int i;
@@ -638,8 +564,26 @@ int register_tracer(struct tracer *type)
638 out: 564 out:
639 tracing_selftest_running = false; 565 tracing_selftest_running = false;
640 mutex_unlock(&trace_types_lock); 566 mutex_unlock(&trace_types_lock);
641 lock_kernel();
642 567
568 if (ret || !default_bootup_tracer)
569 goto out_unlock;
570
571 if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
572 goto out_unlock;
573
574 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
575 /* Do we want this tracer to start on bootup? */
576 tracing_set_tracer(type->name);
577 default_bootup_tracer = NULL;
578 /* disable other selftests, since this will break it. */
579 tracing_selftest_disabled = 1;
580#ifdef CONFIG_FTRACE_STARTUP_TEST
581 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
582 type->name);
583#endif
584
585 out_unlock:
586 lock_kernel();
643 return ret; 587 return ret;
644} 588}
645 589
@@ -658,6 +602,15 @@ void unregister_tracer(struct tracer *type)
658 602
659 found: 603 found:
660 *t = (*t)->next; 604 *t = (*t)->next;
605
606 if (type == current_trace && tracer_enabled) {
607 tracer_enabled = 0;
608 tracing_stop();
609 if (current_trace->stop)
610 current_trace->stop(&global_trace);
611 current_trace = &nop_trace;
612 }
613
661 if (strlen(type->name) != max_tracer_type_len) 614 if (strlen(type->name) != max_tracer_type_len)
662 goto out; 615 goto out;
663 616
@@ -689,19 +642,20 @@ void tracing_reset_online_cpus(struct trace_array *tr)
689} 642}
690 643
691#define SAVED_CMDLINES 128 644#define SAVED_CMDLINES 128
645#define NO_CMDLINE_MAP UINT_MAX
692static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 646static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
693static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 647static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
694static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; 648static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
695static int cmdline_idx; 649static int cmdline_idx;
696static DEFINE_SPINLOCK(trace_cmdline_lock); 650static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
697 651
698/* temporarily disable recording */ 652/* temporarily disable recording */
699atomic_t trace_record_cmdline_disabled __read_mostly; 653static atomic_t trace_record_cmdline_disabled __read_mostly;
700 654
701static void trace_init_cmdlines(void) 655static void trace_init_cmdlines(void)
702{ 656{
703 memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline)); 657 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
704 memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid)); 658 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
705 cmdline_idx = 0; 659 cmdline_idx = 0;
706} 660}
707 661
@@ -738,13 +692,12 @@ void tracing_start(void)
738 return; 692 return;
739 693
740 spin_lock_irqsave(&tracing_start_lock, flags); 694 spin_lock_irqsave(&tracing_start_lock, flags);
741 if (--trace_stop_count) 695 if (--trace_stop_count) {
742 goto out; 696 if (trace_stop_count < 0) {
743 697 /* Someone screwed up their debugging */
744 if (trace_stop_count < 0) { 698 WARN_ON_ONCE(1);
745 /* Someone screwed up their debugging */ 699 trace_stop_count = 0;
746 WARN_ON_ONCE(1); 700 }
747 trace_stop_count = 0;
748 goto out; 701 goto out;
749 } 702 }
750 703
@@ -794,8 +747,7 @@ void trace_stop_cmdline_recording(void);
794 747
795static void trace_save_cmdline(struct task_struct *tsk) 748static void trace_save_cmdline(struct task_struct *tsk)
796{ 749{
797 unsigned map; 750 unsigned pid, idx;
798 unsigned idx;
799 751
800 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) 752 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
801 return; 753 return;
@@ -806,17 +758,24 @@ static void trace_save_cmdline(struct task_struct *tsk)
806 * nor do we want to disable interrupts, 758 * nor do we want to disable interrupts,
807 * so if we miss here, then better luck next time. 759 * so if we miss here, then better luck next time.
808 */ 760 */
809 if (!spin_trylock(&trace_cmdline_lock)) 761 if (!__raw_spin_trylock(&trace_cmdline_lock))
810 return; 762 return;
811 763
812 idx = map_pid_to_cmdline[tsk->pid]; 764 idx = map_pid_to_cmdline[tsk->pid];
813 if (idx >= SAVED_CMDLINES) { 765 if (idx == NO_CMDLINE_MAP) {
814 idx = (cmdline_idx + 1) % SAVED_CMDLINES; 766 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
815 767
816 map = map_cmdline_to_pid[idx]; 768 /*
817 if (map <= PID_MAX_DEFAULT) 769 * Check whether the cmdline buffer at idx has a pid
818 map_pid_to_cmdline[map] = (unsigned)-1; 770 * mapped. We are going to overwrite that entry so we
771 * need to clear the map_pid_to_cmdline. Otherwise we
772 * would read the new comm for the old pid.
773 */
774 pid = map_cmdline_to_pid[idx];
775 if (pid != NO_CMDLINE_MAP)
776 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
819 777
778 map_cmdline_to_pid[idx] = tsk->pid;
820 map_pid_to_cmdline[tsk->pid] = idx; 779 map_pid_to_cmdline[tsk->pid] = idx;
821 780
822 cmdline_idx = idx; 781 cmdline_idx = idx;
@@ -824,33 +783,37 @@ static void trace_save_cmdline(struct task_struct *tsk)
824 783
825 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 784 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
826 785
827 spin_unlock(&trace_cmdline_lock); 786 __raw_spin_unlock(&trace_cmdline_lock);
828} 787}
829 788
830char *trace_find_cmdline(int pid) 789void trace_find_cmdline(int pid, char comm[])
831{ 790{
832 char *cmdline = "<...>";
833 unsigned map; 791 unsigned map;
834 792
835 if (!pid) 793 if (!pid) {
836 return "<idle>"; 794 strcpy(comm, "<idle>");
795 return;
796 }
837 797
838 if (pid > PID_MAX_DEFAULT) 798 if (pid > PID_MAX_DEFAULT) {
839 goto out; 799 strcpy(comm, "<...>");
800 return;
801 }
840 802
803 __raw_spin_lock(&trace_cmdline_lock);
841 map = map_pid_to_cmdline[pid]; 804 map = map_pid_to_cmdline[pid];
842 if (map >= SAVED_CMDLINES) 805 if (map != NO_CMDLINE_MAP)
843 goto out; 806 strcpy(comm, saved_cmdlines[map]);
844 807 else
845 cmdline = saved_cmdlines[map]; 808 strcpy(comm, "<...>");
846 809
847 out: 810 __raw_spin_unlock(&trace_cmdline_lock);
848 return cmdline;
849} 811}
850 812
851void tracing_record_cmdline(struct task_struct *tsk) 813void tracing_record_cmdline(struct task_struct *tsk)
852{ 814{
853 if (atomic_read(&trace_record_cmdline_disabled)) 815 if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
816 !tracing_is_on())
854 return; 817 return;
855 818
856 trace_save_cmdline(tsk); 819 trace_save_cmdline(tsk);
@@ -864,7 +827,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
864 827
865 entry->preempt_count = pc & 0xff; 828 entry->preempt_count = pc & 0xff;
866 entry->pid = (tsk) ? tsk->pid : 0; 829 entry->pid = (tsk) ? tsk->pid : 0;
867 entry->tgid = (tsk) ? tsk->tgid : 0; 830 entry->tgid = (tsk) ? tsk->tgid : 0;
868 entry->flags = 831 entry->flags =
869#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 832#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
870 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 833 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -876,78 +839,132 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
876 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); 839 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
877} 840}
878 841
842struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
843 unsigned char type,
844 unsigned long len,
845 unsigned long flags, int pc)
846{
847 struct ring_buffer_event *event;
848
849 event = ring_buffer_lock_reserve(tr->buffer, len);
850 if (event != NULL) {
851 struct trace_entry *ent = ring_buffer_event_data(event);
852
853 tracing_generic_entry_update(ent, flags, pc);
854 ent->type = type;
855 }
856
857 return event;
858}
859static void ftrace_trace_stack(struct trace_array *tr,
860 unsigned long flags, int skip, int pc);
861static void ftrace_trace_userstack(struct trace_array *tr,
862 unsigned long flags, int pc);
863
864static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
865 struct ring_buffer_event *event,
866 unsigned long flags, int pc,
867 int wake)
868{
869 ring_buffer_unlock_commit(tr->buffer, event);
870
871 ftrace_trace_stack(tr, flags, 6, pc);
872 ftrace_trace_userstack(tr, flags, pc);
873
874 if (wake)
875 trace_wake_up();
876}
877
878void trace_buffer_unlock_commit(struct trace_array *tr,
879 struct ring_buffer_event *event,
880 unsigned long flags, int pc)
881{
882 __trace_buffer_unlock_commit(tr, event, flags, pc, 1);
883}
884
885struct ring_buffer_event *
886trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
887 unsigned long flags, int pc)
888{
889 return trace_buffer_lock_reserve(&global_trace,
890 type, len, flags, pc);
891}
892
893void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
894 unsigned long flags, int pc)
895{
896 return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
897}
898
899void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
900 unsigned long flags, int pc)
901{
902 return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
903}
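(The new trace_buffer_lock_reserve()/trace_current_buffer_*() helpers fold away the tracing_generic_entry_update() and type-setting boilerplate that every call site below used to open-code. A minimal sketch of a tracer emitting its own entry through them, assuming a hypothetical TRACE_MY_EVENT type and entry struct:

#include <linux/irqflags.h>
#include <linux/preempt.h>
#include "trace.h"			/* struct trace_entry, if built inside kernel/trace */

/* TRACE_MY_EVENT and struct my_event_entry are made up for illustration */
struct my_event_entry {
	struct trace_entry	ent;	/* filled in by the helper */
	unsigned long		data;
};

static void my_trace_event(unsigned long data)
{
	struct ring_buffer_event *event;
	struct my_event_entry *entry;
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);
	event = trace_current_buffer_lock_reserve(TRACE_MY_EVENT,
						  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->data = data;
	trace_current_buffer_unlock_commit(event, flags, pc);
}
)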
904
879void 905void
880trace_function(struct trace_array *tr, struct trace_array_cpu *data, 906trace_function(struct trace_array *tr,
881 unsigned long ip, unsigned long parent_ip, unsigned long flags, 907 unsigned long ip, unsigned long parent_ip, unsigned long flags,
882 int pc) 908 int pc)
883{ 909{
884 struct ring_buffer_event *event; 910 struct ring_buffer_event *event;
885 struct ftrace_entry *entry; 911 struct ftrace_entry *entry;
886 unsigned long irq_flags;
887 912
888 /* If we are reading the ring buffer, don't trace */ 913 /* If we are reading the ring buffer, don't trace */
889 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 914 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
890 return; 915 return;
891 916
892 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 917 event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
893 &irq_flags); 918 flags, pc);
894 if (!event) 919 if (!event)
895 return; 920 return;
896 entry = ring_buffer_event_data(event); 921 entry = ring_buffer_event_data(event);
897 tracing_generic_entry_update(&entry->ent, flags, pc);
898 entry->ent.type = TRACE_FN;
899 entry->ip = ip; 922 entry->ip = ip;
900 entry->parent_ip = parent_ip; 923 entry->parent_ip = parent_ip;
901 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 924 ring_buffer_unlock_commit(tr->buffer, event);
902} 925}
903 926
904#ifdef CONFIG_FUNCTION_GRAPH_TRACER 927#ifdef CONFIG_FUNCTION_GRAPH_TRACER
905static void __trace_graph_entry(struct trace_array *tr, 928static int __trace_graph_entry(struct trace_array *tr,
906 struct trace_array_cpu *data,
907 struct ftrace_graph_ent *trace, 929 struct ftrace_graph_ent *trace,
908 unsigned long flags, 930 unsigned long flags,
909 int pc) 931 int pc)
910{ 932{
911 struct ring_buffer_event *event; 933 struct ring_buffer_event *event;
912 struct ftrace_graph_ent_entry *entry; 934 struct ftrace_graph_ent_entry *entry;
913 unsigned long irq_flags;
914 935
915 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 936 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
916 return; 937 return 0;
917 938
918 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), 939 event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
919 &irq_flags); 940 sizeof(*entry), flags, pc);
920 if (!event) 941 if (!event)
921 return; 942 return 0;
922 entry = ring_buffer_event_data(event); 943 entry = ring_buffer_event_data(event);
923 tracing_generic_entry_update(&entry->ent, flags, pc);
924 entry->ent.type = TRACE_GRAPH_ENT;
925 entry->graph_ent = *trace; 944 entry->graph_ent = *trace;
926 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); 945 ring_buffer_unlock_commit(global_trace.buffer, event);
946
947 return 1;
927} 948}
928 949
929static void __trace_graph_return(struct trace_array *tr, 950static void __trace_graph_return(struct trace_array *tr,
930 struct trace_array_cpu *data,
931 struct ftrace_graph_ret *trace, 951 struct ftrace_graph_ret *trace,
932 unsigned long flags, 952 unsigned long flags,
933 int pc) 953 int pc)
934{ 954{
935 struct ring_buffer_event *event; 955 struct ring_buffer_event *event;
936 struct ftrace_graph_ret_entry *entry; 956 struct ftrace_graph_ret_entry *entry;
937 unsigned long irq_flags;
938 957
939 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 958 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
940 return; 959 return;
941 960
942 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), 961 event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
943 &irq_flags); 962 sizeof(*entry), flags, pc);
944 if (!event) 963 if (!event)
945 return; 964 return;
946 entry = ring_buffer_event_data(event); 965 entry = ring_buffer_event_data(event);
947 tracing_generic_entry_update(&entry->ent, flags, pc);
948 entry->ent.type = TRACE_GRAPH_RET;
949 entry->ret = *trace; 966 entry->ret = *trace;
950 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); 967 ring_buffer_unlock_commit(global_trace.buffer, event);
951} 968}
952#endif 969#endif
953 970
@@ -957,31 +974,23 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
957 int pc) 974 int pc)
958{ 975{
959 if (likely(!atomic_read(&data->disabled))) 976 if (likely(!atomic_read(&data->disabled)))
960 trace_function(tr, data, ip, parent_ip, flags, pc); 977 trace_function(tr, ip, parent_ip, flags, pc);
961} 978}
962 979
963static void ftrace_trace_stack(struct trace_array *tr, 980static void __ftrace_trace_stack(struct trace_array *tr,
964 struct trace_array_cpu *data, 981 unsigned long flags,
965 unsigned long flags, 982 int skip, int pc)
966 int skip, int pc)
967{ 983{
968#ifdef CONFIG_STACKTRACE 984#ifdef CONFIG_STACKTRACE
969 struct ring_buffer_event *event; 985 struct ring_buffer_event *event;
970 struct stack_entry *entry; 986 struct stack_entry *entry;
971 struct stack_trace trace; 987 struct stack_trace trace;
972 unsigned long irq_flags;
973 988
974 if (!(trace_flags & TRACE_ITER_STACKTRACE)) 989 event = trace_buffer_lock_reserve(tr, TRACE_STACK,
975 return; 990 sizeof(*entry), flags, pc);
976
977 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
978 &irq_flags);
979 if (!event) 991 if (!event)
980 return; 992 return;
981 entry = ring_buffer_event_data(event); 993 entry = ring_buffer_event_data(event);
982 tracing_generic_entry_update(&entry->ent, flags, pc);
983 entry->ent.type = TRACE_STACK;
984
985 memset(&entry->caller, 0, sizeof(entry->caller)); 994 memset(&entry->caller, 0, sizeof(entry->caller));
986 995
987 trace.nr_entries = 0; 996 trace.nr_entries = 0;
@@ -990,38 +999,43 @@ static void ftrace_trace_stack(struct trace_array *tr,
990 trace.entries = entry->caller; 999 trace.entries = entry->caller;
991 1000
992 save_stack_trace(&trace); 1001 save_stack_trace(&trace);
993 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1002 ring_buffer_unlock_commit(tr->buffer, event);
994#endif 1003#endif
995} 1004}
996 1005
1006static void ftrace_trace_stack(struct trace_array *tr,
1007 unsigned long flags,
1008 int skip, int pc)
1009{
1010 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1011 return;
1012
1013 __ftrace_trace_stack(tr, flags, skip, pc);
1014}
1015
997void __trace_stack(struct trace_array *tr, 1016void __trace_stack(struct trace_array *tr,
998 struct trace_array_cpu *data,
999 unsigned long flags, 1017 unsigned long flags,
1000 int skip) 1018 int skip, int pc)
1001{ 1019{
1002 ftrace_trace_stack(tr, data, flags, skip, preempt_count()); 1020 __ftrace_trace_stack(tr, flags, skip, pc);
1003} 1021}
1004 1022
1005static void ftrace_trace_userstack(struct trace_array *tr, 1023static void ftrace_trace_userstack(struct trace_array *tr,
1006 struct trace_array_cpu *data, 1024 unsigned long flags, int pc)
1007 unsigned long flags, int pc)
1008{ 1025{
1009#ifdef CONFIG_STACKTRACE 1026#ifdef CONFIG_STACKTRACE
1010 struct ring_buffer_event *event; 1027 struct ring_buffer_event *event;
1011 struct userstack_entry *entry; 1028 struct userstack_entry *entry;
1012 struct stack_trace trace; 1029 struct stack_trace trace;
1013 unsigned long irq_flags;
1014 1030
1015 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) 1031 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1016 return; 1032 return;
1017 1033
1018 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 1034 event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
1019 &irq_flags); 1035 sizeof(*entry), flags, pc);
1020 if (!event) 1036 if (!event)
1021 return; 1037 return;
1022 entry = ring_buffer_event_data(event); 1038 entry = ring_buffer_event_data(event);
1023 tracing_generic_entry_update(&entry->ent, flags, pc);
1024 entry->ent.type = TRACE_USER_STACK;
1025 1039
1026 memset(&entry->caller, 0, sizeof(entry->caller)); 1040 memset(&entry->caller, 0, sizeof(entry->caller));
1027 1041
@@ -1031,70 +1045,58 @@ static void ftrace_trace_userstack(struct trace_array *tr,
1031 trace.entries = entry->caller; 1045 trace.entries = entry->caller;
1032 1046
1033 save_stack_trace_user(&trace); 1047 save_stack_trace_user(&trace);
1034 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1048 ring_buffer_unlock_commit(tr->buffer, event);
1035#endif 1049#endif
1036} 1050}
1037 1051
1038void __trace_userstack(struct trace_array *tr, 1052#ifdef UNUSED
1039 struct trace_array_cpu *data, 1053static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1040 unsigned long flags)
1041{ 1054{
1042 ftrace_trace_userstack(tr, data, flags, preempt_count()); 1055 ftrace_trace_userstack(tr, flags, preempt_count());
1043} 1056}
1057#endif /* UNUSED */
1044 1058
1045static void 1059static void
1046ftrace_trace_special(void *__tr, void *__data, 1060ftrace_trace_special(void *__tr,
1047 unsigned long arg1, unsigned long arg2, unsigned long arg3, 1061 unsigned long arg1, unsigned long arg2, unsigned long arg3,
1048 int pc) 1062 int pc)
1049{ 1063{
1050 struct ring_buffer_event *event; 1064 struct ring_buffer_event *event;
1051 struct trace_array_cpu *data = __data;
1052 struct trace_array *tr = __tr; 1065 struct trace_array *tr = __tr;
1053 struct special_entry *entry; 1066 struct special_entry *entry;
1054 unsigned long irq_flags;
1055 1067
1056 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 1068 event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
1057 &irq_flags); 1069 sizeof(*entry), 0, pc);
1058 if (!event) 1070 if (!event)
1059 return; 1071 return;
1060 entry = ring_buffer_event_data(event); 1072 entry = ring_buffer_event_data(event);
1061 tracing_generic_entry_update(&entry->ent, 0, pc);
1062 entry->ent.type = TRACE_SPECIAL;
1063 entry->arg1 = arg1; 1073 entry->arg1 = arg1;
1064 entry->arg2 = arg2; 1074 entry->arg2 = arg2;
1065 entry->arg3 = arg3; 1075 entry->arg3 = arg3;
1066 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1076 trace_buffer_unlock_commit(tr, event, 0, pc);
1067 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
1068 ftrace_trace_userstack(tr, data, irq_flags, pc);
1069
1070 trace_wake_up();
1071} 1077}
1072 1078
1073void 1079void
1074__trace_special(void *__tr, void *__data, 1080__trace_special(void *__tr, void *__data,
1075 unsigned long arg1, unsigned long arg2, unsigned long arg3) 1081 unsigned long arg1, unsigned long arg2, unsigned long arg3)
1076{ 1082{
1077 ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count()); 1083 ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
1078} 1084}
1079 1085
1080void 1086void
1081tracing_sched_switch_trace(struct trace_array *tr, 1087tracing_sched_switch_trace(struct trace_array *tr,
1082 struct trace_array_cpu *data,
1083 struct task_struct *prev, 1088 struct task_struct *prev,
1084 struct task_struct *next, 1089 struct task_struct *next,
1085 unsigned long flags, int pc) 1090 unsigned long flags, int pc)
1086{ 1091{
1087 struct ring_buffer_event *event; 1092 struct ring_buffer_event *event;
1088 struct ctx_switch_entry *entry; 1093 struct ctx_switch_entry *entry;
1089 unsigned long irq_flags;
1090 1094
1091 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 1095 event = trace_buffer_lock_reserve(tr, TRACE_CTX,
1092 &irq_flags); 1096 sizeof(*entry), flags, pc);
1093 if (!event) 1097 if (!event)
1094 return; 1098 return;
1095 entry = ring_buffer_event_data(event); 1099 entry = ring_buffer_event_data(event);
1096 tracing_generic_entry_update(&entry->ent, flags, pc);
1097 entry->ent.type = TRACE_CTX;
1098 entry->prev_pid = prev->pid; 1100 entry->prev_pid = prev->pid;
1099 entry->prev_prio = prev->prio; 1101 entry->prev_prio = prev->prio;
1100 entry->prev_state = prev->state; 1102 entry->prev_state = prev->state;
@@ -1102,29 +1104,23 @@ tracing_sched_switch_trace(struct trace_array *tr,
1102 entry->next_prio = next->prio; 1104 entry->next_prio = next->prio;
1103 entry->next_state = next->state; 1105 entry->next_state = next->state;
1104 entry->next_cpu = task_cpu(next); 1106 entry->next_cpu = task_cpu(next);
1105 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1107 trace_buffer_unlock_commit(tr, event, flags, pc);
1106 ftrace_trace_stack(tr, data, flags, 5, pc);
1107 ftrace_trace_userstack(tr, data, flags, pc);
1108} 1108}
1109 1109
1110void 1110void
1111tracing_sched_wakeup_trace(struct trace_array *tr, 1111tracing_sched_wakeup_trace(struct trace_array *tr,
1112 struct trace_array_cpu *data,
1113 struct task_struct *wakee, 1112 struct task_struct *wakee,
1114 struct task_struct *curr, 1113 struct task_struct *curr,
1115 unsigned long flags, int pc) 1114 unsigned long flags, int pc)
1116{ 1115{
1117 struct ring_buffer_event *event; 1116 struct ring_buffer_event *event;
1118 struct ctx_switch_entry *entry; 1117 struct ctx_switch_entry *entry;
1119 unsigned long irq_flags;
1120 1118
1121 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 1119 event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
1122 &irq_flags); 1120 sizeof(*entry), flags, pc);
1123 if (!event) 1121 if (!event)
1124 return; 1122 return;
1125 entry = ring_buffer_event_data(event); 1123 entry = ring_buffer_event_data(event);
1126 tracing_generic_entry_update(&entry->ent, flags, pc);
1127 entry->ent.type = TRACE_WAKE;
1128 entry->prev_pid = curr->pid; 1124 entry->prev_pid = curr->pid;
1129 entry->prev_prio = curr->prio; 1125 entry->prev_prio = curr->prio;
1130 entry->prev_state = curr->state; 1126 entry->prev_state = curr->state;
@@ -1132,11 +1128,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
1132 entry->next_prio = wakee->prio; 1128 entry->next_prio = wakee->prio;
1133 entry->next_state = wakee->state; 1129 entry->next_state = wakee->state;
1134 entry->next_cpu = task_cpu(wakee); 1130 entry->next_cpu = task_cpu(wakee);
1135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1136 ftrace_trace_stack(tr, data, flags, 6, pc);
1137 ftrace_trace_userstack(tr, data, flags, pc);
1138 1131
1139 trace_wake_up(); 1132 ring_buffer_unlock_commit(tr->buffer, event);
1133 ftrace_trace_stack(tr, flags, 6, pc);
1134 ftrace_trace_userstack(tr, flags, pc);
1140} 1135}
1141 1136
1142void 1137void
@@ -1157,66 +1152,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1157 data = tr->data[cpu]; 1152 data = tr->data[cpu];
1158 1153
1159 if (likely(atomic_inc_return(&data->disabled) == 1)) 1154 if (likely(atomic_inc_return(&data->disabled) == 1))
1160 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); 1155 ftrace_trace_special(tr, arg1, arg2, arg3, pc);
1161
1162 atomic_dec(&data->disabled);
1163 local_irq_restore(flags);
1164}
1165
1166#ifdef CONFIG_FUNCTION_TRACER
1167static void
1168function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
1169{
1170 struct trace_array *tr = &global_trace;
1171 struct trace_array_cpu *data;
1172 unsigned long flags;
1173 long disabled;
1174 int cpu, resched;
1175 int pc;
1176
1177 if (unlikely(!ftrace_function_enabled))
1178 return;
1179
1180 pc = preempt_count();
1181 resched = ftrace_preempt_disable();
1182 local_save_flags(flags);
1183 cpu = raw_smp_processor_id();
1184 data = tr->data[cpu];
1185 disabled = atomic_inc_return(&data->disabled);
1186
1187 if (likely(disabled == 1))
1188 trace_function(tr, data, ip, parent_ip, flags, pc);
1189
1190 atomic_dec(&data->disabled);
1191 ftrace_preempt_enable(resched);
1192}
1193
1194static void
1195function_trace_call(unsigned long ip, unsigned long parent_ip)
1196{
1197 struct trace_array *tr = &global_trace;
1198 struct trace_array_cpu *data;
1199 unsigned long flags;
1200 long disabled;
1201 int cpu;
1202 int pc;
1203
1204 if (unlikely(!ftrace_function_enabled))
1205 return;
1206
1207 /*
1208 * Need to use raw, since this must be called before the
1209 * recursive protection is performed.
1210 */
1211 local_irq_save(flags);
1212 cpu = raw_smp_processor_id();
1213 data = tr->data[cpu];
1214 disabled = atomic_inc_return(&data->disabled);
1215
1216 if (likely(disabled == 1)) {
1217 pc = preempt_count();
1218 trace_function(tr, data, ip, parent_ip, flags, pc);
1219 }
1220 1156
1221 atomic_dec(&data->disabled); 1157 atomic_dec(&data->disabled);
1222 local_irq_restore(flags); 1158 local_irq_restore(flags);
@@ -1229,6 +1165,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
1229 struct trace_array_cpu *data; 1165 struct trace_array_cpu *data;
1230 unsigned long flags; 1166 unsigned long flags;
1231 long disabled; 1167 long disabled;
1168 int ret;
1232 int cpu; 1169 int cpu;
1233 int pc; 1170 int pc;
1234 1171
@@ -1244,15 +1181,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
1244 disabled = atomic_inc_return(&data->disabled); 1181 disabled = atomic_inc_return(&data->disabled);
1245 if (likely(disabled == 1)) { 1182 if (likely(disabled == 1)) {
1246 pc = preempt_count(); 1183 pc = preempt_count();
1247 __trace_graph_entry(tr, data, trace, flags, pc); 1184 ret = __trace_graph_entry(tr, trace, flags, pc);
1185 } else {
1186 ret = 0;
1248 } 1187 }
1249 /* Only do the atomic if it is not already set */ 1188 /* Only do the atomic if it is not already set */
1250 if (!test_tsk_trace_graph(current)) 1189 if (!test_tsk_trace_graph(current))
1251 set_tsk_trace_graph(current); 1190 set_tsk_trace_graph(current);
1191
1252 atomic_dec(&data->disabled); 1192 atomic_dec(&data->disabled);
1253 local_irq_restore(flags); 1193 local_irq_restore(flags);
1254 1194
1255 return 1; 1195 return ret;
1256} 1196}
1257 1197
1258void trace_graph_return(struct ftrace_graph_ret *trace) 1198void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1270,7 +1210,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
1270 disabled = atomic_inc_return(&data->disabled); 1210 disabled = atomic_inc_return(&data->disabled);
1271 if (likely(disabled == 1)) { 1211 if (likely(disabled == 1)) {
1272 pc = preempt_count(); 1212 pc = preempt_count();
1273 __trace_graph_return(tr, data, trace, flags, pc); 1213 __trace_graph_return(tr, trace, flags, pc);
1274 } 1214 }
1275 if (!trace->depth) 1215 if (!trace->depth)
1276 clear_tsk_trace_graph(current); 1216 clear_tsk_trace_graph(current);
@@ -1279,30 +1219,122 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
1279} 1219}
1280#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 1220#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1281 1221
1282static struct ftrace_ops trace_ops __read_mostly =
1283{
1284 .func = function_trace_call,
1285};
1286 1222
1287void tracing_start_function_trace(void) 1223/**
1224 * trace_vbprintk - write binary msg to tracing buffer
1225 *
1226 */
1227int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1288{ 1228{
1289 ftrace_function_enabled = 0; 1229 static raw_spinlock_t trace_buf_lock =
1230 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
1231 static u32 trace_buf[TRACE_BUF_SIZE];
1290 1232
1291 if (trace_flags & TRACE_ITER_PREEMPTONLY) 1233 struct ring_buffer_event *event;
1292 trace_ops.func = function_trace_call_preempt_only; 1234 struct trace_array *tr = &global_trace;
1293 else 1235 struct trace_array_cpu *data;
1294 trace_ops.func = function_trace_call; 1236 struct bprint_entry *entry;
1237 unsigned long flags;
1238 int resched;
1239 int cpu, len = 0, size, pc;
1240
1241 if (unlikely(tracing_selftest_running || tracing_disabled))
1242 return 0;
1243
1244 /* Don't pollute graph traces with trace_vprintk internals */
1245 pause_graph_tracing();
1246
1247 pc = preempt_count();
1248 resched = ftrace_preempt_disable();
1249 cpu = raw_smp_processor_id();
1250 data = tr->data[cpu];
1251
1252 if (unlikely(atomic_read(&data->disabled)))
1253 goto out;
1254
1255 /* Lockdep uses trace_printk for lock tracing */
1256 local_irq_save(flags);
1257 __raw_spin_lock(&trace_buf_lock);
1258 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1259
1260 if (len > TRACE_BUF_SIZE || len < 0)
1261 goto out_unlock;
1262
1263 size = sizeof(*entry) + sizeof(u32) * len;
1264 event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
1265 if (!event)
1266 goto out_unlock;
1267 entry = ring_buffer_event_data(event);
1268 entry->ip = ip;
1269 entry->fmt = fmt;
1270
1271 memcpy(entry->buf, trace_buf, sizeof(u32) * len);
1272 ring_buffer_unlock_commit(tr->buffer, event);
1273
1274out_unlock:
1275 __raw_spin_unlock(&trace_buf_lock);
1276 local_irq_restore(flags);
1277
1278out:
1279 ftrace_preempt_enable(resched);
1280 unpause_graph_tracing();
1295 1281
1296 register_ftrace_function(&trace_ops); 1282 return len;
1297 ftrace_function_enabled = 1;
1298} 1283}
1284EXPORT_SYMBOL_GPL(trace_vbprintk);
1299 1285
1300void tracing_stop_function_trace(void) 1286int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1301{ 1287{
1302 ftrace_function_enabled = 0; 1288 static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
1303 unregister_ftrace_function(&trace_ops); 1289 static char trace_buf[TRACE_BUF_SIZE];
1290
1291 struct ring_buffer_event *event;
1292 struct trace_array *tr = &global_trace;
1293 struct trace_array_cpu *data;
1294 int cpu, len = 0, size, pc;
1295 struct print_entry *entry;
1296 unsigned long irq_flags;
1297
1298 if (tracing_disabled || tracing_selftest_running)
1299 return 0;
1300
1301 pc = preempt_count();
1302 preempt_disable_notrace();
1303 cpu = raw_smp_processor_id();
1304 data = tr->data[cpu];
1305
1306 if (unlikely(atomic_read(&data->disabled)))
1307 goto out;
1308
1309 pause_graph_tracing();
1310 raw_local_irq_save(irq_flags);
1311 __raw_spin_lock(&trace_buf_lock);
1312 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1313
1314 len = min(len, TRACE_BUF_SIZE-1);
1315 trace_buf[len] = 0;
1316
1317 size = sizeof(*entry) + len + 1;
1318 event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
1319 if (!event)
1320 goto out_unlock;
1321 entry = ring_buffer_event_data(event);
1322 entry->ip = ip;
1323
1324 memcpy(&entry->buf, trace_buf, len);
1325 entry->buf[len] = 0;
1326 ring_buffer_unlock_commit(tr->buffer, event);
1327
1328 out_unlock:
1329 __raw_spin_unlock(&trace_buf_lock);
1330 raw_local_irq_restore(irq_flags);
1331 unpause_graph_tracing();
1332 out:
1333 preempt_enable_notrace();
1334
1335 return len;
1304} 1336}
1305#endif 1337EXPORT_SYMBOL_GPL(trace_vprintk);
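trace_vprintk() above formats into a static scratch buffer guarded by a raw spinlock with interrupts disabled, then copies the result into a ring-buffer record; trace_vbprintk() instead defers formatting by storing the format pointer plus vbin_printf()-packed arguments. A rough userspace analogue of the scratch-buffer discipline (a pthread mutex standing in for the raw spinlock and interrupt disabling, and hypothetical names throughout) could be:

#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>

#define SCRATCH_SIZE 1024

/*
 * One shared scratch buffer, serialized by a lock, mirroring the static
 * trace_buf[] protected by trace_buf_lock in trace_vprintk().
 */
static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;
static char scratch[SCRATCH_SIZE];

/* Stand-in for committing a record to the ring buffer. */
static void toy_commit(const char *msg, int len)
{
	printf("record (%d bytes): %s", len, msg);
}

static int toy_vprintk(const char *fmt, va_list args)
{
	int len;

	pthread_mutex_lock(&scratch_lock);
	len = vsnprintf(scratch, SCRATCH_SIZE, fmt, args);
	if (len < 0)
		len = 0;
	if (len >= SCRATCH_SIZE)
		len = SCRATCH_SIZE - 1;	/* clamp, as the kernel code does */
	scratch[len] = '\0';
	toy_commit(scratch, len);
	pthread_mutex_unlock(&scratch_lock);
	return len;
}

static int toy_printk(const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = toy_vprintk(fmt, args);
	va_end(args);
	return len;
}

int main(void)
{
	toy_printk("value=%d name=%s\n", 42, "demo");
	return 0;
}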
1306 1338
1307enum trace_file_type { 1339enum trace_file_type {
1308 TRACE_FILE_LAT_FMT = 1, 1340 TRACE_FILE_LAT_FMT = 1,
@@ -1345,10 +1377,25 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1345{ 1377{
1346 struct ring_buffer *buffer = iter->tr->buffer; 1378 struct ring_buffer *buffer = iter->tr->buffer;
1347 struct trace_entry *ent, *next = NULL; 1379 struct trace_entry *ent, *next = NULL;
1380 int cpu_file = iter->cpu_file;
1348 u64 next_ts = 0, ts; 1381 u64 next_ts = 0, ts;
1349 int next_cpu = -1; 1382 int next_cpu = -1;
1350 int cpu; 1383 int cpu;
1351 1384
1385 /*
 1386	 * If we are in a per_cpu trace file, don't bother iterating over
 1387	 * all cpus; peek at that cpu directly.
1388 */
1389 if (cpu_file > TRACE_PIPE_ALL_CPU) {
1390 if (ring_buffer_empty_cpu(buffer, cpu_file))
1391 return NULL;
1392 ent = peek_next_entry(iter, cpu_file, ent_ts);
1393 if (ent_cpu)
1394 *ent_cpu = cpu_file;
1395
1396 return ent;
1397 }
1398
1352 for_each_tracing_cpu(cpu) { 1399 for_each_tracing_cpu(cpu) {
1353 1400
1354 if (ring_buffer_empty_cpu(buffer, cpu)) 1401 if (ring_buffer_empty_cpu(buffer, cpu))
@@ -1376,8 +1423,8 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1376} 1423}
1377 1424
1378/* Find the next real entry, without updating the iterator itself */ 1425/* Find the next real entry, without updating the iterator itself */
1379static struct trace_entry * 1426struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1380find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) 1427 int *ent_cpu, u64 *ent_ts)
1381{ 1428{
1382 return __find_next_entry(iter, ent_cpu, ent_ts); 1429 return __find_next_entry(iter, ent_cpu, ent_ts);
1383} 1430}
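__find_next_entry() now takes a fast path when the iterator is bound to a single per-CPU file: instead of scanning every per-CPU buffer for the oldest timestamp, it peeks at just that one buffer. A small, self-contained sketch of the same selection logic over an array of hypothetical per-CPU queues (all names invented):

#include <stdio.h>

#define NR_CPUS 4
#define ALL_CPUS (-1)		/* analogue of TRACE_PIPE_ALL_CPU */

struct toy_entry {
	unsigned long long ts;	/* timestamp; 0 means "queue empty" */
	int data;
};

/* Head entry of each hypothetical per-CPU queue. */
static struct toy_entry heads[NR_CPUS] = {
	{ 300, 1 }, { 100, 2 }, { 0, 0 }, { 200, 3 },
};

static struct toy_entry *peek_cpu(int cpu)
{
	return heads[cpu].ts ? &heads[cpu] : NULL;
}

/*
 * Return the next entry to emit. With a specific cpu_file we peek that
 * queue directly; otherwise we scan all CPUs for the oldest timestamp.
 */
static struct toy_entry *find_next_entry(int cpu_file, int *ent_cpu)
{
	struct toy_entry *ent, *next = NULL;
	int cpu;

	if (cpu_file != ALL_CPUS) {
		ent = peek_cpu(cpu_file);
		if (ent && ent_cpu)
			*ent_cpu = cpu_file;
		return ent;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ent = peek_cpu(cpu);
		if (ent && (!next || ent->ts < next->ts)) {
			next = ent;
			if (ent_cpu)
				*ent_cpu = cpu;
		}
	}
	return next;
}

int main(void)
{
	int cpu = -1;
	struct toy_entry *e = find_next_entry(ALL_CPUS, &cpu);

	if (e)
		printf("oldest entry: cpu=%d ts=%llu data=%d\n", cpu, e->ts, e->data);
	e = find_next_entry(3, &cpu);
	if (e)
		printf("cpu 3 entry: ts=%llu data=%d\n", e->ts, e->data);
	return 0;
}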
@@ -1426,19 +1473,32 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1426 return ent; 1473 return ent;
1427} 1474}
1428 1475
1476/*
 1477 * No locking is necessary here. The worst that can
 1478 * happen is losing events consumed at the same time
 1479 * by a trace_pipe reader.
 1480 * Other than that, we don't risk crashing the ring buffer
 1481 * because it serializes the readers.
 1482 *
 1483 * The current tracer is copied to avoid holding a global
 1484 * lock all around.
1485 */
1429static void *s_start(struct seq_file *m, loff_t *pos) 1486static void *s_start(struct seq_file *m, loff_t *pos)
1430{ 1487{
1431 struct trace_iterator *iter = m->private; 1488 struct trace_iterator *iter = m->private;
1489 static struct tracer *old_tracer;
1490 int cpu_file = iter->cpu_file;
1432 void *p = NULL; 1491 void *p = NULL;
1433 loff_t l = 0; 1492 loff_t l = 0;
1434 int cpu; 1493 int cpu;
1435 1494
1495 /* copy the tracer to avoid using a global lock all around */
1436 mutex_lock(&trace_types_lock); 1496 mutex_lock(&trace_types_lock);
1437 1497 if (unlikely(old_tracer != current_trace && current_trace)) {
1438 if (!current_trace || current_trace != iter->trace) { 1498 old_tracer = current_trace;
1439 mutex_unlock(&trace_types_lock); 1499 *iter->trace = *current_trace;
1440 return NULL;
1441 } 1500 }
1501 mutex_unlock(&trace_types_lock);
1442 1502
1443 atomic_inc(&trace_record_cmdline_disabled); 1503 atomic_inc(&trace_record_cmdline_disabled);
1444 1504
@@ -1449,9 +1509,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1449 1509
1450 ftrace_disable_cpu(); 1510 ftrace_disable_cpu();
1451 1511
1452 for_each_tracing_cpu(cpu) { 1512 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1453 ring_buffer_iter_reset(iter->buffer_iter[cpu]); 1513 for_each_tracing_cpu(cpu)
1454 } 1514 ring_buffer_iter_reset(iter->buffer_iter[cpu]);
1515 } else
1516 ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
1517
1455 1518
1456 ftrace_enable_cpu(); 1519 ftrace_enable_cpu();
1457 1520
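s_start() now takes trace_types_lock only long enough to copy the current tracer into the iterator, rather than bailing out or holding a global lock for the whole read. A compact userspace sketch of that snapshot-then-iterate pattern, with invented names (toy_config, iter_start) and a pthread mutex in place of the kernel mutex:

#include <pthread.h>
#include <stdio.h>

struct toy_config {
	char name[32];
	int verbose;
};

/* Shared, mutable configuration, analogous to current_trace. */
static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_config current_config = { "nop", 0 };

struct toy_iter {
	struct toy_config cfg;	/* private copy used for the whole read */
	int pos;
};

/*
 * Take the lock only long enough to snapshot the shared configuration,
 * then iterate using the private copy without further locking.
 */
static void iter_start(struct toy_iter *iter)
{
	pthread_mutex_lock(&config_lock);
	iter->cfg = current_config;
	pthread_mutex_unlock(&config_lock);
	iter->pos = 0;
}

static void iter_emit(struct toy_iter *iter)
{
	/* The copy stays stable even if current_config changes meanwhile. */
	printf("[%s] entry %d\n", iter->cfg.name, iter->pos++);
}

int main(void)
{
	struct toy_iter iter;

	iter_start(&iter);
	iter_emit(&iter);
	iter_emit(&iter);
	return 0;
}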
@@ -1469,155 +1532,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1469static void s_stop(struct seq_file *m, void *p) 1532static void s_stop(struct seq_file *m, void *p)
1470{ 1533{
1471 atomic_dec(&trace_record_cmdline_disabled); 1534 atomic_dec(&trace_record_cmdline_disabled);
1472 mutex_unlock(&trace_types_lock);
1473}
1474
1475#ifdef CONFIG_KRETPROBES
1476static inline const char *kretprobed(const char *name)
1477{
1478 static const char tramp_name[] = "kretprobe_trampoline";
1479 int size = sizeof(tramp_name);
1480
1481 if (strncmp(tramp_name, name, size) == 0)
1482 return "[unknown/kretprobe'd]";
1483 return name;
1484}
1485#else
1486static inline const char *kretprobed(const char *name)
1487{
1488 return name;
1489}
1490#endif /* CONFIG_KRETPROBES */
1491
1492static int
1493seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
1494{
1495#ifdef CONFIG_KALLSYMS
1496 char str[KSYM_SYMBOL_LEN];
1497 const char *name;
1498
1499 kallsyms_lookup(address, NULL, NULL, NULL, str);
1500
1501 name = kretprobed(str);
1502
1503 return trace_seq_printf(s, fmt, name);
1504#endif
1505 return 1;
1506}
1507
1508static int
1509seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1510 unsigned long address)
1511{
1512#ifdef CONFIG_KALLSYMS
1513 char str[KSYM_SYMBOL_LEN];
1514 const char *name;
1515
1516 sprint_symbol(str, address);
1517 name = kretprobed(str);
1518
1519 return trace_seq_printf(s, fmt, name);
1520#endif
1521 return 1;
1522}
1523
1524#ifndef CONFIG_64BIT
1525# define IP_FMT "%08lx"
1526#else
1527# define IP_FMT "%016lx"
1528#endif
1529
1530int
1531seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1532{
1533 int ret;
1534
1535 if (!ip)
1536 return trace_seq_printf(s, "0");
1537
1538 if (sym_flags & TRACE_ITER_SYM_OFFSET)
1539 ret = seq_print_sym_offset(s, "%s", ip);
1540 else
1541 ret = seq_print_sym_short(s, "%s", ip);
1542
1543 if (!ret)
1544 return 0;
1545
1546 if (sym_flags & TRACE_ITER_SYM_ADDR)
1547 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1548 return ret;
1549}
1550
1551static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
1552 unsigned long ip, unsigned long sym_flags)
1553{
1554 struct file *file = NULL;
1555 unsigned long vmstart = 0;
1556 int ret = 1;
1557
1558 if (mm) {
1559 const struct vm_area_struct *vma;
1560
1561 down_read(&mm->mmap_sem);
1562 vma = find_vma(mm, ip);
1563 if (vma) {
1564 file = vma->vm_file;
1565 vmstart = vma->vm_start;
1566 }
1567 if (file) {
1568 ret = trace_seq_path(s, &file->f_path);
1569 if (ret)
1570 ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
1571 }
1572 up_read(&mm->mmap_sem);
1573 }
1574 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
1575 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1576 return ret;
1577}
1578
1579static int
1580seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
1581 unsigned long sym_flags)
1582{
1583 struct mm_struct *mm = NULL;
1584 int ret = 1;
1585 unsigned int i;
1586
1587 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
1588 struct task_struct *task;
1589 /*
1590 * we do the lookup on the thread group leader,
1591 * since individual threads might have already quit!
1592 */
1593 rcu_read_lock();
1594 task = find_task_by_vpid(entry->ent.tgid);
1595 if (task)
1596 mm = get_task_mm(task);
1597 rcu_read_unlock();
1598 }
1599
1600 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1601 unsigned long ip = entry->caller[i];
1602
1603 if (ip == ULONG_MAX || !ret)
1604 break;
1605 if (i && ret)
1606 ret = trace_seq_puts(s, " <- ");
1607 if (!ip) {
1608 if (ret)
1609 ret = trace_seq_puts(s, "??");
1610 continue;
1611 }
1612 if (!ret)
1613 break;
1614 if (ret)
1615 ret = seq_print_user_ip(s, mm, ip, sym_flags);
1616 }
1617
1618 if (mm)
1619 mmput(mm);
1620 return ret;
1621} 1535}
1622 1536
1623static void print_lat_help_header(struct seq_file *m) 1537static void print_lat_help_header(struct seq_file *m)
@@ -1658,11 +1572,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1658 total = entries + 1572 total = entries +
1659 ring_buffer_overruns(iter->tr->buffer); 1573 ring_buffer_overruns(iter->tr->buffer);
1660 1574
1661 seq_printf(m, "%s latency trace v1.1.5 on %s\n", 1575 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
1662 name, UTS_RELEASE); 1576 name, UTS_RELEASE);
1663 seq_puts(m, "-----------------------------------" 1577 seq_puts(m, "# -----------------------------------"
1664 "---------------------------------\n"); 1578 "---------------------------------\n");
1665 seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |" 1579 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
1666 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 1580 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
1667 nsecs_to_usecs(data->saved_latency), 1581 nsecs_to_usecs(data->saved_latency),
1668 entries, 1582 entries,
@@ -1684,121 +1598,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1684#else 1598#else
1685 seq_puts(m, ")\n"); 1599 seq_puts(m, ")\n");
1686#endif 1600#endif
1687 seq_puts(m, " -----------------\n"); 1601 seq_puts(m, "# -----------------\n");
1688 seq_printf(m, " | task: %.16s-%d " 1602 seq_printf(m, "# | task: %.16s-%d "
1689 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 1603 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1690 data->comm, data->pid, data->uid, data->nice, 1604 data->comm, data->pid, data->uid, data->nice,
1691 data->policy, data->rt_priority); 1605 data->policy, data->rt_priority);
1692 seq_puts(m, " -----------------\n"); 1606 seq_puts(m, "# -----------------\n");
1693 1607
1694 if (data->critical_start) { 1608 if (data->critical_start) {
1695 seq_puts(m, " => started at: "); 1609 seq_puts(m, "# => started at: ");
1696 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 1610 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1697 trace_print_seq(m, &iter->seq); 1611 trace_print_seq(m, &iter->seq);
1698 seq_puts(m, "\n => ended at: "); 1612 seq_puts(m, "\n# => ended at: ");
1699 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 1613 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1700 trace_print_seq(m, &iter->seq); 1614 trace_print_seq(m, &iter->seq);
1701 seq_puts(m, "\n"); 1615 seq_puts(m, "#\n");
1702 }
1703
1704 seq_puts(m, "\n");
1705}
1706
1707static void
1708lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1709{
1710 int hardirq, softirq;
1711 char *comm;
1712
1713 comm = trace_find_cmdline(entry->pid);
1714
1715 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1716 trace_seq_printf(s, "%3d", cpu);
1717 trace_seq_printf(s, "%c%c",
1718 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
1719 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
1720 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
1721
1722 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
1723 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
1724 if (hardirq && softirq) {
1725 trace_seq_putc(s, 'H');
1726 } else {
1727 if (hardirq) {
1728 trace_seq_putc(s, 'h');
1729 } else {
1730 if (softirq)
1731 trace_seq_putc(s, 's');
1732 else
1733 trace_seq_putc(s, '.');
1734 }
1735 }
1736
1737 if (entry->preempt_count)
1738 trace_seq_printf(s, "%x", entry->preempt_count);
1739 else
1740 trace_seq_puts(s, ".");
1741}
1742
1743unsigned long preempt_mark_thresh = 100;
1744
1745static void
1746lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1747 unsigned long rel_usecs)
1748{
1749 trace_seq_printf(s, " %4lldus", abs_usecs);
1750 if (rel_usecs > preempt_mark_thresh)
1751 trace_seq_puts(s, "!: ");
1752 else if (rel_usecs > 1)
1753 trace_seq_puts(s, "+: ");
1754 else
1755 trace_seq_puts(s, " : ");
1756}
1757
1758static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1759
1760static int task_state_char(unsigned long state)
1761{
1762 int bit = state ? __ffs(state) + 1 : 0;
1763
1764 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1765}
1766
1767/*
1768 * The message is supposed to contain an ending newline.
1769 * If the printing stops prematurely, try to add a newline of our own.
1770 */
1771void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1772{
1773 struct trace_entry *ent;
1774 struct trace_field_cont *cont;
1775 bool ok = true;
1776
1777 ent = peek_next_entry(iter, iter->cpu, NULL);
1778 if (!ent || ent->type != TRACE_CONT) {
1779 trace_seq_putc(s, '\n');
1780 return;
1781 } 1616 }
1782 1617
1783 do { 1618 seq_puts(m, "#\n");
1784 cont = (struct trace_field_cont *)ent;
1785 if (ok)
1786 ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
1787
1788 ftrace_disable_cpu();
1789
1790 if (iter->buffer_iter[iter->cpu])
1791 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1792 else
1793 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
1794
1795 ftrace_enable_cpu();
1796
1797 ent = peek_next_entry(iter, iter->cpu, NULL);
1798 } while (ent && ent->type == TRACE_CONT);
1799
1800 if (!ok)
1801 trace_seq_putc(s, '\n');
1802} 1619}
1803 1620
1804static void test_cpu_buff_start(struct trace_iterator *iter) 1621static void test_cpu_buff_start(struct trace_iterator *iter)
@@ -1818,472 +1635,89 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); 1635 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1819} 1636}
1820 1637
1821static enum print_line_t
1822print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1823{
1824 struct trace_seq *s = &iter->seq;
1825 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1826 struct trace_entry *next_entry;
1827 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1828 struct trace_entry *entry = iter->ent;
1829 unsigned long abs_usecs;
1830 unsigned long rel_usecs;
1831 u64 next_ts;
1832 char *comm;
1833 int S, T;
1834 int i;
1835
1836 if (entry->type == TRACE_CONT)
1837 return TRACE_TYPE_HANDLED;
1838
1839 test_cpu_buff_start(iter);
1840
1841 next_entry = find_next_entry(iter, NULL, &next_ts);
1842 if (!next_entry)
1843 next_ts = iter->ts;
1844 rel_usecs = ns2usecs(next_ts - iter->ts);
1845 abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
1846
1847 if (verbose) {
1848 comm = trace_find_cmdline(entry->pid);
1849 trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
1850 " %ld.%03ldms (+%ld.%03ldms): ",
1851 comm,
1852 entry->pid, cpu, entry->flags,
1853 entry->preempt_count, trace_idx,
1854 ns2usecs(iter->ts),
1855 abs_usecs/1000,
1856 abs_usecs % 1000, rel_usecs/1000,
1857 rel_usecs % 1000);
1858 } else {
1859 lat_print_generic(s, entry, cpu);
1860 lat_print_timestamp(s, abs_usecs, rel_usecs);
1861 }
1862 switch (entry->type) {
1863 case TRACE_FN: {
1864 struct ftrace_entry *field;
1865
1866 trace_assign_type(field, entry);
1867
1868 seq_print_ip_sym(s, field->ip, sym_flags);
1869 trace_seq_puts(s, " (");
1870 seq_print_ip_sym(s, field->parent_ip, sym_flags);
1871 trace_seq_puts(s, ")\n");
1872 break;
1873 }
1874 case TRACE_CTX:
1875 case TRACE_WAKE: {
1876 struct ctx_switch_entry *field;
1877
1878 trace_assign_type(field, entry);
1879
1880 T = task_state_char(field->next_state);
1881 S = task_state_char(field->prev_state);
1882 comm = trace_find_cmdline(field->next_pid);
1883 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1884 field->prev_pid,
1885 field->prev_prio,
1886 S, entry->type == TRACE_CTX ? "==>" : " +",
1887 field->next_cpu,
1888 field->next_pid,
1889 field->next_prio,
1890 T, comm);
1891 break;
1892 }
1893 case TRACE_SPECIAL: {
1894 struct special_entry *field;
1895
1896 trace_assign_type(field, entry);
1897
1898 trace_seq_printf(s, "# %ld %ld %ld\n",
1899 field->arg1,
1900 field->arg2,
1901 field->arg3);
1902 break;
1903 }
1904 case TRACE_STACK: {
1905 struct stack_entry *field;
1906
1907 trace_assign_type(field, entry);
1908
1909 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1910 if (i)
1911 trace_seq_puts(s, " <= ");
1912 seq_print_ip_sym(s, field->caller[i], sym_flags);
1913 }
1914 trace_seq_puts(s, "\n");
1915 break;
1916 }
1917 case TRACE_PRINT: {
1918 struct print_entry *field;
1919
1920 trace_assign_type(field, entry);
1921
1922 seq_print_ip_sym(s, field->ip, sym_flags);
1923 trace_seq_printf(s, ": %s", field->buf);
1924 if (entry->flags & TRACE_FLAG_CONT)
1925 trace_seq_print_cont(s, iter);
1926 break;
1927 }
1928 case TRACE_BRANCH: {
1929 struct trace_branch *field;
1930
1931 trace_assign_type(field, entry);
1932
1933 trace_seq_printf(s, "[%s] %s:%s:%d\n",
1934 field->correct ? " ok " : " MISS ",
1935 field->func,
1936 field->file,
1937 field->line);
1938 break;
1939 }
1940 case TRACE_USER_STACK: {
1941 struct userstack_entry *field;
1942
1943 trace_assign_type(field, entry);
1944
1945 seq_print_userip_objs(field, s, sym_flags);
1946 trace_seq_putc(s, '\n');
1947 break;
1948 }
1949 default:
1950 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1951 }
1952 return TRACE_TYPE_HANDLED;
1953}
1954
1955static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 1638static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1956{ 1639{
1957 struct trace_seq *s = &iter->seq; 1640 struct trace_seq *s = &iter->seq;
1958 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1641 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1959 struct trace_entry *entry; 1642 struct trace_entry *entry;
1960 unsigned long usec_rem; 1643 struct trace_event *event;
1961 unsigned long long t;
1962 unsigned long secs;
1963 char *comm;
1964 int ret;
1965 int S, T;
1966 int i;
1967 1644
1968 entry = iter->ent; 1645 entry = iter->ent;
1969 1646
1970 if (entry->type == TRACE_CONT)
1971 return TRACE_TYPE_HANDLED;
1972
1973 test_cpu_buff_start(iter); 1647 test_cpu_buff_start(iter);
1974 1648
1975 comm = trace_find_cmdline(iter->ent->pid); 1649 event = ftrace_find_event(entry->type);
1976
1977 t = ns2usecs(iter->ts);
1978 usec_rem = do_div(t, 1000000ULL);
1979 secs = (unsigned long)t;
1980
1981 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1982 if (!ret)
1983 return TRACE_TYPE_PARTIAL_LINE;
1984 ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
1985 if (!ret)
1986 return TRACE_TYPE_PARTIAL_LINE;
1987 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1988 if (!ret)
1989 return TRACE_TYPE_PARTIAL_LINE;
1990
1991 switch (entry->type) {
1992 case TRACE_FN: {
1993 struct ftrace_entry *field;
1994
1995 trace_assign_type(field, entry);
1996
1997 ret = seq_print_ip_sym(s, field->ip, sym_flags);
1998 if (!ret)
1999 return TRACE_TYPE_PARTIAL_LINE;
2000 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
2001 field->parent_ip) {
2002 ret = trace_seq_printf(s, " <-");
2003 if (!ret)
2004 return TRACE_TYPE_PARTIAL_LINE;
2005 ret = seq_print_ip_sym(s,
2006 field->parent_ip,
2007 sym_flags);
2008 if (!ret)
2009 return TRACE_TYPE_PARTIAL_LINE;
2010 }
2011 ret = trace_seq_printf(s, "\n");
2012 if (!ret)
2013 return TRACE_TYPE_PARTIAL_LINE;
2014 break;
2015 }
2016 case TRACE_CTX:
2017 case TRACE_WAKE: {
2018 struct ctx_switch_entry *field;
2019
2020 trace_assign_type(field, entry);
2021
2022 T = task_state_char(field->next_state);
2023 S = task_state_char(field->prev_state);
2024 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
2025 field->prev_pid,
2026 field->prev_prio,
2027 S,
2028 entry->type == TRACE_CTX ? "==>" : " +",
2029 field->next_cpu,
2030 field->next_pid,
2031 field->next_prio,
2032 T);
2033 if (!ret)
2034 return TRACE_TYPE_PARTIAL_LINE;
2035 break;
2036 }
2037 case TRACE_SPECIAL: {
2038 struct special_entry *field;
2039
2040 trace_assign_type(field, entry);
2041 1650
2042 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1651 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2043 field->arg1, 1652 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2044 field->arg2, 1653 if (!trace_print_lat_context(iter))
2045 field->arg3); 1654 goto partial;
2046 if (!ret) 1655 } else {
2047 return TRACE_TYPE_PARTIAL_LINE; 1656 if (!trace_print_context(iter))
2048 break; 1657 goto partial;
2049 }
2050 case TRACE_STACK: {
2051 struct stack_entry *field;
2052
2053 trace_assign_type(field, entry);
2054
2055 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
2056 if (i) {
2057 ret = trace_seq_puts(s, " <= ");
2058 if (!ret)
2059 return TRACE_TYPE_PARTIAL_LINE;
2060 }
2061 ret = seq_print_ip_sym(s, field->caller[i],
2062 sym_flags);
2063 if (!ret)
2064 return TRACE_TYPE_PARTIAL_LINE;
2065 } 1658 }
2066 ret = trace_seq_puts(s, "\n");
2067 if (!ret)
2068 return TRACE_TYPE_PARTIAL_LINE;
2069 break;
2070 }
2071 case TRACE_PRINT: {
2072 struct print_entry *field;
2073
2074 trace_assign_type(field, entry);
2075
2076 seq_print_ip_sym(s, field->ip, sym_flags);
2077 trace_seq_printf(s, ": %s", field->buf);
2078 if (entry->flags & TRACE_FLAG_CONT)
2079 trace_seq_print_cont(s, iter);
2080 break;
2081 }
2082 case TRACE_GRAPH_RET: {
2083 return print_graph_function(iter);
2084 }
2085 case TRACE_GRAPH_ENT: {
2086 return print_graph_function(iter);
2087 } 1659 }
2088 case TRACE_BRANCH: {
2089 struct trace_branch *field;
2090 1660
2091 trace_assign_type(field, entry); 1661 if (event)
1662 return event->trace(iter, sym_flags);
2092 1663
2093 trace_seq_printf(s, "[%s] %s:%s:%d\n", 1664 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2094 field->correct ? " ok " : " MISS ", 1665 goto partial;
2095 field->func,
2096 field->file,
2097 field->line);
2098 break;
2099 }
2100 case TRACE_USER_STACK: {
2101 struct userstack_entry *field;
2102
2103 trace_assign_type(field, entry);
2104 1666
2105 ret = seq_print_userip_objs(field, s, sym_flags);
2106 if (!ret)
2107 return TRACE_TYPE_PARTIAL_LINE;
2108 ret = trace_seq_putc(s, '\n');
2109 if (!ret)
2110 return TRACE_TYPE_PARTIAL_LINE;
2111 break;
2112 }
2113 }
2114 return TRACE_TYPE_HANDLED; 1667 return TRACE_TYPE_HANDLED;
1668partial:
1669 return TRACE_TYPE_PARTIAL_LINE;
2115} 1670}
2116 1671
2117static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 1672static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2118{ 1673{
2119 struct trace_seq *s = &iter->seq; 1674 struct trace_seq *s = &iter->seq;
2120 struct trace_entry *entry; 1675 struct trace_entry *entry;
2121 int ret; 1676 struct trace_event *event;
2122 int S, T;
2123 1677
2124 entry = iter->ent; 1678 entry = iter->ent;
2125 1679
2126 if (entry->type == TRACE_CONT) 1680 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2127 return TRACE_TYPE_HANDLED; 1681 if (!trace_seq_printf(s, "%d %d %llu ",
2128 1682 entry->pid, iter->cpu, iter->ts))
2129 ret = trace_seq_printf(s, "%d %d %llu ", 1683 goto partial;
2130 entry->pid, iter->cpu, iter->ts);
2131 if (!ret)
2132 return TRACE_TYPE_PARTIAL_LINE;
2133
2134 switch (entry->type) {
2135 case TRACE_FN: {
2136 struct ftrace_entry *field;
2137
2138 trace_assign_type(field, entry);
2139
2140 ret = trace_seq_printf(s, "%x %x\n",
2141 field->ip,
2142 field->parent_ip);
2143 if (!ret)
2144 return TRACE_TYPE_PARTIAL_LINE;
2145 break;
2146 }
2147 case TRACE_CTX:
2148 case TRACE_WAKE: {
2149 struct ctx_switch_entry *field;
2150
2151 trace_assign_type(field, entry);
2152
2153 T = task_state_char(field->next_state);
2154 S = entry->type == TRACE_WAKE ? '+' :
2155 task_state_char(field->prev_state);
2156 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
2157 field->prev_pid,
2158 field->prev_prio,
2159 S,
2160 field->next_cpu,
2161 field->next_pid,
2162 field->next_prio,
2163 T);
2164 if (!ret)
2165 return TRACE_TYPE_PARTIAL_LINE;
2166 break;
2167 } 1684 }
2168 case TRACE_SPECIAL:
2169 case TRACE_USER_STACK:
2170 case TRACE_STACK: {
2171 struct special_entry *field;
2172 1685
2173 trace_assign_type(field, entry); 1686 event = ftrace_find_event(entry->type);
1687 if (event)
1688 return event->raw(iter, 0);
2174 1689
2175 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1690 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2176 field->arg1, 1691 goto partial;
2177 field->arg2,
2178 field->arg3);
2179 if (!ret)
2180 return TRACE_TYPE_PARTIAL_LINE;
2181 break;
2182 }
2183 case TRACE_PRINT: {
2184 struct print_entry *field;
2185
2186 trace_assign_type(field, entry);
2187 1692
2188 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
2189 if (entry->flags & TRACE_FLAG_CONT)
2190 trace_seq_print_cont(s, iter);
2191 break;
2192 }
2193 }
2194 return TRACE_TYPE_HANDLED; 1693 return TRACE_TYPE_HANDLED;
1694partial:
1695 return TRACE_TYPE_PARTIAL_LINE;
2195} 1696}
2196 1697
2197#define SEQ_PUT_FIELD_RET(s, x) \
2198do { \
2199 if (!trace_seq_putmem(s, &(x), sizeof(x))) \
2200 return 0; \
2201} while (0)
2202
2203#define SEQ_PUT_HEX_FIELD_RET(s, x) \
2204do { \
2205 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
2206 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
2207 return 0; \
2208} while (0)
2209
2210static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 1698static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2211{ 1699{
2212 struct trace_seq *s = &iter->seq; 1700 struct trace_seq *s = &iter->seq;
2213 unsigned char newline = '\n'; 1701 unsigned char newline = '\n';
2214 struct trace_entry *entry; 1702 struct trace_entry *entry;
2215 int S, T; 1703 struct trace_event *event;
2216 1704
2217 entry = iter->ent; 1705 entry = iter->ent;
2218 1706
2219 if (entry->type == TRACE_CONT) 1707 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2220 return TRACE_TYPE_HANDLED; 1708 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2221 1709 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2222 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 1710 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2223 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2224 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2225
2226 switch (entry->type) {
2227 case TRACE_FN: {
2228 struct ftrace_entry *field;
2229
2230 trace_assign_type(field, entry);
2231
2232 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
2233 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
2234 break;
2235 }
2236 case TRACE_CTX:
2237 case TRACE_WAKE: {
2238 struct ctx_switch_entry *field;
2239
2240 trace_assign_type(field, entry);
2241
2242 T = task_state_char(field->next_state);
2243 S = entry->type == TRACE_WAKE ? '+' :
2244 task_state_char(field->prev_state);
2245 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
2246 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
2247 SEQ_PUT_HEX_FIELD_RET(s, S);
2248 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
2249 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
2250 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
2251 SEQ_PUT_HEX_FIELD_RET(s, T);
2252 break;
2253 } 1711 }
2254 case TRACE_SPECIAL:
2255 case TRACE_USER_STACK:
2256 case TRACE_STACK: {
2257 struct special_entry *field;
2258 1712
2259 trace_assign_type(field, entry); 1713 event = ftrace_find_event(entry->type);
2260 1714 if (event) {
2261 SEQ_PUT_HEX_FIELD_RET(s, field->arg1); 1715 enum print_line_t ret = event->hex(iter, 0);
2262 SEQ_PUT_HEX_FIELD_RET(s, field->arg2); 1716 if (ret != TRACE_TYPE_HANDLED)
2263 SEQ_PUT_HEX_FIELD_RET(s, field->arg3); 1717 return ret;
2264 break;
2265 }
2266 } 1718 }
2267 SEQ_PUT_FIELD_RET(s, newline);
2268
2269 return TRACE_TYPE_HANDLED;
2270}
2271
2272static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
2273{
2274 struct trace_seq *s = &iter->seq;
2275 struct trace_entry *entry = iter->ent;
2276 struct print_entry *field;
2277 int ret;
2278
2279 trace_assign_type(field, entry);
2280
2281 ret = trace_seq_printf(s, field->buf);
2282 if (!ret)
2283 return TRACE_TYPE_PARTIAL_LINE;
2284 1719
2285 if (entry->flags & TRACE_FLAG_CONT) 1720 SEQ_PUT_FIELD_RET(s, newline);
2286 trace_seq_print_cont(s, iter);
2287 1721
2288 return TRACE_TYPE_HANDLED; 1722 return TRACE_TYPE_HANDLED;
2289} 1723}
@@ -2292,59 +1726,37 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2292{ 1726{
2293 struct trace_seq *s = &iter->seq; 1727 struct trace_seq *s = &iter->seq;
2294 struct trace_entry *entry; 1728 struct trace_entry *entry;
1729 struct trace_event *event;
2295 1730
2296 entry = iter->ent; 1731 entry = iter->ent;
2297 1732
2298 if (entry->type == TRACE_CONT) 1733 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2299 return TRACE_TYPE_HANDLED; 1734 SEQ_PUT_FIELD_RET(s, entry->pid);
2300 1735 SEQ_PUT_FIELD_RET(s, iter->cpu);
2301 SEQ_PUT_FIELD_RET(s, entry->pid); 1736 SEQ_PUT_FIELD_RET(s, iter->ts);
2302 SEQ_PUT_FIELD_RET(s, entry->cpu);
2303 SEQ_PUT_FIELD_RET(s, iter->ts);
2304
2305 switch (entry->type) {
2306 case TRACE_FN: {
2307 struct ftrace_entry *field;
2308
2309 trace_assign_type(field, entry);
2310
2311 SEQ_PUT_FIELD_RET(s, field->ip);
2312 SEQ_PUT_FIELD_RET(s, field->parent_ip);
2313 break;
2314 }
2315 case TRACE_CTX: {
2316 struct ctx_switch_entry *field;
2317
2318 trace_assign_type(field, entry);
2319
2320 SEQ_PUT_FIELD_RET(s, field->prev_pid);
2321 SEQ_PUT_FIELD_RET(s, field->prev_prio);
2322 SEQ_PUT_FIELD_RET(s, field->prev_state);
2323 SEQ_PUT_FIELD_RET(s, field->next_pid);
2324 SEQ_PUT_FIELD_RET(s, field->next_prio);
2325 SEQ_PUT_FIELD_RET(s, field->next_state);
2326 break;
2327 } 1737 }
2328 case TRACE_SPECIAL:
2329 case TRACE_USER_STACK:
2330 case TRACE_STACK: {
2331 struct special_entry *field;
2332 1738
2333 trace_assign_type(field, entry); 1739 event = ftrace_find_event(entry->type);
2334 1740 return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
2335 SEQ_PUT_FIELD_RET(s, field->arg1);
2336 SEQ_PUT_FIELD_RET(s, field->arg2);
2337 SEQ_PUT_FIELD_RET(s, field->arg3);
2338 break;
2339 }
2340 }
2341 return 1;
2342} 1741}
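The four output paths above (print_trace_fmt, print_raw_fmt, print_hex_fmt, print_bin_fmt) no longer switch on every entry type inline; they look the type up with ftrace_find_event() and call the registered handler (event->trace, ->raw, ->hex, ->binary). A toy version of that registry-and-dispatch idea, using hypothetical names and a plain array instead of the kernel's registration machinery:

#include <stdio.h>

#define MAX_EVENT_TYPES 16

struct toy_entry {
	int type;
	long arg;
};

/* Per-type output callbacks, loosely analogous to struct trace_event. */
struct toy_event {
	int type;
	int (*print)(const struct toy_entry *e);
};

static const struct toy_event *event_table[MAX_EVENT_TYPES];

static void register_event(const struct toy_event *ev)
{
	if (ev->type >= 0 && ev->type < MAX_EVENT_TYPES)
		event_table[ev->type] = ev;
}

/* Analogue of ftrace_find_event(). */
static const struct toy_event *find_event(int type)
{
	if (type < 0 || type >= MAX_EVENT_TYPES)
		return NULL;
	return event_table[type];
}

static int print_fn_event(const struct toy_entry *e)
{
	return printf("fn: arg=%ld\n", e->arg);
}

static const struct toy_event fn_event = { .type = 1, .print = print_fn_event };

/* The generic printer only handles lookup and the unknown-type fallback. */
static void print_trace_entry(const struct toy_entry *e)
{
	const struct toy_event *ev = find_event(e->type);

	if (ev)
		ev->print(e);
	else
		printf("Unknown type %d\n", e->type);
}

int main(void)
{
	struct toy_entry known = { .type = 1, .arg = 42 };
	struct toy_entry unknown = { .type = 9, .arg = 0 };

	register_event(&fn_event);
	print_trace_entry(&known);
	print_trace_entry(&unknown);
	return 0;
}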
2343 1742
2344static int trace_empty(struct trace_iterator *iter) 1743static int trace_empty(struct trace_iterator *iter)
2345{ 1744{
2346 int cpu; 1745 int cpu;
2347 1746
1747 /* If we are looking at one CPU buffer, only check that one */
1748 if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
1749 cpu = iter->cpu_file;
1750 if (iter->buffer_iter[cpu]) {
1751 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1752 return 0;
1753 } else {
1754 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1755 return 0;
1756 }
1757 return 1;
1758 }
1759
2348 for_each_tracing_cpu(cpu) { 1760 for_each_tracing_cpu(cpu) {
2349 if (iter->buffer_iter[cpu]) { 1761 if (iter->buffer_iter[cpu]) {
2350 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) 1762 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
@@ -2368,10 +1780,15 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
2368 return ret; 1780 return ret;
2369 } 1781 }
2370 1782
1783 if (iter->ent->type == TRACE_BPRINT &&
1784 trace_flags & TRACE_ITER_PRINTK &&
1785 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
1786 return trace_print_bprintk_msg_only(iter);
1787
2371 if (iter->ent->type == TRACE_PRINT && 1788 if (iter->ent->type == TRACE_PRINT &&
2372 trace_flags & TRACE_ITER_PRINTK && 1789 trace_flags & TRACE_ITER_PRINTK &&
2373 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 1790 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2374 return print_printk_msg_only(iter); 1791 return trace_print_printk_msg_only(iter);
2375 1792
2376 if (trace_flags & TRACE_ITER_BIN) 1793 if (trace_flags & TRACE_ITER_BIN)
2377 return print_bin_fmt(iter); 1794 return print_bin_fmt(iter);
@@ -2382,9 +1799,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
2382 if (trace_flags & TRACE_ITER_RAW) 1799 if (trace_flags & TRACE_ITER_RAW)
2383 return print_raw_fmt(iter); 1800 return print_raw_fmt(iter);
2384 1801
2385 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2386 return print_lat_fmt(iter, iter->idx, iter->cpu);
2387
2388 return print_trace_fmt(iter); 1802 return print_trace_fmt(iter);
2389} 1803}
2390 1804
@@ -2426,30 +1840,40 @@ static struct seq_operations tracer_seq_ops = {
2426}; 1840};
2427 1841
2428static struct trace_iterator * 1842static struct trace_iterator *
2429__tracing_open(struct inode *inode, struct file *file, int *ret) 1843__tracing_open(struct inode *inode, struct file *file)
2430{ 1844{
1845 long cpu_file = (long) inode->i_private;
1846 void *fail_ret = ERR_PTR(-ENOMEM);
2431 struct trace_iterator *iter; 1847 struct trace_iterator *iter;
2432 struct seq_file *m; 1848 struct seq_file *m;
2433 int cpu; 1849 int cpu, ret;
2434 1850
2435 if (tracing_disabled) { 1851 if (tracing_disabled)
2436 *ret = -ENODEV; 1852 return ERR_PTR(-ENODEV);
2437 return NULL;
2438 }
2439 1853
2440 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 1854 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2441 if (!iter) { 1855 if (!iter)
2442 *ret = -ENOMEM; 1856 return ERR_PTR(-ENOMEM);
2443 goto out;
2444 }
2445 1857
1858 /*
1859 * We make a copy of the current tracer to avoid concurrent
1860 * changes on it while we are reading.
1861 */
2446 mutex_lock(&trace_types_lock); 1862 mutex_lock(&trace_types_lock);
1863 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
1864 if (!iter->trace)
1865 goto fail;
1866
1867 if (current_trace)
1868 *iter->trace = *current_trace;
1869
2447 if (current_trace && current_trace->print_max) 1870 if (current_trace && current_trace->print_max)
2448 iter->tr = &max_tr; 1871 iter->tr = &max_tr;
2449 else 1872 else
2450 iter->tr = inode->i_private; 1873 iter->tr = &global_trace;
2451 iter->trace = current_trace;
2452 iter->pos = -1; 1874 iter->pos = -1;
1875 mutex_init(&iter->mutex);
1876 iter->cpu_file = cpu_file;
2453 1877
2454 /* Notify the tracer early; before we stop tracing. */ 1878 /* Notify the tracer early; before we stop tracing. */
2455 if (iter->trace && iter->trace->open) 1879 if (iter->trace && iter->trace->open)
@@ -2459,20 +1883,24 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
2459 if (ring_buffer_overruns(iter->tr->buffer)) 1883 if (ring_buffer_overruns(iter->tr->buffer))
2460 iter->iter_flags |= TRACE_FILE_ANNOTATE; 1884 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2461 1885
1886 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
1887 for_each_tracing_cpu(cpu) {
2462 1888
2463 for_each_tracing_cpu(cpu) { 1889 iter->buffer_iter[cpu] =
2464 1890 ring_buffer_read_start(iter->tr->buffer, cpu);
1891 }
1892 } else {
1893 cpu = iter->cpu_file;
2465 iter->buffer_iter[cpu] = 1894 iter->buffer_iter[cpu] =
2466 ring_buffer_read_start(iter->tr->buffer, cpu); 1895 ring_buffer_read_start(iter->tr->buffer, cpu);
2467
2468 if (!iter->buffer_iter[cpu])
2469 goto fail_buffer;
2470 } 1896 }
2471 1897
2472 /* TODO stop tracer */ 1898 /* TODO stop tracer */
2473 *ret = seq_open(file, &tracer_seq_ops); 1899 ret = seq_open(file, &tracer_seq_ops);
2474 if (*ret) 1900 if (ret < 0) {
1901 fail_ret = ERR_PTR(ret);
2475 goto fail_buffer; 1902 goto fail_buffer;
1903 }
2476 1904
2477 m = file->private_data; 1905 m = file->private_data;
2478 m->private = iter; 1906 m->private = iter;
@@ -2482,7 +1910,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
2482 1910
2483 mutex_unlock(&trace_types_lock); 1911 mutex_unlock(&trace_types_lock);
2484 1912
2485 out:
2486 return iter; 1913 return iter;
2487 1914
2488 fail_buffer: 1915 fail_buffer:
@@ -2490,10 +1917,12 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
2490 if (iter->buffer_iter[cpu]) 1917 if (iter->buffer_iter[cpu])
2491 ring_buffer_read_finish(iter->buffer_iter[cpu]); 1918 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2492 } 1919 }
1920 fail:
2493 mutex_unlock(&trace_types_lock); 1921 mutex_unlock(&trace_types_lock);
1922 kfree(iter->trace);
2494 kfree(iter); 1923 kfree(iter);
2495 1924
2496 return ERR_PTR(-ENOMEM); 1925 return fail_ret;
2497} 1926}
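__tracing_open() now reports failure through an ERR_PTR()-encoded return value instead of an int out-parameter, and unwinds partially built state through the fail_buffer/fail labels. For readers unfamiliar with the idiom, here is a minimal userspace imitation; the real ERR_PTR/IS_ERR/PTR_ERR live in the kernel, and these toy_* versions only illustrate the shape:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy versions of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers. */
static inline void *toy_err_ptr(long err)      { return (void *)err; }
static inline long  toy_ptr_err(const void *p) { return (long)p; }
static inline int   toy_is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

struct toy_iter {
	int *buffer;
	int *trace;
};

/* Error paths unwind in reverse allocation order, as in __tracing_open(). */
static struct toy_iter *toy_open(int fail_second_alloc)
{
	struct toy_iter *iter = calloc(1, sizeof(*iter));

	if (!iter)
		return toy_err_ptr(-ENOMEM);

	iter->trace = calloc(1, sizeof(*iter->trace));
	if (!iter->trace)
		goto fail;

	iter->buffer = fail_second_alloc ? NULL : calloc(1, sizeof(*iter->buffer));
	if (!iter->buffer)
		goto fail_trace;

	return iter;

fail_trace:
	free(iter->trace);
fail:
	free(iter);
	return toy_err_ptr(-ENOMEM);
}

int main(void)
{
	struct toy_iter *iter = toy_open(1);

	if (toy_is_err(iter)) {
		printf("open failed: %ld\n", toy_ptr_err(iter));
		return 1;
	}
	free(iter->buffer);
	free(iter->trace);
	free(iter);
	return 0;
}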
2498 1927
2499int tracing_open_generic(struct inode *inode, struct file *filp) 1928int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2505,12 +1934,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
2505 return 0; 1934 return 0;
2506} 1935}
2507 1936
2508int tracing_release(struct inode *inode, struct file *file) 1937static int tracing_release(struct inode *inode, struct file *file)
2509{ 1938{
2510 struct seq_file *m = (struct seq_file *)file->private_data; 1939 struct seq_file *m = (struct seq_file *)file->private_data;
2511 struct trace_iterator *iter = m->private; 1940 struct trace_iterator *iter;
2512 int cpu; 1941 int cpu;
2513 1942
1943 if (!(file->f_mode & FMODE_READ))
1944 return 0;
1945
1946 iter = m->private;
1947
2514 mutex_lock(&trace_types_lock); 1948 mutex_lock(&trace_types_lock);
2515 for_each_tracing_cpu(cpu) { 1949 for_each_tracing_cpu(cpu) {
2516 if (iter->buffer_iter[cpu]) 1950 if (iter->buffer_iter[cpu])
@@ -2525,33 +1959,38 @@ int tracing_release(struct inode *inode, struct file *file)
2525 mutex_unlock(&trace_types_lock); 1959 mutex_unlock(&trace_types_lock);
2526 1960
2527 seq_release(inode, file); 1961 seq_release(inode, file);
1962 mutex_destroy(&iter->mutex);
1963 kfree(iter->trace);
2528 kfree(iter); 1964 kfree(iter);
2529 return 0; 1965 return 0;
2530} 1966}
2531 1967
2532static int tracing_open(struct inode *inode, struct file *file) 1968static int tracing_open(struct inode *inode, struct file *file)
2533{ 1969{
2534 int ret;
2535
2536 __tracing_open(inode, file, &ret);
2537
2538 return ret;
2539}
2540
2541static int tracing_lt_open(struct inode *inode, struct file *file)
2542{
2543 struct trace_iterator *iter; 1970 struct trace_iterator *iter;
2544 int ret; 1971 int ret = 0;
2545 1972
2546 iter = __tracing_open(inode, file, &ret); 1973 /* If this file was open for write, then erase contents */
1974 if ((file->f_mode & FMODE_WRITE) &&
1975 !(file->f_flags & O_APPEND)) {
1976 long cpu = (long) inode->i_private;
2547 1977
2548 if (!ret) 1978 if (cpu == TRACE_PIPE_ALL_CPU)
2549 iter->iter_flags |= TRACE_FILE_LAT_FMT; 1979 tracing_reset_online_cpus(&global_trace);
1980 else
1981 tracing_reset(&global_trace, cpu);
1982 }
2550 1983
1984 if (file->f_mode & FMODE_READ) {
1985 iter = __tracing_open(inode, file);
1986 if (IS_ERR(iter))
1987 ret = PTR_ERR(iter);
1988 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
1989 iter->iter_flags |= TRACE_FILE_LAT_FMT;
1990 }
2551 return ret; 1991 return ret;
2552} 1992}
2553 1993
2554
2555static void * 1994static void *
2556t_next(struct seq_file *m, void *v, loff_t *pos) 1995t_next(struct seq_file *m, void *v, loff_t *pos)
2557{ 1996{
@@ -2623,21 +2062,22 @@ static int show_traces_open(struct inode *inode, struct file *file)
2623 return ret; 2062 return ret;
2624} 2063}
2625 2064
2626static struct file_operations tracing_fops = { 2065static ssize_t
2627 .open = tracing_open, 2066tracing_write_stub(struct file *filp, const char __user *ubuf,
2628 .read = seq_read, 2067 size_t count, loff_t *ppos)
2629 .llseek = seq_lseek, 2068{
2630 .release = tracing_release, 2069 return count;
2631}; 2070}
2632 2071
2633static struct file_operations tracing_lt_fops = { 2072static const struct file_operations tracing_fops = {
2634 .open = tracing_lt_open, 2073 .open = tracing_open,
2635 .read = seq_read, 2074 .read = seq_read,
2075 .write = tracing_write_stub,
2636 .llseek = seq_lseek, 2076 .llseek = seq_lseek,
2637 .release = tracing_release, 2077 .release = tracing_release,
2638}; 2078};
2639 2079
2640static struct file_operations show_traces_fops = { 2080static const struct file_operations show_traces_fops = {
2641 .open = show_traces_open, 2081 .open = show_traces_open,
2642 .read = seq_read, 2082 .read = seq_read,
2643 .release = seq_release, 2083 .release = seq_release,
@@ -2730,7 +2170,7 @@ err_unlock:
2730 return err; 2170 return err;
2731} 2171}
2732 2172
2733static struct file_operations tracing_cpumask_fops = { 2173static const struct file_operations tracing_cpumask_fops = {
2734 .open = tracing_open_generic, 2174 .open = tracing_open_generic,
2735 .read = tracing_cpumask_read, 2175 .read = tracing_cpumask_read,
2736 .write = tracing_cpumask_write, 2176 .write = tracing_cpumask_write,
@@ -2740,57 +2180,62 @@ static ssize_t
2740tracing_trace_options_read(struct file *filp, char __user *ubuf, 2180tracing_trace_options_read(struct file *filp, char __user *ubuf,
2741 size_t cnt, loff_t *ppos) 2181 size_t cnt, loff_t *ppos)
2742{ 2182{
2743 int i; 2183 struct tracer_opt *trace_opts;
2184 u32 tracer_flags;
2185 int len = 0;
2744 char *buf; 2186 char *buf;
2745 int r = 0; 2187 int r = 0;
2746 int len = 0; 2188 int i;
2747 u32 tracer_flags = current_trace->flags->val;
2748 struct tracer_opt *trace_opts = current_trace->flags->opts;
2749 2189
2750 2190
2751 /* calulate max size */ 2191 /* calculate max size */
2752 for (i = 0; trace_options[i]; i++) { 2192 for (i = 0; trace_options[i]; i++) {
2753 len += strlen(trace_options[i]); 2193 len += strlen(trace_options[i]);
2754 len += 3; /* "no" and space */ 2194 len += 3; /* "no" and newline */
2755 } 2195 }
2756 2196
2197 mutex_lock(&trace_types_lock);
2198 tracer_flags = current_trace->flags->val;
2199 trace_opts = current_trace->flags->opts;
2200
2757 /* 2201 /*
2758 * Increase the size with names of options specific 2202 * Increase the size with names of options specific
2759 * of the current tracer. 2203 * of the current tracer.
2760 */ 2204 */
2761 for (i = 0; trace_opts[i].name; i++) { 2205 for (i = 0; trace_opts[i].name; i++) {
2762 len += strlen(trace_opts[i].name); 2206 len += strlen(trace_opts[i].name);
2763 len += 3; /* "no" and space */ 2207 len += 3; /* "no" and newline */
2764 } 2208 }
2765 2209
2766 /* +2 for \n and \0 */ 2210 /* +2 for \n and \0 */
2767 buf = kmalloc(len + 2, GFP_KERNEL); 2211 buf = kmalloc(len + 2, GFP_KERNEL);
2768 if (!buf) 2212 if (!buf) {
2213 mutex_unlock(&trace_types_lock);
2769 return -ENOMEM; 2214 return -ENOMEM;
2215 }
2770 2216
2771 for (i = 0; trace_options[i]; i++) { 2217 for (i = 0; trace_options[i]; i++) {
2772 if (trace_flags & (1 << i)) 2218 if (trace_flags & (1 << i))
2773 r += sprintf(buf + r, "%s ", trace_options[i]); 2219 r += sprintf(buf + r, "%s\n", trace_options[i]);
2774 else 2220 else
2775 r += sprintf(buf + r, "no%s ", trace_options[i]); 2221 r += sprintf(buf + r, "no%s\n", trace_options[i]);
2776 } 2222 }
2777 2223
2778 for (i = 0; trace_opts[i].name; i++) { 2224 for (i = 0; trace_opts[i].name; i++) {
2779 if (tracer_flags & trace_opts[i].bit) 2225 if (tracer_flags & trace_opts[i].bit)
2780 r += sprintf(buf + r, "%s ", 2226 r += sprintf(buf + r, "%s\n",
2781 trace_opts[i].name); 2227 trace_opts[i].name);
2782 else 2228 else
2783 r += sprintf(buf + r, "no%s ", 2229 r += sprintf(buf + r, "no%s\n",
2784 trace_opts[i].name); 2230 trace_opts[i].name);
2785 } 2231 }
2232 mutex_unlock(&trace_types_lock);
2786 2233
2787 r += sprintf(buf + r, "\n");
2788 WARN_ON(r >= len + 2); 2234 WARN_ON(r >= len + 2);
2789 2235
2790 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2236 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2791 2237
2792 kfree(buf); 2238 kfree(buf);
2793
2794 return r; 2239 return r;
2795} 2240}
2796 2241
@@ -2828,6 +2273,34 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2828 return 0; 2273 return 0;
2829} 2274}
2830 2275
2276static void set_tracer_flags(unsigned int mask, int enabled)
2277{
2278 /* do nothing if flag is already set */
2279 if (!!(trace_flags & mask) == !!enabled)
2280 return;
2281
2282 if (enabled)
2283 trace_flags |= mask;
2284 else
2285 trace_flags &= ~mask;
2286
2287 if (mask == TRACE_ITER_GLOBAL_CLK) {
2288 u64 (*func)(void);
2289
2290 if (enabled)
2291 func = trace_clock_global;
2292 else
2293 func = trace_clock_local;
2294
2295 mutex_lock(&trace_types_lock);
2296 ring_buffer_set_clock(global_trace.buffer, func);
2297
2298 if (max_tr.buffer)
2299 ring_buffer_set_clock(max_tr.buffer, func);
2300 mutex_unlock(&trace_types_lock);
2301 }
2302}
2303
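set_tracer_flags() above toggles a bit in trace_flags only when the requested state differs from the current one, and couples one particular bit (TRACE_ITER_GLOBAL_CLK) to a side effect: switching the ring-buffer clock function. A self-contained sketch of that mask-toggle-with-hook pattern, with made-up flag and function names:

#include <stdio.h>

#define FLAG_VERBOSE   (1u << 0)
#define FLAG_GLOBAL_TS (1u << 1)	/* stands in for TRACE_ITER_GLOBAL_CLK */

static unsigned int flags;

static unsigned long long local_clock(void)  { return 1; }
static unsigned long long global_clock(void) { return 2; }

/* Active timestamp source; swapped when FLAG_GLOBAL_TS changes. */
static unsigned long long (*clock_fn)(void) = local_clock;

static void set_flag(unsigned int mask, int enabled)
{
	/* Do nothing if the flag is already in the requested state. */
	if (!!(flags & mask) == !!enabled)
		return;

	if (enabled)
		flags |= mask;
	else
		flags &= ~mask;

	/* Some flags carry a side effect beyond the bit itself. */
	if (mask == FLAG_GLOBAL_TS)
		clock_fn = enabled ? global_clock : local_clock;
}

int main(void)
{
	set_flag(FLAG_GLOBAL_TS, 1);
	printf("flags=%#x clock=%llu\n", flags, clock_fn());
	set_flag(FLAG_GLOBAL_TS, 1);	/* no-op: already set */
	set_flag(FLAG_GLOBAL_TS, 0);
	printf("flags=%#x clock=%llu\n", flags, clock_fn());
	return 0;
}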
2831static ssize_t 2304static ssize_t
2832tracing_trace_options_write(struct file *filp, const char __user *ubuf, 2305tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2833 size_t cnt, loff_t *ppos) 2306 size_t cnt, loff_t *ppos)
@@ -2855,17 +2328,16 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2855 int len = strlen(trace_options[i]); 2328 int len = strlen(trace_options[i]);
2856 2329
2857 if (strncmp(cmp, trace_options[i], len) == 0) { 2330 if (strncmp(cmp, trace_options[i], len) == 0) {
2858 if (neg) 2331 set_tracer_flags(1 << i, !neg);
2859 trace_flags &= ~(1 << i);
2860 else
2861 trace_flags |= (1 << i);
2862 break; 2332 break;
2863 } 2333 }
2864 } 2334 }
2865 2335
2866 /* If no option could be set, test the specific tracer options */ 2336 /* If no option could be set, test the specific tracer options */
2867 if (!trace_options[i]) { 2337 if (!trace_options[i]) {
2338 mutex_lock(&trace_types_lock);
2868 ret = set_tracer_option(current_trace, cmp, neg); 2339 ret = set_tracer_option(current_trace, cmp, neg);
2340 mutex_unlock(&trace_types_lock);
2869 if (ret) 2341 if (ret)
2870 return ret; 2342 return ret;
2871 } 2343 }
@@ -2875,7 +2347,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2875 return cnt; 2347 return cnt;
2876} 2348}
2877 2349
2878static struct file_operations tracing_iter_fops = { 2350static const struct file_operations tracing_iter_fops = {
2879 .open = tracing_open_generic, 2351 .open = tracing_open_generic,
2880 .read = tracing_trace_options_read, 2352 .read = tracing_trace_options_read,
2881 .write = tracing_trace_options_write, 2353 .write = tracing_trace_options_write,
@@ -2908,7 +2380,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
2908 readme_msg, strlen(readme_msg)); 2380 readme_msg, strlen(readme_msg));
2909} 2381}
2910 2382
2911static struct file_operations tracing_readme_fops = { 2383static const struct file_operations tracing_readme_fops = {
2912 .open = tracing_open_generic, 2384 .open = tracing_open_generic,
2913 .read = tracing_readme_read, 2385 .read = tracing_readme_read,
2914}; 2386};
@@ -2930,7 +2402,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2930{ 2402{
2931 struct trace_array *tr = filp->private_data; 2403 struct trace_array *tr = filp->private_data;
2932 char buf[64]; 2404 char buf[64];
2933 long val; 2405 unsigned long val;
2934 int ret; 2406 int ret;
2935 2407
2936 if (cnt >= sizeof(buf)) 2408 if (cnt >= sizeof(buf))
@@ -2985,13 +2457,105 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
2985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2457 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2986} 2458}
2987 2459
2988static int tracing_set_tracer(char *buf) 2460int tracer_init(struct tracer *t, struct trace_array *tr)
2461{
2462 tracing_reset_online_cpus(tr);
2463 return t->init(tr);
2464}
2465
2466static int tracing_resize_ring_buffer(unsigned long size)
2467{
2468 int ret;
2469
2470 /*
2471	 * If the kernel or the user changes the size of the ring buffer,
2472 * we use the size that was given, and we can forget about
2473 * expanding it later.
2474 */
2475 ring_buffer_expanded = 1;
2476
2477 ret = ring_buffer_resize(global_trace.buffer, size);
2478 if (ret < 0)
2479 return ret;
2480
2481 ret = ring_buffer_resize(max_tr.buffer, size);
2482 if (ret < 0) {
2483 int r;
2484
2485 r = ring_buffer_resize(global_trace.buffer,
2486 global_trace.entries);
2487 if (r < 0) {
2488 /*
2489 * AARGH! We are left with different
2490 * size max buffer!!!!
2491 * The max buffer is our "snapshot" buffer.
2492 * When a tracer needs a snapshot (one of the
2493 * latency tracers), it swaps the max buffer
2494	 * with the saved snapshot. We succeeded in updating
2495	 * the size of the main buffer, but failed to
2496 * update the size of the max buffer. But when we tried
2497 * to reset the main buffer to the original size, we
2498 * failed there too. This is very unlikely to
2499 * happen, but if it does, warn and kill all
2500 * tracing.
2501 */
2502 WARN_ON(1);
2503 tracing_disabled = 1;
2504 }
2505 return ret;
2506 }
2507
2508 global_trace.entries = size;
2509
2510 return ret;
2511}
2512
2513/**
2514 * tracing_update_buffers - used by tracing facility to expand ring buffers
2515 *
2516 * To save memory on a system that has tracing configured in but never
2517 * used, the ring buffers are set to a minimum size. Once a user starts
2518 * to use the tracing facility, they need to grow to their
2519 * default size.
2520 *
2521 * This function is to be called when a tracer is about to be used.
2522 */
2523int tracing_update_buffers(void)
2524{
2525 int ret = 0;
2526
2527 mutex_lock(&trace_types_lock);
2528 if (!ring_buffer_expanded)
2529 ret = tracing_resize_ring_buffer(trace_buf_size);
2530 mutex_unlock(&trace_types_lock);
2531
2532 return ret;
2533}
2534
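For context, tracing_resize_ring_buffer() and tracing_update_buffers() are reached from userspace through the entries file handled by tracing_entries_write() further down. A hedged sketch of driving that path is below; it assumes debugfs is mounted at /sys/kernel/debug and that the file is exposed as buffer_size_kb (the file name is an assumption, it is not created in these hunks).

/* Usage sketch only, not part of the patch. Path and file name are
 * assumptions; adjust to the running kernel. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/buffer_size_kb";
	char buf[96];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask for 1408 KiB per cpu; the kernel resizes both the main and
	 * the max (snapshot) buffer, or rolls back on failure. */
	if (write(fd, "1408\n", 5) < 0)
		perror("write");

	lseek(fd, 0, SEEK_SET);		/* the write advanced f_pos */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("buffer_size_kb now: %s", buf);
	}
	close(fd);
	return 0;
}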
2535struct trace_option_dentry;
2536
2537static struct trace_option_dentry *
2538create_trace_option_files(struct tracer *tracer);
2539
2540static void
2541destroy_trace_option_files(struct trace_option_dentry *topts);
2542
2543static int tracing_set_tracer(const char *buf)
2989{ 2544{
2545 static struct trace_option_dentry *topts;
2990 struct trace_array *tr = &global_trace; 2546 struct trace_array *tr = &global_trace;
2991 struct tracer *t; 2547 struct tracer *t;
2992 int ret = 0; 2548 int ret = 0;
2993 2549
2994 mutex_lock(&trace_types_lock); 2550 mutex_lock(&trace_types_lock);
2551
2552 if (!ring_buffer_expanded) {
2553 ret = tracing_resize_ring_buffer(trace_buf_size);
2554 if (ret < 0)
2555 goto out;
2556 ret = 0;
2557 }
2558
2995 for (t = trace_types; t; t = t->next) { 2559 for (t = trace_types; t; t = t->next) {
2996 if (strcmp(t->name, buf) == 0) 2560 if (strcmp(t->name, buf) == 0)
2997 break; 2561 break;
@@ -3007,9 +2571,14 @@ static int tracing_set_tracer(char *buf)
3007 if (current_trace && current_trace->reset) 2571 if (current_trace && current_trace->reset)
3008 current_trace->reset(tr); 2572 current_trace->reset(tr);
3009 2573
2574 destroy_trace_option_files(topts);
2575
3010 current_trace = t; 2576 current_trace = t;
2577
2578 topts = create_trace_option_files(current_trace);
2579
3011 if (t->init) { 2580 if (t->init) {
3012 ret = t->init(tr); 2581 ret = tracer_init(t, tr);
3013 if (ret) 2582 if (ret)
3014 goto out; 2583 goto out;
3015 } 2584 }
@@ -3072,9 +2641,9 @@ static ssize_t
3072tracing_max_lat_write(struct file *filp, const char __user *ubuf, 2641tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3073 size_t cnt, loff_t *ppos) 2642 size_t cnt, loff_t *ppos)
3074{ 2643{
3075 long *ptr = filp->private_data; 2644 unsigned long *ptr = filp->private_data;
3076 char buf[64]; 2645 char buf[64];
3077 long val; 2646 unsigned long val;
3078 int ret; 2647 int ret;
3079 2648
3080 if (cnt >= sizeof(buf)) 2649 if (cnt >= sizeof(buf))
@@ -3094,54 +2663,96 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3094 return cnt; 2663 return cnt;
3095} 2664}
3096 2665
3097static atomic_t tracing_reader;
3098
3099static int tracing_open_pipe(struct inode *inode, struct file *filp) 2666static int tracing_open_pipe(struct inode *inode, struct file *filp)
3100{ 2667{
2668 long cpu_file = (long) inode->i_private;
3101 struct trace_iterator *iter; 2669 struct trace_iterator *iter;
2670 int ret = 0;
3102 2671
3103 if (tracing_disabled) 2672 if (tracing_disabled)
3104 return -ENODEV; 2673 return -ENODEV;
3105 2674
3106 /* We only allow for reader of the pipe */ 2675 mutex_lock(&trace_types_lock);
3107 if (atomic_inc_return(&tracing_reader) != 1) { 2676
3108 atomic_dec(&tracing_reader); 2677 /* We only allow one reader per cpu */
3109 return -EBUSY; 2678 if (cpu_file == TRACE_PIPE_ALL_CPU) {
2679 if (!cpumask_empty(tracing_reader_cpumask)) {
2680 ret = -EBUSY;
2681 goto out;
2682 }
2683 cpumask_setall(tracing_reader_cpumask);
2684 } else {
2685 if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
2686 cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
2687 else {
2688 ret = -EBUSY;
2689 goto out;
2690 }
3110 } 2691 }
3111 2692
3112 /* create a buffer to store the information to pass to userspace */ 2693 /* create a buffer to store the information to pass to userspace */
3113 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2694 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3114 if (!iter) 2695 if (!iter) {
3115 return -ENOMEM; 2696 ret = -ENOMEM;
2697 goto out;
2698 }
3116 2699
3117 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 2700 /*
3118 kfree(iter); 2701 * We make a copy of the current tracer to avoid concurrent
3119 return -ENOMEM; 2702 * changes on it while we are reading.
2703 */
2704 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
2705 if (!iter->trace) {
2706 ret = -ENOMEM;
2707 goto fail;
3120 } 2708 }
2709 if (current_trace)
2710 *iter->trace = *current_trace;
3121 2711
3122 mutex_lock(&trace_types_lock); 2712 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
2713 ret = -ENOMEM;
2714 goto fail;
2715 }
3123 2716
3124 /* trace pipe does not show start of buffer */ 2717 /* trace pipe does not show start of buffer */
3125 cpumask_setall(iter->started); 2718 cpumask_setall(iter->started);
3126 2719
2720 iter->cpu_file = cpu_file;
3127 iter->tr = &global_trace; 2721 iter->tr = &global_trace;
3128 iter->trace = current_trace; 2722 mutex_init(&iter->mutex);
3129 filp->private_data = iter; 2723 filp->private_data = iter;
3130 2724
3131 if (iter->trace->pipe_open) 2725 if (iter->trace->pipe_open)
3132 iter->trace->pipe_open(iter); 2726 iter->trace->pipe_open(iter);
2727
2728out:
3133 mutex_unlock(&trace_types_lock); 2729 mutex_unlock(&trace_types_lock);
2730 return ret;
3134 2731
3135 return 0; 2732fail:
2733 kfree(iter->trace);
2734 kfree(iter);
2735 mutex_unlock(&trace_types_lock);
2736 return ret;
3136} 2737}
3137 2738
3138static int tracing_release_pipe(struct inode *inode, struct file *file) 2739static int tracing_release_pipe(struct inode *inode, struct file *file)
3139{ 2740{
3140 struct trace_iterator *iter = file->private_data; 2741 struct trace_iterator *iter = file->private_data;
3141 2742
2743 mutex_lock(&trace_types_lock);
2744
2745 if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
2746 cpumask_clear(tracing_reader_cpumask);
2747 else
2748 cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
2749
2750 mutex_unlock(&trace_types_lock);
2751
3142 free_cpumask_var(iter->started); 2752 free_cpumask_var(iter->started);
2753 mutex_destroy(&iter->mutex);
2754 kfree(iter->trace);
3143 kfree(iter); 2755 kfree(iter);
3144 atomic_dec(&tracing_reader);
3145 2756
3146 return 0; 2757 return 0;
3147} 2758}
@@ -3167,67 +2778,57 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3167 } 2778 }
3168} 2779}
3169 2780
3170/* 2781
3171 * Consumer reader. 2782void default_wait_pipe(struct trace_iterator *iter)
3172 */
3173static ssize_t
3174tracing_read_pipe(struct file *filp, char __user *ubuf,
3175 size_t cnt, loff_t *ppos)
3176{ 2783{
3177 struct trace_iterator *iter = filp->private_data; 2784 DEFINE_WAIT(wait);
3178 ssize_t sret;
3179 2785
3180 /* return any leftover data */ 2786 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3181 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3182 if (sret != -EBUSY)
3183 return sret;
3184 2787
3185 trace_seq_reset(&iter->seq); 2788 if (trace_empty(iter))
2789 schedule();
3186 2790
3187 mutex_lock(&trace_types_lock); 2791 finish_wait(&trace_wait, &wait);
3188 if (iter->trace->read) { 2792}
3189 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 2793
3190 if (sret) 2794/*
3191 goto out; 2795 * This is a make-shift waitqueue.
3192 } 2796 * A tracer might use this callback in some rare cases:
2797 *
2798 * 1) the current tracer might hold the runqueue lock when it wakes up
2799 * a reader, hence a deadlock (sched, function, and function graph tracers)
2800 * 2) the function tracers trace all functions; we don't want
2801 * the overhead of calling wake_up and friends
2802 * (and tracing them too)
2803 *
2804 * Anyway, this is really a very primitive wakeup.
2805 */
2806void poll_wait_pipe(struct trace_iterator *iter)
2807{
2808 set_current_state(TASK_INTERRUPTIBLE);
2809 /* sleep for 100 msecs, and try again. */
2810 schedule_timeout(HZ / 10);
2811}
2812
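For context, a tracer opts into the polling wait above by pointing its wait_pipe callback at poll_wait_pipe() instead of the waitqueue-based default_wait_pipe(). A minimal in-kernel sketch is below; the tracer name "mytrace" and its init/reset stubs are hypothetical, and such code would live next to the other tracers in kernel/trace/ since trace.h is private to that directory.

/* Sketch only: a hypothetical tracer that uses the polling wait. */
#include <linux/init.h>
#include "trace.h"

static int my_init(struct trace_array *tr)
{
	return 0;			/* nothing to set up in this sketch */
}

static void my_reset(struct trace_array *tr)
{
}

static struct tracer my_tracer = {
	.name		= "mytrace",
	.init		= my_init,
	.reset		= my_reset,
	/* Avoid wake_up() from hot paths: poll every 100 msecs instead. */
	.wait_pipe	= poll_wait_pipe,
};

static __init int my_tracer_init(void)
{
	return register_tracer(&my_tracer);
}
device_initcall(my_tracer_init);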
2813/* Must be called with trace_types_lock mutex held. */
2814static int tracing_wait_pipe(struct file *filp)
2815{
2816 struct trace_iterator *iter = filp->private_data;
3193 2817
3194waitagain:
3195 sret = 0;
3196 while (trace_empty(iter)) { 2818 while (trace_empty(iter)) {
3197 2819
3198 if ((filp->f_flags & O_NONBLOCK)) { 2820 if ((filp->f_flags & O_NONBLOCK)) {
3199 sret = -EAGAIN; 2821 return -EAGAIN;
3200 goto out;
3201 } 2822 }
3202 2823
3203 /* 2824 mutex_unlock(&iter->mutex);
3204 * This is a make-shift waitqueue. The reason we don't use
3205 * an actual wait queue is because:
3206 * 1) we only ever have one waiter
3207 * 2) the tracing, traces all functions, we don't want
3208 * the overhead of calling wake_up and friends
3209 * (and tracing them too)
3210 * Anyway, this is really very primitive wakeup.
3211 */
3212 set_current_state(TASK_INTERRUPTIBLE);
3213 iter->tr->waiter = current;
3214
3215 mutex_unlock(&trace_types_lock);
3216
3217 /* sleep for 100 msecs, and try again. */
3218 schedule_timeout(HZ/10);
3219
3220 mutex_lock(&trace_types_lock);
3221 2825
3222 iter->tr->waiter = NULL; 2826 iter->trace->wait_pipe(iter);
3223 2827
3224 if (signal_pending(current)) { 2828 mutex_lock(&iter->mutex);
3225 sret = -EINTR;
3226 goto out;
3227 }
3228 2829
3229 if (iter->trace != current_trace) 2830 if (signal_pending(current))
3230 goto out; 2831 return -EINTR;
3231 2832
3232 /* 2833 /*
3233 * We block until we read something and tracing is disabled. 2834 * We block until we read something and tracing is disabled.
@@ -3240,13 +2841,59 @@ waitagain:
3240 */ 2841 */
3241 if (!tracer_enabled && iter->pos) 2842 if (!tracer_enabled && iter->pos)
3242 break; 2843 break;
2844 }
2845
2846 return 1;
2847}
2848
2849/*
2850 * Consumer reader.
2851 */
2852static ssize_t
2853tracing_read_pipe(struct file *filp, char __user *ubuf,
2854 size_t cnt, loff_t *ppos)
2855{
2856 struct trace_iterator *iter = filp->private_data;
2857 static struct tracer *old_tracer;
2858 ssize_t sret;
3243 2859
3244 continue; 2860 /* return any leftover data */
2861 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2862 if (sret != -EBUSY)
2863 return sret;
2864
2865 trace_seq_init(&iter->seq);
2866
2867 /* copy the tracer to avoid using a global lock all around */
2868 mutex_lock(&trace_types_lock);
2869 if (unlikely(old_tracer != current_trace && current_trace)) {
2870 old_tracer = current_trace;
2871 *iter->trace = *current_trace;
2872 }
2873 mutex_unlock(&trace_types_lock);
2874
2875 /*
2876	 * Avoid more than one consumer on a single file descriptor.
2877	 * This is just a matter of trace coherency; the ring buffer itself
2878	 * is protected.
2879 */
2880 mutex_lock(&iter->mutex);
2881 if (iter->trace->read) {
2882 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2883 if (sret)
2884 goto out;
3245 } 2885 }
3246 2886
2887waitagain:
2888 sret = tracing_wait_pipe(filp);
2889 if (sret <= 0)
2890 goto out;
2891
3247 /* stop when tracing is finished */ 2892 /* stop when tracing is finished */
3248 if (trace_empty(iter)) 2893 if (trace_empty(iter)) {
2894 sret = 0;
3249 goto out; 2895 goto out;
2896 }
3250 2897
3251 if (cnt >= PAGE_SIZE) 2898 if (cnt >= PAGE_SIZE)
3252 cnt = PAGE_SIZE - 1; 2899 cnt = PAGE_SIZE - 1;
@@ -3267,8 +2914,8 @@ waitagain:
3267 iter->seq.len = len; 2914 iter->seq.len = len;
3268 break; 2915 break;
3269 } 2916 }
3270 2917 if (ret != TRACE_TYPE_NO_CONSUME)
3271 trace_consume(iter); 2918 trace_consume(iter);
3272 2919
3273 if (iter->seq.len >= cnt) 2920 if (iter->seq.len >= cnt)
3274 break; 2921 break;
@@ -3277,7 +2924,7 @@ waitagain:
3277 /* Now copy what we have to the user */ 2924 /* Now copy what we have to the user */
3278 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2925 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3279 if (iter->seq.readpos >= iter->seq.len) 2926 if (iter->seq.readpos >= iter->seq.len)
3280 trace_seq_reset(&iter->seq); 2927 trace_seq_init(&iter->seq);
3281 2928
3282 /* 2929 /*
3283	 * If there was nothing to send to user, in spite of consuming trace 2930	 * If there was nothing to send to user, in spite of consuming trace
@@ -3287,20 +2934,165 @@ waitagain:
3287 goto waitagain; 2934 goto waitagain;
3288 2935
3289out: 2936out:
3290 mutex_unlock(&trace_types_lock); 2937 mutex_unlock(&iter->mutex);
3291 2938
3292 return sret; 2939 return sret;
3293} 2940}
3294 2941
2942static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
2943 struct pipe_buffer *buf)
2944{
2945 __free_page(buf->page);
2946}
2947
2948static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
2949 unsigned int idx)
2950{
2951 __free_page(spd->pages[idx]);
2952}
2953
2954static struct pipe_buf_operations tracing_pipe_buf_ops = {
2955 .can_merge = 0,
2956 .map = generic_pipe_buf_map,
2957 .unmap = generic_pipe_buf_unmap,
2958 .confirm = generic_pipe_buf_confirm,
2959 .release = tracing_pipe_buf_release,
2960 .steal = generic_pipe_buf_steal,
2961 .get = generic_pipe_buf_get,
2962};
2963
2964static size_t
2965tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
2966{
2967 size_t count;
2968 int ret;
2969
2970 /* Seq buffer is page-sized, exactly what we need. */
2971 for (;;) {
2972 count = iter->seq.len;
2973 ret = print_trace_line(iter);
2974 count = iter->seq.len - count;
2975 if (rem < count) {
2976 rem = 0;
2977 iter->seq.len -= count;
2978 break;
2979 }
2980 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2981 iter->seq.len -= count;
2982 break;
2983 }
2984
2985 trace_consume(iter);
2986 rem -= count;
2987 if (!find_next_entry_inc(iter)) {
2988 rem = 0;
2989 iter->ent = NULL;
2990 break;
2991 }
2992 }
2993
2994 return rem;
2995}
2996
2997static ssize_t tracing_splice_read_pipe(struct file *filp,
2998 loff_t *ppos,
2999 struct pipe_inode_info *pipe,
3000 size_t len,
3001 unsigned int flags)
3002{
3003 struct page *pages[PIPE_BUFFERS];
3004 struct partial_page partial[PIPE_BUFFERS];
3005 struct trace_iterator *iter = filp->private_data;
3006 struct splice_pipe_desc spd = {
3007 .pages = pages,
3008 .partial = partial,
3009 .nr_pages = 0, /* This gets updated below. */
3010 .flags = flags,
3011 .ops = &tracing_pipe_buf_ops,
3012 .spd_release = tracing_spd_release_pipe,
3013 };
3014 static struct tracer *old_tracer;
3015 ssize_t ret;
3016 size_t rem;
3017 unsigned int i;
3018
3019 /* copy the tracer to avoid using a global lock all around */
3020 mutex_lock(&trace_types_lock);
3021 if (unlikely(old_tracer != current_trace && current_trace)) {
3022 old_tracer = current_trace;
3023 *iter->trace = *current_trace;
3024 }
3025 mutex_unlock(&trace_types_lock);
3026
3027 mutex_lock(&iter->mutex);
3028
3029 if (iter->trace->splice_read) {
3030 ret = iter->trace->splice_read(iter, filp,
3031 ppos, pipe, len, flags);
3032 if (ret)
3033 goto out_err;
3034 }
3035
3036 ret = tracing_wait_pipe(filp);
3037 if (ret <= 0)
3038 goto out_err;
3039
3040 if (!iter->ent && !find_next_entry_inc(iter)) {
3041 ret = -EFAULT;
3042 goto out_err;
3043 }
3044
3045 /* Fill as many pages as possible. */
3046 for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
3047 pages[i] = alloc_page(GFP_KERNEL);
3048 if (!pages[i])
3049 break;
3050
3051 rem = tracing_fill_pipe_page(rem, iter);
3052
3053 /* Copy the data into the page, so we can start over. */
3054 ret = trace_seq_to_buffer(&iter->seq,
3055 page_address(pages[i]),
3056 iter->seq.len);
3057 if (ret < 0) {
3058 __free_page(pages[i]);
3059 break;
3060 }
3061 partial[i].offset = 0;
3062 partial[i].len = iter->seq.len;
3063
3064 trace_seq_init(&iter->seq);
3065 }
3066
3067 mutex_unlock(&iter->mutex);
3068
3069 spd.nr_pages = i;
3070
3071 return splice_to_pipe(pipe, &spd);
3072
3073out_err:
3074 mutex_unlock(&iter->mutex);
3075
3076 return ret;
3077}
3078
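For context, the new .splice_read path above can be driven from userspace with splice(2). A hedged sketch follows that moves trace data into a file without copying it through a user buffer; the debugfs mount point and the output file name are assumptions.

/* Usage sketch only, not part of the patch. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int in, out;

	in = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	out = open("trace.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}

	for (;;) {
		/* The kernel fills whole pipe pages via tracing_splice_read_pipe(). */
		ssize_t n = splice(in, NULL, pfd[1], NULL, 65536, 0);
		if (n <= 0)
			break;
		/* Drain the pipe into the output file. */
		if (splice(pfd[0], NULL, out, NULL, n, 0) < 0)
			break;
	}

	close(pfd[0]);
	close(pfd[1]);
	close(in);
	close(out);
	return 0;
}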
3295static ssize_t 3079static ssize_t
3296tracing_entries_read(struct file *filp, char __user *ubuf, 3080tracing_entries_read(struct file *filp, char __user *ubuf,
3297 size_t cnt, loff_t *ppos) 3081 size_t cnt, loff_t *ppos)
3298{ 3082{
3299 struct trace_array *tr = filp->private_data; 3083 struct trace_array *tr = filp->private_data;
3300 char buf[64]; 3084 char buf[96];
3301 int r; 3085 int r;
3302 3086
3303 r = sprintf(buf, "%lu\n", tr->entries >> 10); 3087 mutex_lock(&trace_types_lock);
3088 if (!ring_buffer_expanded)
3089 r = sprintf(buf, "%lu (expanded: %lu)\n",
3090 tr->entries >> 10,
3091 trace_buf_size >> 10);
3092 else
3093 r = sprintf(buf, "%lu\n", tr->entries >> 10);
3094 mutex_unlock(&trace_types_lock);
3095
3304 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3096 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3305} 3097}
3306 3098
@@ -3344,28 +3136,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3344 val <<= 10; 3136 val <<= 10;
3345 3137
3346 if (val != global_trace.entries) { 3138 if (val != global_trace.entries) {
3347 ret = ring_buffer_resize(global_trace.buffer, val); 3139 ret = tracing_resize_ring_buffer(val);
3348 if (ret < 0) {
3349 cnt = ret;
3350 goto out;
3351 }
3352
3353 ret = ring_buffer_resize(max_tr.buffer, val);
3354 if (ret < 0) { 3140 if (ret < 0) {
3355 int r;
3356 cnt = ret; 3141 cnt = ret;
3357 r = ring_buffer_resize(global_trace.buffer,
3358 global_trace.entries);
3359 if (r < 0) {
3360 /* AARGH! We are left with different
3361 * size max buffer!!!! */
3362 WARN_ON(1);
3363 tracing_disabled = 1;
3364 }
3365 goto out; 3142 goto out;
3366 } 3143 }
3367
3368 global_trace.entries = val;
3369 } 3144 }
3370 3145
3371 filp->f_pos += cnt; 3146 filp->f_pos += cnt;
@@ -3393,7 +3168,7 @@ static int mark_printk(const char *fmt, ...)
3393 int ret; 3168 int ret;
3394 va_list args; 3169 va_list args;
3395 va_start(args, fmt); 3170 va_start(args, fmt);
3396 ret = trace_vprintk(0, -1, fmt, args); 3171 ret = trace_vprintk(0, fmt, args);
3397 va_end(args); 3172 va_end(args);
3398 return ret; 3173 return ret;
3399} 3174}
@@ -3433,42 +3208,288 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3433 return cnt; 3208 return cnt;
3434} 3209}
3435 3210
3436static struct file_operations tracing_max_lat_fops = { 3211static const struct file_operations tracing_max_lat_fops = {
3437 .open = tracing_open_generic, 3212 .open = tracing_open_generic,
3438 .read = tracing_max_lat_read, 3213 .read = tracing_max_lat_read,
3439 .write = tracing_max_lat_write, 3214 .write = tracing_max_lat_write,
3440}; 3215};
3441 3216
3442static struct file_operations tracing_ctrl_fops = { 3217static const struct file_operations tracing_ctrl_fops = {
3443 .open = tracing_open_generic, 3218 .open = tracing_open_generic,
3444 .read = tracing_ctrl_read, 3219 .read = tracing_ctrl_read,
3445 .write = tracing_ctrl_write, 3220 .write = tracing_ctrl_write,
3446}; 3221};
3447 3222
3448static struct file_operations set_tracer_fops = { 3223static const struct file_operations set_tracer_fops = {
3449 .open = tracing_open_generic, 3224 .open = tracing_open_generic,
3450 .read = tracing_set_trace_read, 3225 .read = tracing_set_trace_read,
3451 .write = tracing_set_trace_write, 3226 .write = tracing_set_trace_write,
3452}; 3227};
3453 3228
3454static struct file_operations tracing_pipe_fops = { 3229static const struct file_operations tracing_pipe_fops = {
3455 .open = tracing_open_pipe, 3230 .open = tracing_open_pipe,
3456 .poll = tracing_poll_pipe, 3231 .poll = tracing_poll_pipe,
3457 .read = tracing_read_pipe, 3232 .read = tracing_read_pipe,
3233 .splice_read = tracing_splice_read_pipe,
3458 .release = tracing_release_pipe, 3234 .release = tracing_release_pipe,
3459}; 3235};
3460 3236
3461static struct file_operations tracing_entries_fops = { 3237static const struct file_operations tracing_entries_fops = {
3462 .open = tracing_open_generic, 3238 .open = tracing_open_generic,
3463 .read = tracing_entries_read, 3239 .read = tracing_entries_read,
3464 .write = tracing_entries_write, 3240 .write = tracing_entries_write,
3465}; 3241};
3466 3242
3467static struct file_operations tracing_mark_fops = { 3243static const struct file_operations tracing_mark_fops = {
3468 .open = tracing_open_generic, 3244 .open = tracing_open_generic,
3469 .write = tracing_mark_write, 3245 .write = tracing_mark_write,
3470}; 3246};
3471 3247
3248struct ftrace_buffer_info {
3249 struct trace_array *tr;
3250 void *spare;
3251 int cpu;
3252 unsigned int read;
3253};
3254
3255static int tracing_buffers_open(struct inode *inode, struct file *filp)
3256{
3257 int cpu = (int)(long)inode->i_private;
3258 struct ftrace_buffer_info *info;
3259
3260 if (tracing_disabled)
3261 return -ENODEV;
3262
3263 info = kzalloc(sizeof(*info), GFP_KERNEL);
3264 if (!info)
3265 return -ENOMEM;
3266
3267 info->tr = &global_trace;
3268 info->cpu = cpu;
3269 info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
3270 /* Force reading ring buffer for first read */
3271 info->read = (unsigned int)-1;
3272 if (!info->spare)
3273 goto out;
3274
3275 filp->private_data = info;
3276
3277 return 0;
3278
3279 out:
3280 kfree(info);
3281 return -ENOMEM;
3282}
3283
3284static ssize_t
3285tracing_buffers_read(struct file *filp, char __user *ubuf,
3286 size_t count, loff_t *ppos)
3287{
3288 struct ftrace_buffer_info *info = filp->private_data;
3289 unsigned int pos;
3290 ssize_t ret;
3291 size_t size;
3292
3293 if (!count)
3294 return 0;
3295
3296 /* Do we have previous read data to read? */
3297 if (info->read < PAGE_SIZE)
3298 goto read;
3299
3300 info->read = 0;
3301
3302 ret = ring_buffer_read_page(info->tr->buffer,
3303 &info->spare,
3304 count,
3305 info->cpu, 0);
3306 if (ret < 0)
3307 return 0;
3308
3309 pos = ring_buffer_page_len(info->spare);
3310
3311 if (pos < PAGE_SIZE)
3312 memset(info->spare + pos, 0, PAGE_SIZE - pos);
3313
3314read:
3315 size = PAGE_SIZE - info->read;
3316 if (size > count)
3317 size = count;
3318
3319 ret = copy_to_user(ubuf, info->spare + info->read, size);
3320 if (ret == size)
3321 return -EFAULT;
3322 size -= ret;
3323
3324 *ppos += size;
3325 info->read += size;
3326
3327 return size;
3328}
3329
3330static int tracing_buffers_release(struct inode *inode, struct file *file)
3331{
3332 struct ftrace_buffer_info *info = file->private_data;
3333
3334 ring_buffer_free_read_page(info->tr->buffer, info->spare);
3335 kfree(info);
3336
3337 return 0;
3338}
3339
3340struct buffer_ref {
3341 struct ring_buffer *buffer;
3342 void *page;
3343 int ref;
3344};
3345
3346static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
3347 struct pipe_buffer *buf)
3348{
3349 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3350
3351 if (--ref->ref)
3352 return;
3353
3354 ring_buffer_free_read_page(ref->buffer, ref->page);
3355 kfree(ref);
3356 buf->private = 0;
3357}
3358
3359static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
3360 struct pipe_buffer *buf)
3361{
3362 return 1;
3363}
3364
3365static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3366 struct pipe_buffer *buf)
3367{
3368 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3369
3370 ref->ref++;
3371}
3372
3373/* Pipe buffer operations for a buffer. */
3374static struct pipe_buf_operations buffer_pipe_buf_ops = {
3375 .can_merge = 0,
3376 .map = generic_pipe_buf_map,
3377 .unmap = generic_pipe_buf_unmap,
3378 .confirm = generic_pipe_buf_confirm,
3379 .release = buffer_pipe_buf_release,
3380 .steal = buffer_pipe_buf_steal,
3381 .get = buffer_pipe_buf_get,
3382};
3383
3384/*
3385 * Callback from splice_to_pipe(), if we need to release some pages
3386 * at the end of the spd in case we errored out while filling the pipe.
3387 */
3388static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
3389{
3390 struct buffer_ref *ref =
3391 (struct buffer_ref *)spd->partial[i].private;
3392
3393 if (--ref->ref)
3394 return;
3395
3396 ring_buffer_free_read_page(ref->buffer, ref->page);
3397 kfree(ref);
3398 spd->partial[i].private = 0;
3399}
3400
3401static ssize_t
3402tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3403 struct pipe_inode_info *pipe, size_t len,
3404 unsigned int flags)
3405{
3406 struct ftrace_buffer_info *info = file->private_data;
3407 struct partial_page partial[PIPE_BUFFERS];
3408 struct page *pages[PIPE_BUFFERS];
3409 struct splice_pipe_desc spd = {
3410 .pages = pages,
3411 .partial = partial,
3412 .flags = flags,
3413 .ops = &buffer_pipe_buf_ops,
3414 .spd_release = buffer_spd_release,
3415 };
3416 struct buffer_ref *ref;
3417 int size, i;
3418 size_t ret;
3419
3420 /*
3421 * We can't seek on a buffer input
3422 */
3423 if (unlikely(*ppos))
3424 return -ESPIPE;
3425
3426
3427 for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
3428 struct page *page;
3429 int r;
3430
3431 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3432 if (!ref)
3433 break;
3434
3435 ref->buffer = info->tr->buffer;
3436 ref->page = ring_buffer_alloc_read_page(ref->buffer);
3437 if (!ref->page) {
3438 kfree(ref);
3439 break;
3440 }
3441
3442 r = ring_buffer_read_page(ref->buffer, &ref->page,
3443 len, info->cpu, 0);
3444 if (r < 0) {
3445 ring_buffer_free_read_page(ref->buffer,
3446 ref->page);
3447 kfree(ref);
3448 break;
3449 }
3450
3451 /*
3452		 * Zero out any leftover data; this is going to
3453 * user land.
3454 */
3455 size = ring_buffer_page_len(ref->page);
3456 if (size < PAGE_SIZE)
3457 memset(ref->page + size, 0, PAGE_SIZE - size);
3458
3459 page = virt_to_page(ref->page);
3460
3461 spd.pages[i] = page;
3462 spd.partial[i].len = PAGE_SIZE;
3463 spd.partial[i].offset = 0;
3464 spd.partial[i].private = (unsigned long)ref;
3465 spd.nr_pages++;
3466 }
3467
3468 spd.nr_pages = i;
3469
3470 /* did we read anything? */
3471 if (!spd.nr_pages) {
3472 if (flags & SPLICE_F_NONBLOCK)
3473 ret = -EAGAIN;
3474 else
3475 ret = 0;
3476 /* TODO: block */
3477 return ret;
3478 }
3479
3480 ret = splice_to_pipe(pipe, &spd);
3481
3482 return ret;
3483}
3484
3485static const struct file_operations tracing_buffers_fops = {
3486 .open = tracing_buffers_open,
3487 .read = tracing_buffers_read,
3488 .release = tracing_buffers_release,
3489 .splice_read = tracing_buffers_splice_read,
3490 .llseek = no_llseek,
3491};
3492
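For context, trace_pipe_raw hands out whole, zero-padded ring-buffer pages via tracing_buffers_read() above. A hedged userspace sketch that captures them for one cpu follows; it assumes debugfs at /sys/kernel/debug, a 4096-byte page size, and the per_cpu/cpu0 directory created by tracing_init_debugfs_percpu() further down in this patch.

/* Usage sketch only, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];		/* assumed PAGE_SIZE */
	ssize_t n;
	int in, out;

	in = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		  O_RDONLY);
	out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	/* Each read returns at most one page; 0 means the buffer is empty. */
	while ((n = read(in, page, sizeof(page))) > 0) {
		if (write(out, page, n) != n) {
			perror("write");
			break;
		}
	}
	close(in);
	close(out);
	return 0;
}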
3472#ifdef CONFIG_DYNAMIC_FTRACE 3493#ifdef CONFIG_DYNAMIC_FTRACE
3473 3494
3474int __weak ftrace_arch_read_dyn_info(char *buf, int size) 3495int __weak ftrace_arch_read_dyn_info(char *buf, int size)
@@ -3500,7 +3521,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
3500 return r; 3521 return r;
3501} 3522}
3502 3523
3503static struct file_operations tracing_dyn_info_fops = { 3524static const struct file_operations tracing_dyn_info_fops = {
3504 .open = tracing_open_generic, 3525 .open = tracing_open_generic,
3505 .read = tracing_read_dyn_info, 3526 .read = tracing_read_dyn_info,
3506}; 3527};
@@ -3515,6 +3536,9 @@ struct dentry *tracing_init_dentry(void)
3515 if (d_tracer) 3536 if (d_tracer)
3516 return d_tracer; 3537 return d_tracer;
3517 3538
3539 if (!debugfs_initialized())
3540 return NULL;
3541
3518 d_tracer = debugfs_create_dir("tracing", NULL); 3542 d_tracer = debugfs_create_dir("tracing", NULL);
3519 3543
3520 if (!d_tracer && !once) { 3544 if (!d_tracer && !once) {
@@ -3526,15 +3550,350 @@ struct dentry *tracing_init_dentry(void)
3526 return d_tracer; 3550 return d_tracer;
3527} 3551}
3528 3552
3553static struct dentry *d_percpu;
3554
3555struct dentry *tracing_dentry_percpu(void)
3556{
3557 static int once;
3558 struct dentry *d_tracer;
3559
3560 if (d_percpu)
3561 return d_percpu;
3562
3563 d_tracer = tracing_init_dentry();
3564
3565 if (!d_tracer)
3566 return NULL;
3567
3568 d_percpu = debugfs_create_dir("per_cpu", d_tracer);
3569
3570 if (!d_percpu && !once) {
3571 once = 1;
3572 pr_warning("Could not create debugfs directory 'per_cpu'\n");
3573 return NULL;
3574 }
3575
3576 return d_percpu;
3577}
3578
3579static void tracing_init_debugfs_percpu(long cpu)
3580{
3581 struct dentry *d_percpu = tracing_dentry_percpu();
3582 struct dentry *entry, *d_cpu;
3583	/* strlen("cpu") + up to 3 digits + '\0' */
3584 char cpu_dir[7];
3585
3586 if (cpu > 999 || cpu < 0)
3587 return;
3588
3589 sprintf(cpu_dir, "cpu%ld", cpu);
3590 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
3591 if (!d_cpu) {
3592 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
3593 return;
3594 }
3595
3596 /* per cpu trace_pipe */
3597 entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
3598 (void *) cpu, &tracing_pipe_fops);
3599 if (!entry)
3600 pr_warning("Could not create debugfs 'trace_pipe' entry\n");
3601
3602 /* per cpu trace */
3603 entry = debugfs_create_file("trace", 0644, d_cpu,
3604 (void *) cpu, &tracing_fops);
3605 if (!entry)
3606 pr_warning("Could not create debugfs 'trace' entry\n");
3607
3608 entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
3609 (void *) cpu, &tracing_buffers_fops);
3610 if (!entry)
3611 pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
3612}
3613
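For context, the per_cpu/cpuN files created above are consumed like the global ones. A hedged userspace sketch that drains one cpu's pipe is below, assuming debugfs is mounted at /sys/kernel/debug; a second open of the same pipe gets -EBUSY from tracing_open_pipe().

/* Usage sketch only, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe",
		  O_RDONLY);
	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	/* Blocking, consuming read: entries are removed as they are read. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}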
3529#ifdef CONFIG_FTRACE_SELFTEST 3614#ifdef CONFIG_FTRACE_SELFTEST
3530/* Let selftest have access to static functions in this file */ 3615/* Let selftest have access to static functions in this file */
3531#include "trace_selftest.c" 3616#include "trace_selftest.c"
3532#endif 3617#endif
3533 3618
3619struct trace_option_dentry {
3620 struct tracer_opt *opt;
3621 struct tracer_flags *flags;
3622 struct dentry *entry;
3623};
3624
3625static ssize_t
3626trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
3627 loff_t *ppos)
3628{
3629 struct trace_option_dentry *topt = filp->private_data;
3630 char *buf;
3631
3632 if (topt->flags->val & topt->opt->bit)
3633 buf = "1\n";
3634 else
3635 buf = "0\n";
3636
3637 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3638}
3639
3640static ssize_t
3641trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
3642 loff_t *ppos)
3643{
3644 struct trace_option_dentry *topt = filp->private_data;
3645 unsigned long val;
3646 char buf[64];
3647 int ret;
3648
3649 if (cnt >= sizeof(buf))
3650 return -EINVAL;
3651
3652 if (copy_from_user(&buf, ubuf, cnt))
3653 return -EFAULT;
3654
3655 buf[cnt] = 0;
3656
3657 ret = strict_strtoul(buf, 10, &val);
3658 if (ret < 0)
3659 return ret;
3660
3661 ret = 0;
3662 switch (val) {
3663 case 0:
3664 /* do nothing if already cleared */
3665 if (!(topt->flags->val & topt->opt->bit))
3666 break;
3667
3668 mutex_lock(&trace_types_lock);
3669 if (current_trace->set_flag)
3670 ret = current_trace->set_flag(topt->flags->val,
3671 topt->opt->bit, 0);
3672 mutex_unlock(&trace_types_lock);
3673 if (ret)
3674 return ret;
3675 topt->flags->val &= ~topt->opt->bit;
3676 break;
3677 case 1:
3678 /* do nothing if already set */
3679 if (topt->flags->val & topt->opt->bit)
3680 break;
3681
3682 mutex_lock(&trace_types_lock);
3683 if (current_trace->set_flag)
3684 ret = current_trace->set_flag(topt->flags->val,
3685 topt->opt->bit, 1);
3686 mutex_unlock(&trace_types_lock);
3687 if (ret)
3688 return ret;
3689 topt->flags->val |= topt->opt->bit;
3690 break;
3691
3692 default:
3693 return -EINVAL;
3694 }
3695
3696 *ppos += cnt;
3697
3698 return cnt;
3699}
3700
3701
3702static const struct file_operations trace_options_fops = {
3703 .open = tracing_open_generic,
3704 .read = trace_options_read,
3705 .write = trace_options_write,
3706};
3707
3708static ssize_t
3709trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
3710 loff_t *ppos)
3711{
3712 long index = (long)filp->private_data;
3713 char *buf;
3714
3715 if (trace_flags & (1 << index))
3716 buf = "1\n";
3717 else
3718 buf = "0\n";
3719
3720 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3721}
3722
3723static ssize_t
3724trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
3725 loff_t *ppos)
3726{
3727 long index = (long)filp->private_data;
3728 char buf[64];
3729 unsigned long val;
3730 int ret;
3731
3732 if (cnt >= sizeof(buf))
3733 return -EINVAL;
3734
3735 if (copy_from_user(&buf, ubuf, cnt))
3736 return -EFAULT;
3737
3738 buf[cnt] = 0;
3739
3740 ret = strict_strtoul(buf, 10, &val);
3741 if (ret < 0)
3742 return ret;
3743
3744 switch (val) {
3745 case 0:
3746 trace_flags &= ~(1 << index);
3747 break;
3748 case 1:
3749 trace_flags |= 1 << index;
3750 break;
3751
3752 default:
3753 return -EINVAL;
3754 }
3755
3756 *ppos += cnt;
3757
3758 return cnt;
3759}
3760
3761static const struct file_operations trace_options_core_fops = {
3762 .open = tracing_open_generic,
3763 .read = trace_options_core_read,
3764 .write = trace_options_core_write,
3765};
3766
3767static struct dentry *trace_options_init_dentry(void)
3768{
3769 struct dentry *d_tracer;
3770 static struct dentry *t_options;
3771
3772 if (t_options)
3773 return t_options;
3774
3775 d_tracer = tracing_init_dentry();
3776 if (!d_tracer)
3777 return NULL;
3778
3779 t_options = debugfs_create_dir("options", d_tracer);
3780 if (!t_options) {
3781 pr_warning("Could not create debugfs directory 'options'\n");
3782 return NULL;
3783 }
3784
3785 return t_options;
3786}
3787
3788static void
3789create_trace_option_file(struct trace_option_dentry *topt,
3790 struct tracer_flags *flags,
3791 struct tracer_opt *opt)
3792{
3793 struct dentry *t_options;
3794 struct dentry *entry;
3795
3796 t_options = trace_options_init_dentry();
3797 if (!t_options)
3798 return;
3799
3800 topt->flags = flags;
3801 topt->opt = opt;
3802
3803 entry = debugfs_create_file(opt->name, 0644, t_options, topt,
3804 &trace_options_fops);
3805
3806 topt->entry = entry;
3807
3808}
3809
3810static struct trace_option_dentry *
3811create_trace_option_files(struct tracer *tracer)
3812{
3813 struct trace_option_dentry *topts;
3814 struct tracer_flags *flags;
3815 struct tracer_opt *opts;
3816 int cnt;
3817
3818 if (!tracer)
3819 return NULL;
3820
3821 flags = tracer->flags;
3822
3823 if (!flags || !flags->opts)
3824 return NULL;
3825
3826 opts = flags->opts;
3827
3828 for (cnt = 0; opts[cnt].name; cnt++)
3829 ;
3830
3831 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
3832 if (!topts)
3833 return NULL;
3834
3835 for (cnt = 0; opts[cnt].name; cnt++)
3836 create_trace_option_file(&topts[cnt], flags,
3837 &opts[cnt]);
3838
3839 return topts;
3840}
3841
3842static void
3843destroy_trace_option_files(struct trace_option_dentry *topts)
3844{
3845 int cnt;
3846
3847 if (!topts)
3848 return;
3849
3850 for (cnt = 0; topts[cnt].opt; cnt++) {
3851 if (topts[cnt].entry)
3852 debugfs_remove(topts[cnt].entry);
3853 }
3854
3855 kfree(topts);
3856}
3857
3858static struct dentry *
3859create_trace_option_core_file(const char *option, long index)
3860{
3861 struct dentry *t_options;
3862 struct dentry *entry;
3863
3864 t_options = trace_options_init_dentry();
3865 if (!t_options)
3866 return NULL;
3867
3868 entry = debugfs_create_file(option, 0644, t_options, (void *)index,
3869 &trace_options_core_fops);
3870
3871 return entry;
3872}
3873
3874static __init void create_trace_options_dir(void)
3875{
3876 struct dentry *t_options;
3877 struct dentry *entry;
3878 int i;
3879
3880 t_options = trace_options_init_dentry();
3881 if (!t_options)
3882 return;
3883
3884 for (i = 0; trace_options[i]; i++) {
3885 entry = create_trace_option_core_file(trace_options[i], i);
3886 if (!entry)
3887 pr_warning("Could not create debugfs %s entry\n",
3888 trace_options[i]);
3889 }
3890}
3891
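For context, each core flag exported by create_trace_options_dir() above can also be flipped individually with a "0" or "1" write to its options/<name> file. A hedged sketch follows; the option name "sym-userobj" and the debugfs mount point are assumptions.

/* Usage sketch only, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_opt(const char *name, int on)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/options/%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* trace_options_core_write() accepts "0" or "1". */
	if (write(fd, on ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	if (set_opt("sym-userobj", 1))
		perror("set_opt");
	return 0;
}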
3534static __init int tracer_init_debugfs(void) 3892static __init int tracer_init_debugfs(void)
3535{ 3893{
3536 struct dentry *d_tracer; 3894 struct dentry *d_tracer;
3537 struct dentry *entry; 3895 struct dentry *entry;
3896 int cpu;
3538 3897
3539 d_tracer = tracing_init_dentry(); 3898 d_tracer = tracing_init_dentry();
3540 3899
@@ -3548,18 +3907,15 @@ static __init int tracer_init_debugfs(void)
3548 if (!entry) 3907 if (!entry)
3549 pr_warning("Could not create debugfs 'trace_options' entry\n"); 3908 pr_warning("Could not create debugfs 'trace_options' entry\n");
3550 3909
3910 create_trace_options_dir();
3911
3551 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, 3912 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
3552 NULL, &tracing_cpumask_fops); 3913 NULL, &tracing_cpumask_fops);
3553 if (!entry) 3914 if (!entry)
3554 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); 3915 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
3555 3916
3556 entry = debugfs_create_file("latency_trace", 0444, d_tracer, 3917 entry = debugfs_create_file("trace", 0644, d_tracer,
3557 &global_trace, &tracing_lt_fops); 3918 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
3558 if (!entry)
3559 pr_warning("Could not create debugfs 'latency_trace' entry\n");
3560
3561 entry = debugfs_create_file("trace", 0444, d_tracer,
3562 &global_trace, &tracing_fops);
3563 if (!entry) 3919 if (!entry)
3564 pr_warning("Could not create debugfs 'trace' entry\n"); 3920 pr_warning("Could not create debugfs 'trace' entry\n");
3565 3921
@@ -3590,8 +3946,8 @@ static __init int tracer_init_debugfs(void)
3590 if (!entry) 3946 if (!entry)
3591 pr_warning("Could not create debugfs 'README' entry\n"); 3947 pr_warning("Could not create debugfs 'README' entry\n");
3592 3948
3593 entry = debugfs_create_file("trace_pipe", 0644, d_tracer, 3949 entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
3594 NULL, &tracing_pipe_fops); 3950 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
3595 if (!entry) 3951 if (!entry)
3596 pr_warning("Could not create debugfs " 3952 pr_warning("Could not create debugfs "
3597 "'trace_pipe' entry\n"); 3953 "'trace_pipe' entry\n");
@@ -3619,77 +3975,12 @@ static __init int tracer_init_debugfs(void)
3619#ifdef CONFIG_SYSPROF_TRACER 3975#ifdef CONFIG_SYSPROF_TRACER
3620 init_tracer_sysprof_debugfs(d_tracer); 3976 init_tracer_sysprof_debugfs(d_tracer);
3621#endif 3977#endif
3622 return 0;
3623}
3624
3625int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
3626{
3627 static DEFINE_SPINLOCK(trace_buf_lock);
3628 static char trace_buf[TRACE_BUF_SIZE];
3629
3630 struct ring_buffer_event *event;
3631 struct trace_array *tr = &global_trace;
3632 struct trace_array_cpu *data;
3633 int cpu, len = 0, size, pc;
3634 struct print_entry *entry;
3635 unsigned long irq_flags;
3636
3637 if (tracing_disabled || tracing_selftest_running)
3638 return 0;
3639
3640 pc = preempt_count();
3641 preempt_disable_notrace();
3642 cpu = raw_smp_processor_id();
3643 data = tr->data[cpu];
3644
3645 if (unlikely(atomic_read(&data->disabled)))
3646 goto out;
3647
3648 pause_graph_tracing();
3649 spin_lock_irqsave(&trace_buf_lock, irq_flags);
3650 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
3651
3652 len = min(len, TRACE_BUF_SIZE-1);
3653 trace_buf[len] = 0;
3654
3655 size = sizeof(*entry) + len + 1;
3656 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
3657 if (!event)
3658 goto out_unlock;
3659 entry = ring_buffer_event_data(event);
3660 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
3661 entry->ent.type = TRACE_PRINT;
3662 entry->ip = ip;
3663 entry->depth = depth;
3664
3665 memcpy(&entry->buf, trace_buf, len);
3666 entry->buf[len] = 0;
3667 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3668
3669 out_unlock:
3670 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3671 unpause_graph_tracing();
3672 out:
3673 preempt_enable_notrace();
3674
3675 return len;
3676}
3677EXPORT_SYMBOL_GPL(trace_vprintk);
3678 3978
3679int __ftrace_printk(unsigned long ip, const char *fmt, ...) 3979 for_each_tracing_cpu(cpu)
3680{ 3980 tracing_init_debugfs_percpu(cpu);
3681 int ret;
3682 va_list ap;
3683
3684 if (!(trace_flags & TRACE_ITER_PRINTK))
3685 return 0;
3686 3981
3687 va_start(ap, fmt); 3982 return 0;
3688 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
3689 va_end(ap);
3690 return ret;
3691} 3983}
3692EXPORT_SYMBOL_GPL(__ftrace_printk);
3693 3984
3694static int trace_panic_handler(struct notifier_block *this, 3985static int trace_panic_handler(struct notifier_block *this,
3695 unsigned long event, void *unused) 3986 unsigned long event, void *unused)
@@ -3750,14 +4041,15 @@ trace_printk_seq(struct trace_seq *s)
3750 4041
3751 printk(KERN_TRACE "%s", s->buffer); 4042 printk(KERN_TRACE "%s", s->buffer);
3752 4043
3753 trace_seq_reset(s); 4044 trace_seq_init(s);
3754} 4045}
3755 4046
3756void ftrace_dump(void) 4047static void __ftrace_dump(bool disable_tracing)
3757{ 4048{
3758 static DEFINE_SPINLOCK(ftrace_dump_lock); 4049 static DEFINE_SPINLOCK(ftrace_dump_lock);
3759 /* use static because iter can be a bit big for the stack */ 4050 /* use static because iter can be a bit big for the stack */
3760 static struct trace_iterator iter; 4051 static struct trace_iterator iter;
4052 unsigned int old_userobj;
3761 static int dump_ran; 4053 static int dump_ran;
3762 unsigned long flags; 4054 unsigned long flags;
3763 int cnt = 0, cpu; 4055 int cnt = 0, cpu;
@@ -3769,21 +4061,26 @@ void ftrace_dump(void)
3769 4061
3770 dump_ran = 1; 4062 dump_ran = 1;
3771 4063
3772 /* No turning back! */
3773 tracing_off(); 4064 tracing_off();
3774 ftrace_kill(); 4065
4066 if (disable_tracing)
4067 ftrace_kill();
3775 4068
3776 for_each_tracing_cpu(cpu) { 4069 for_each_tracing_cpu(cpu) {
3777 atomic_inc(&global_trace.data[cpu]->disabled); 4070 atomic_inc(&global_trace.data[cpu]->disabled);
3778 } 4071 }
3779 4072
4073 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4074
3780 /* don't look at user memory in panic mode */ 4075 /* don't look at user memory in panic mode */
3781 trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 4076 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
3782 4077
3783 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 4078 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3784 4079
4080 /* Simulate the iterator */
3785 iter.tr = &global_trace; 4081 iter.tr = &global_trace;
3786 iter.trace = current_trace; 4082 iter.trace = current_trace;
4083 iter.cpu_file = TRACE_PIPE_ALL_CPU;
3787 4084
3788 /* 4085 /*
3789 * We need to stop all tracing on all CPUS to read the 4086 * We need to stop all tracing on all CPUS to read the
@@ -3819,13 +4116,30 @@ void ftrace_dump(void)
3819 else 4116 else
3820 printk(KERN_TRACE "---------------------------------\n"); 4117 printk(KERN_TRACE "---------------------------------\n");
3821 4118
4119 /* Re-enable tracing if requested */
4120 if (!disable_tracing) {
4121 trace_flags |= old_userobj;
4122
4123 for_each_tracing_cpu(cpu) {
4124 atomic_dec(&global_trace.data[cpu]->disabled);
4125 }
4126 tracing_on();
4127 }
4128
3822 out: 4129 out:
3823 spin_unlock_irqrestore(&ftrace_dump_lock, flags); 4130 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3824} 4131}
3825 4132
4133/* By default: disable tracing after the dump */
4134void ftrace_dump(void)
4135{
4136 __ftrace_dump(true);
4137}
4138
3826__init static int tracer_alloc_buffers(void) 4139__init static int tracer_alloc_buffers(void)
3827{ 4140{
3828 struct trace_array_cpu *data; 4141 struct trace_array_cpu *data;
4142 int ring_buf_size;
3829 int i; 4143 int i;
3830 int ret = -ENOMEM; 4144 int ret = -ENOMEM;
3831 4145
@@ -3835,11 +4149,21 @@ __init static int tracer_alloc_buffers(void)
3835 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 4149 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
3836 goto out_free_buffer_mask; 4150 goto out_free_buffer_mask;
3837 4151
4152 if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
4153 goto out_free_tracing_cpumask;
4154
4155 /* To save memory, keep the ring buffer size to its minimum */
4156 if (ring_buffer_expanded)
4157 ring_buf_size = trace_buf_size;
4158 else
4159 ring_buf_size = 1;
4160
3838 cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 4161 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
3839 cpumask_copy(tracing_cpumask, cpu_all_mask); 4162 cpumask_copy(tracing_cpumask, cpu_all_mask);
4163 cpumask_clear(tracing_reader_cpumask);
3840 4164
3841 /* TODO: make the number of buffers hot pluggable with CPUS */ 4165 /* TODO: make the number of buffers hot pluggable with CPUS */
3842 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 4166 global_trace.buffer = ring_buffer_alloc(ring_buf_size,
3843 TRACE_BUFFER_FLAGS); 4167 TRACE_BUFFER_FLAGS);
3844 if (!global_trace.buffer) { 4168 if (!global_trace.buffer) {
3845 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 4169 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
@@ -3850,7 +4174,7 @@ __init static int tracer_alloc_buffers(void)
3850 4174
3851 4175
3852#ifdef CONFIG_TRACER_MAX_TRACE 4176#ifdef CONFIG_TRACER_MAX_TRACE
3853 max_tr.buffer = ring_buffer_alloc(trace_buf_size, 4177 max_tr.buffer = ring_buffer_alloc(ring_buf_size,
3854 TRACE_BUFFER_FLAGS); 4178 TRACE_BUFFER_FLAGS);
3855 if (!max_tr.buffer) { 4179 if (!max_tr.buffer) {
3856 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 4180 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
@@ -3871,14 +4195,10 @@ __init static int tracer_alloc_buffers(void)
3871 trace_init_cmdlines(); 4195 trace_init_cmdlines();
3872 4196
3873 register_tracer(&nop_trace); 4197 register_tracer(&nop_trace);
4198 current_trace = &nop_trace;
3874#ifdef CONFIG_BOOT_TRACER 4199#ifdef CONFIG_BOOT_TRACER
3875 register_tracer(&boot_tracer); 4200 register_tracer(&boot_tracer);
3876 current_trace = &boot_tracer;
3877 current_trace->init(&global_trace);
3878#else
3879 current_trace = &nop_trace;
3880#endif 4201#endif
3881
3882 /* All seems OK, enable tracing */ 4202 /* All seems OK, enable tracing */
3883 tracing_disabled = 0; 4203 tracing_disabled = 0;
3884 4204
@@ -3886,14 +4206,38 @@ __init static int tracer_alloc_buffers(void)
3886 &trace_panic_notifier); 4206 &trace_panic_notifier);
3887 4207
3888 register_die_notifier(&trace_die_notifier); 4208 register_die_notifier(&trace_die_notifier);
3889 ret = 0; 4209
4210 return 0;
3890 4211
3891out_free_cpumask: 4212out_free_cpumask:
4213 free_cpumask_var(tracing_reader_cpumask);
4214out_free_tracing_cpumask:
3892 free_cpumask_var(tracing_cpumask); 4215 free_cpumask_var(tracing_cpumask);
3893out_free_buffer_mask: 4216out_free_buffer_mask:
3894 free_cpumask_var(tracing_buffer_mask); 4217 free_cpumask_var(tracing_buffer_mask);
3895out: 4218out:
3896 return ret; 4219 return ret;
3897} 4220}
4221
4222__init static int clear_boot_tracer(void)
4223{
4224 /*
4225 * The default bootup tracer name points into an init section.
4226 * This function is called at late_initcall time. If we did not
4227 * find the boot tracer by then, clear it out, to prevent
4228 * later registration from accessing the buffer that is
4229 * about to be freed.
4230 */
4231 if (!default_bootup_tracer)
4232 return 0;
4233
4234 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
4235 default_bootup_tracer);
4236 default_bootup_tracer = NULL;
4237
4238 return 0;
4239}
4240
3898early_initcall(tracer_alloc_buffers); 4241early_initcall(tracer_alloc_buffers);
3899fs_initcall(tracer_init_debugfs); 4242fs_initcall(tracer_init_debugfs);
4243late_initcall(clear_boot_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4d3d381bfd95..cb0ce3fc36d3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,6 +9,8 @@
9#include <linux/mmiotrace.h> 9#include <linux/mmiotrace.h>
10#include <linux/ftrace.h> 10#include <linux/ftrace.h>
11#include <trace/boot.h> 11#include <trace/boot.h>
12#include <trace/kmemtrace.h>
13#include <trace/power.h>
12 14
13enum trace_type { 15enum trace_type {
14 __TRACE_FIRST_TYPE = 0, 16 __TRACE_FIRST_TYPE = 0,
@@ -16,9 +18,9 @@ enum trace_type {
16 TRACE_FN, 18 TRACE_FN,
17 TRACE_CTX, 19 TRACE_CTX,
18 TRACE_WAKE, 20 TRACE_WAKE,
19 TRACE_CONT,
20 TRACE_STACK, 21 TRACE_STACK,
21 TRACE_PRINT, 22 TRACE_PRINT,
23 TRACE_BPRINT,
22 TRACE_SPECIAL, 24 TRACE_SPECIAL,
23 TRACE_MMIO_RW, 25 TRACE_MMIO_RW,
24 TRACE_MMIO_MAP, 26 TRACE_MMIO_MAP,
@@ -29,9 +31,14 @@ enum trace_type {
29 TRACE_GRAPH_ENT, 31 TRACE_GRAPH_ENT,
30 TRACE_USER_STACK, 32 TRACE_USER_STACK,
31 TRACE_HW_BRANCHES, 33 TRACE_HW_BRANCHES,
34 TRACE_SYSCALL_ENTER,
35 TRACE_SYSCALL_EXIT,
36 TRACE_KMEM_ALLOC,
37 TRACE_KMEM_FREE,
32 TRACE_POWER, 38 TRACE_POWER,
39 TRACE_BLK,
33 40
34 __TRACE_LAST_TYPE 41 __TRACE_LAST_TYPE,
35}; 42};
36 43
37/* 44/*
@@ -42,7 +49,6 @@ enum trace_type {
42 */ 49 */
43struct trace_entry { 50struct trace_entry {
44 unsigned char type; 51 unsigned char type;
45 unsigned char cpu;
46 unsigned char flags; 52 unsigned char flags;
47 unsigned char preempt_count; 53 unsigned char preempt_count;
48 int pid; 54 int pid;
@@ -60,13 +66,13 @@ struct ftrace_entry {
60 66
61/* Function call entry */ 67/* Function call entry */
62struct ftrace_graph_ent_entry { 68struct ftrace_graph_ent_entry {
63 struct trace_entry ent; 69 struct trace_entry ent;
64 struct ftrace_graph_ent graph_ent; 70 struct ftrace_graph_ent graph_ent;
65}; 71};
66 72
67/* Function return entry */ 73/* Function return entry */
68struct ftrace_graph_ret_entry { 74struct ftrace_graph_ret_entry {
69 struct trace_entry ent; 75 struct trace_entry ent;
70 struct ftrace_graph_ret ret; 76 struct ftrace_graph_ret ret;
71}; 77};
72extern struct tracer boot_tracer; 78extern struct tracer boot_tracer;
@@ -112,12 +118,18 @@ struct userstack_entry {
112}; 118};
113 119
114/* 120/*
115 * ftrace_printk entry: 121 * trace_printk entry:
116 */ 122 */
123struct bprint_entry {
124 struct trace_entry ent;
125 unsigned long ip;
126 const char *fmt;
127 u32 buf[];
128};
129
117struct print_entry { 130struct print_entry {
118 struct trace_entry ent; 131 struct trace_entry ent;
119 unsigned long ip; 132 unsigned long ip;
120 int depth;
121 char buf[]; 133 char buf[];
122}; 134};
123 135
@@ -170,15 +182,45 @@ struct trace_power {
170 struct power_trace state_data; 182 struct power_trace state_data;
171}; 183};
172 184
185struct kmemtrace_alloc_entry {
186 struct trace_entry ent;
187 enum kmemtrace_type_id type_id;
188 unsigned long call_site;
189 const void *ptr;
190 size_t bytes_req;
191 size_t bytes_alloc;
192 gfp_t gfp_flags;
193 int node;
194};
195
196struct kmemtrace_free_entry {
197 struct trace_entry ent;
198 enum kmemtrace_type_id type_id;
199 unsigned long call_site;
200 const void *ptr;
201};
202
203struct syscall_trace_enter {
204 struct trace_entry ent;
205 int nr;
206 unsigned long args[];
207};
208
209struct syscall_trace_exit {
210 struct trace_entry ent;
211 int nr;
212 unsigned long ret;
213};
214
215
173/* 216/*
174 * trace_flag_type is an enumeration that holds different 217 * trace_flag_type is an enumeration that holds different
175 * states when a trace occurs. These are: 218 * states when a trace occurs. These are:
176 * IRQS_OFF - interrupts were disabled 219 * IRQS_OFF - interrupts were disabled
177 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags 220 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
178 * NEED_RESCHED - reschedule is requested 221 * NEED_RESCHED - reschedule is requested
179 * HARDIRQ - inside an interrupt handler 222 * HARDIRQ - inside an interrupt handler
180 * SOFTIRQ - inside a softirq handler 223 * SOFTIRQ - inside a softirq handler
181 * CONT - multiple entries hold the trace item
182 */ 224 */
183enum trace_flag_type { 225enum trace_flag_type {
184 TRACE_FLAG_IRQS_OFF = 0x01, 226 TRACE_FLAG_IRQS_OFF = 0x01,
@@ -186,7 +228,6 @@ enum trace_flag_type {
186 TRACE_FLAG_NEED_RESCHED = 0x04, 228 TRACE_FLAG_NEED_RESCHED = 0x04,
187 TRACE_FLAG_HARDIRQ = 0x08, 229 TRACE_FLAG_HARDIRQ = 0x08,
188 TRACE_FLAG_SOFTIRQ = 0x10, 230 TRACE_FLAG_SOFTIRQ = 0x10,
189 TRACE_FLAG_CONT = 0x20,
190}; 231};
191 232
192#define TRACE_BUF_SIZE 1024 233#define TRACE_BUF_SIZE 1024
@@ -198,6 +239,7 @@ enum trace_flag_type {
198 */ 239 */
199struct trace_array_cpu { 240struct trace_array_cpu {
200 atomic_t disabled; 241 atomic_t disabled;
242 void *buffer_page; /* ring buffer spare */
201 243
202 /* these fields get copied into max-trace: */ 244 /* these fields get copied into max-trace: */
203 unsigned long trace_idx; 245 unsigned long trace_idx;
@@ -262,10 +304,10 @@ extern void __ftrace_bad_type(void);
262 do { \ 304 do { \
263 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ 305 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
264 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ 306 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
265 IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
266 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ 307 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
267 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ 308 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
268 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ 309 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
310 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
269 IF_ASSIGN(var, ent, struct special_entry, 0); \ 311 IF_ASSIGN(var, ent, struct special_entry, 0); \
270 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ 312 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
271 TRACE_MMIO_RW); \ 313 TRACE_MMIO_RW); \
@@ -279,7 +321,15 @@ extern void __ftrace_bad_type(void);
279 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ 321 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
280 TRACE_GRAPH_RET); \ 322 TRACE_GRAPH_RET); \
281 IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ 323 IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
282 IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ 324 IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
325 IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
326 TRACE_KMEM_ALLOC); \
327 IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
328 TRACE_KMEM_FREE); \
329 IF_ASSIGN(var, ent, struct syscall_trace_enter, \
330 TRACE_SYSCALL_ENTER); \
331 IF_ASSIGN(var, ent, struct syscall_trace_exit, \
332 TRACE_SYSCALL_EXIT); \
283 __ftrace_bad_type(); \ 333 __ftrace_bad_type(); \
284 } while (0) 334 } while (0)
285 335
@@ -287,7 +337,8 @@ extern void __ftrace_bad_type(void);
287enum print_line_t { 337enum print_line_t {
288 TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ 338 TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
289 TRACE_TYPE_HANDLED = 1, 339 TRACE_TYPE_HANDLED = 1,
290 TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */ 340 TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
341 TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
291}; 342};
292 343
293 344
@@ -297,8 +348,8 @@ enum print_line_t {
297 * flags value in struct tracer_flags. 348 * flags value in struct tracer_flags.
298 */ 349 */
299struct tracer_opt { 350struct tracer_opt {
300 const char *name; /* Will appear on the trace_options file */ 351 const char *name; /* Will appear on the trace_options file */
301 u32 bit; /* Mask assigned in val field in tracer_flags */ 352 u32 bit; /* Mask assigned in val field in tracer_flags */
302}; 353};
303 354
304/* 355/*
@@ -307,28 +358,51 @@ struct tracer_opt {
307 */ 358 */
308struct tracer_flags { 359struct tracer_flags {
309 u32 val; 360 u32 val;
310 struct tracer_opt *opts; 361 struct tracer_opt *opts;
311}; 362};
312 363
 313/* Makes it easier to define a tracer opt */			 364/* Makes it easier to define a tracer opt */
314#define TRACER_OPT(s, b) .name = #s, .bit = b 365#define TRACER_OPT(s, b) .name = #s, .bit = b
315 366
316/* 367
317 * A specific tracer, represented by methods that operate on a trace array: 368/**
369 * struct tracer - a specific tracer and its callbacks to interact with debugfs
370 * @name: the name chosen to select it on the available_tracers file
371 * @init: called when one switches to this tracer (echo name > current_tracer)
372 * @reset: called when one switches to another tracer
373 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
374 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
375 * @open: called when the trace file is opened
376 * @pipe_open: called when the trace_pipe file is opened
377 * @wait_pipe: override how the user waits for traces on trace_pipe
378 * @close: called when the trace file is released
379 * @read: override the default read callback on trace_pipe
380 * @splice_read: override the default splice_read callback on trace_pipe
381 * @selftest: selftest to run on boot (see trace_selftest.c)
382 * @print_headers: override the first lines that describe your columns
383 * @print_line: callback that prints a trace
384 * @set_flag: signals one of your private flags changed (trace_options file)
385 * @flags: your private flags
318 */ 386 */
319struct tracer { 387struct tracer {
320 const char *name; 388 const char *name;
321 /* Your tracer should raise a warning if init fails */
322 int (*init)(struct trace_array *tr); 389 int (*init)(struct trace_array *tr);
323 void (*reset)(struct trace_array *tr); 390 void (*reset)(struct trace_array *tr);
324 void (*start)(struct trace_array *tr); 391 void (*start)(struct trace_array *tr);
325 void (*stop)(struct trace_array *tr); 392 void (*stop)(struct trace_array *tr);
326 void (*open)(struct trace_iterator *iter); 393 void (*open)(struct trace_iterator *iter);
327 void (*pipe_open)(struct trace_iterator *iter); 394 void (*pipe_open)(struct trace_iterator *iter);
395 void (*wait_pipe)(struct trace_iterator *iter);
328 void (*close)(struct trace_iterator *iter); 396 void (*close)(struct trace_iterator *iter);
329 ssize_t (*read)(struct trace_iterator *iter, 397 ssize_t (*read)(struct trace_iterator *iter,
330 struct file *filp, char __user *ubuf, 398 struct file *filp, char __user *ubuf,
331 size_t cnt, loff_t *ppos); 399 size_t cnt, loff_t *ppos);
400 ssize_t (*splice_read)(struct trace_iterator *iter,
401 struct file *filp,
402 loff_t *ppos,
403 struct pipe_inode_info *pipe,
404 size_t len,
405 unsigned int flags);
332#ifdef CONFIG_FTRACE_STARTUP_TEST 406#ifdef CONFIG_FTRACE_STARTUP_TEST
333 int (*selftest)(struct tracer *trace, 407 int (*selftest)(struct tracer *trace,
334 struct trace_array *tr); 408 struct trace_array *tr);
@@ -339,7 +413,8 @@ struct tracer {
339 int (*set_flag)(u32 old_flags, u32 bit, int set); 413 int (*set_flag)(u32 old_flags, u32 bit, int set);
340 struct tracer *next; 414 struct tracer *next;
341 int print_max; 415 int print_max;
342 struct tracer_flags *flags; 416 struct tracer_flags *flags;
417 struct tracer_stat *stats;
343}; 418};
344 419
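For orientation, a minimal tracer built on the callbacks documented above might look like the sketch below. register_tracer() and device_initcall() are the same hooks the branch tracer uses further down in this patch; the example_* names are made up for illustration.

    /* Minimal sketch of a tracer plugin; illustrative names only. */
    static int example_trace_init(struct trace_array *tr)
    {
            return 0;       /* nothing to set up */
    }

    static void example_trace_reset(struct trace_array *tr)
    {
    }

    static struct tracer example_tracer __read_mostly =
    {
            .name   = "example",
            .init   = example_trace_init,
            .reset  = example_trace_reset,
    };

    __init static int init_example_tracer(void)
    {
            return register_tracer(&example_tracer);
    }
    device_initcall(init_example_tracer);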
345struct trace_seq { 420struct trace_seq {
@@ -348,6 +423,16 @@ struct trace_seq {
348 unsigned int readpos; 423 unsigned int readpos;
349}; 424};
350 425
426static inline void
427trace_seq_init(struct trace_seq *s)
428{
429 s->len = 0;
430 s->readpos = 0;
431}
432
433
434#define TRACE_PIPE_ALL_CPU -1
435
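The new trace_seq_init() helper pairs with the format-then-copy pattern the event files added later in this patch follow. A condensed sketch, assuming trace_seq_printf() now lives in trace_output.h and using 42 as a stand-in value:

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include "trace_output.h"       /* trace_seq_printf(), assumed location */

    /* Illustrative read handler: format into a trace_seq, copy to user space. */
    static ssize_t example_read(struct file *filp, char __user *ubuf,
                                size_t cnt, loff_t *ppos)
    {
            struct trace_seq *s;
            ssize_t r;

            s = kmalloc(sizeof(*s), GFP_KERNEL);
            if (!s)
                    return -ENOMEM;

            trace_seq_init(s);              /* len = 0, readpos = 0 */
            trace_seq_printf(s, "%d\n", 42);

            r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
            kfree(s);
            return r;
    }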
351/* 436/*
352 * Trace iterator - used by printout routines who present trace 437 * Trace iterator - used by printout routines who present trace
353 * results to users and which routines might sleep, etc: 438 * results to users and which routines might sleep, etc:
@@ -356,6 +441,8 @@ struct trace_iterator {
356 struct trace_array *tr; 441 struct trace_array *tr;
357 struct tracer *trace; 442 struct tracer *trace;
358 void *private; 443 void *private;
444 int cpu_file;
445 struct mutex mutex;
359 struct ring_buffer_iter *buffer_iter[NR_CPUS]; 446 struct ring_buffer_iter *buffer_iter[NR_CPUS];
360 447
361 /* The below is zeroed out in pipe_read */ 448 /* The below is zeroed out in pipe_read */
@@ -371,6 +458,7 @@ struct trace_iterator {
371 cpumask_var_t started; 458 cpumask_var_t started;
372}; 459};
373 460
461int tracer_init(struct tracer *t, struct trace_array *tr);
374int tracing_is_enabled(void); 462int tracing_is_enabled(void);
375void trace_wake_up(void); 463void trace_wake_up(void);
376void tracing_reset(struct trace_array *tr, int cpu); 464void tracing_reset(struct trace_array *tr, int cpu);
@@ -379,26 +467,50 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
379struct dentry *tracing_init_dentry(void); 467struct dentry *tracing_init_dentry(void);
380void init_tracer_sysprof_debugfs(struct dentry *d_tracer); 468void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
381 469
470struct ring_buffer_event;
471
472struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
473 unsigned char type,
474 unsigned long len,
475 unsigned long flags,
476 int pc);
477void trace_buffer_unlock_commit(struct trace_array *tr,
478 struct ring_buffer_event *event,
479 unsigned long flags, int pc);
480
481struct ring_buffer_event *
482trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
483 unsigned long flags, int pc);
484void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
485 unsigned long flags, int pc);
486void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
487 unsigned long flags, int pc);
488
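Every call-site conversion later in this patch (trace_boot.c, trace_branch.c, and friends) reduces to the same reserve/fill/commit shape with these helpers. A compressed sketch, with a hypothetical TRACE_MY_TYPE id and struct my_entry payload; the generic entry header and the wakeup are now handled inside the helpers, which is why the converted sites drop their tracing_generic_entry_update() and trace_wake_up() lines.

    /* Illustrative only: the pattern the per-tracer code is converted to. */
    static void example_trace_event(struct trace_array *tr, unsigned long value,
                                    unsigned long flags, int pc)
    {
            struct ring_buffer_event *event;
            struct my_entry *entry;         /* hypothetical payload layout */

            event = trace_buffer_lock_reserve(tr, TRACE_MY_TYPE,
                                              sizeof(*entry), flags, pc);
            if (!event)
                    return;
            entry = ring_buffer_event_data(event);
            entry->value = value;           /* tracer-specific fields */
            trace_buffer_unlock_commit(tr, event, flags, pc);
    }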
382struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, 489struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
383 struct trace_array_cpu *data); 490 struct trace_array_cpu *data);
491
492struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
493 int *ent_cpu, u64 *ent_ts);
494
384void tracing_generic_entry_update(struct trace_entry *entry, 495void tracing_generic_entry_update(struct trace_entry *entry,
385 unsigned long flags, 496 unsigned long flags,
386 int pc); 497 int pc);
387 498
499void default_wait_pipe(struct trace_iterator *iter);
500void poll_wait_pipe(struct trace_iterator *iter);
501
388void ftrace(struct trace_array *tr, 502void ftrace(struct trace_array *tr,
389 struct trace_array_cpu *data, 503 struct trace_array_cpu *data,
390 unsigned long ip, 504 unsigned long ip,
391 unsigned long parent_ip, 505 unsigned long parent_ip,
392 unsigned long flags, int pc); 506 unsigned long flags, int pc);
393void tracing_sched_switch_trace(struct trace_array *tr, 507void tracing_sched_switch_trace(struct trace_array *tr,
394 struct trace_array_cpu *data,
395 struct task_struct *prev, 508 struct task_struct *prev,
396 struct task_struct *next, 509 struct task_struct *next,
397 unsigned long flags, int pc); 510 unsigned long flags, int pc);
398void tracing_record_cmdline(struct task_struct *tsk); 511void tracing_record_cmdline(struct task_struct *tsk);
399 512
400void tracing_sched_wakeup_trace(struct trace_array *tr, 513void tracing_sched_wakeup_trace(struct trace_array *tr,
401 struct trace_array_cpu *data,
402 struct task_struct *wakee, 514 struct task_struct *wakee,
403 struct task_struct *cur, 515 struct task_struct *cur,
404 unsigned long flags, int pc); 516 unsigned long flags, int pc);
@@ -408,14 +520,12 @@ void trace_special(struct trace_array *tr,
408 unsigned long arg2, 520 unsigned long arg2,
409 unsigned long arg3, int pc); 521 unsigned long arg3, int pc);
410void trace_function(struct trace_array *tr, 522void trace_function(struct trace_array *tr,
411 struct trace_array_cpu *data,
412 unsigned long ip, 523 unsigned long ip,
413 unsigned long parent_ip, 524 unsigned long parent_ip,
414 unsigned long flags, int pc); 525 unsigned long flags, int pc);
415 526
416void trace_graph_return(struct ftrace_graph_ret *trace); 527void trace_graph_return(struct ftrace_graph_ret *trace);
417int trace_graph_entry(struct ftrace_graph_ent *trace); 528int trace_graph_entry(struct ftrace_graph_ent *trace);
418void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
419 529
420void tracing_start_cmdline_record(void); 530void tracing_start_cmdline_record(void);
421void tracing_stop_cmdline_record(void); 531void tracing_stop_cmdline_record(void);
@@ -434,15 +544,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
434void update_max_tr_single(struct trace_array *tr, 544void update_max_tr_single(struct trace_array *tr,
435 struct task_struct *tsk, int cpu); 545 struct task_struct *tsk, int cpu);
436 546
437extern cycle_t ftrace_now(int cpu); 547void __trace_stack(struct trace_array *tr,
548 unsigned long flags,
549 int skip, int pc);
438 550
439#ifdef CONFIG_FUNCTION_TRACER 551extern cycle_t ftrace_now(int cpu);
440void tracing_start_function_trace(void);
441void tracing_stop_function_trace(void);
442#else
443# define tracing_start_function_trace() do { } while (0)
444# define tracing_stop_function_trace() do { } while (0)
445#endif
446 552
447#ifdef CONFIG_CONTEXT_SWITCH_TRACER 553#ifdef CONFIG_CONTEXT_SWITCH_TRACER
448typedef void 554typedef void
@@ -456,10 +562,10 @@ struct tracer_switch_ops {
456 void *private; 562 void *private;
457 struct tracer_switch_ops *next; 563 struct tracer_switch_ops *next;
458}; 564};
459
460char *trace_find_cmdline(int pid);
461#endif /* CONFIG_CONTEXT_SWITCH_TRACER */ 565#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
462 566
567extern void trace_find_cmdline(int pid, char comm[]);
568
463#ifdef CONFIG_DYNAMIC_FTRACE 569#ifdef CONFIG_DYNAMIC_FTRACE
464extern unsigned long ftrace_update_tot_cnt; 570extern unsigned long ftrace_update_tot_cnt;
465#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func 571#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
@@ -469,6 +575,8 @@ extern int DYN_FTRACE_TEST_NAME(void);
469#ifdef CONFIG_FTRACE_STARTUP_TEST 575#ifdef CONFIG_FTRACE_STARTUP_TEST
470extern int trace_selftest_startup_function(struct tracer *trace, 576extern int trace_selftest_startup_function(struct tracer *trace,
471 struct trace_array *tr); 577 struct trace_array *tr);
578extern int trace_selftest_startup_function_graph(struct tracer *trace,
579 struct trace_array *tr);
472extern int trace_selftest_startup_irqsoff(struct tracer *trace, 580extern int trace_selftest_startup_irqsoff(struct tracer *trace,
473 struct trace_array *tr); 581 struct trace_array *tr);
474extern int trace_selftest_startup_preemptoff(struct tracer *trace, 582extern int trace_selftest_startup_preemptoff(struct tracer *trace,
@@ -488,18 +596,11 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
488#endif /* CONFIG_FTRACE_STARTUP_TEST */ 596#endif /* CONFIG_FTRACE_STARTUP_TEST */
489 597
490extern void *head_page(struct trace_array_cpu *data); 598extern void *head_page(struct trace_array_cpu *data);
491extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
492extern void trace_seq_print_cont(struct trace_seq *s,
493 struct trace_iterator *iter);
494
495extern int
496seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
497 unsigned long sym_flags);
498extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
499 size_t cnt);
500extern long ns2usecs(cycle_t nsec); 599extern long ns2usecs(cycle_t nsec);
501extern int 600extern int
502trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); 601trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
602extern int
603trace_vprintk(unsigned long ip, const char *fmt, va_list args);
503 604
504extern unsigned long trace_flags; 605extern unsigned long trace_flags;
505 606
@@ -580,7 +681,11 @@ enum trace_iterator_flags {
580 TRACE_ITER_ANNOTATE = 0x2000, 681 TRACE_ITER_ANNOTATE = 0x2000,
581 TRACE_ITER_USERSTACKTRACE = 0x4000, 682 TRACE_ITER_USERSTACKTRACE = 0x4000,
582 TRACE_ITER_SYM_USEROBJ = 0x8000, 683 TRACE_ITER_SYM_USEROBJ = 0x8000,
583 TRACE_ITER_PRINTK_MSGONLY = 0x10000 684 TRACE_ITER_PRINTK_MSGONLY = 0x10000,
685 TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
686 TRACE_ITER_LATENCY_FMT = 0x40000,
687 TRACE_ITER_GLOBAL_CLK = 0x80000,
688 TRACE_ITER_SLEEP_TIME = 0x100000,
584}; 689};
585 690
586/* 691/*
@@ -601,12 +706,12 @@ extern struct tracer nop_trace;
601 * preempt_enable (after a disable), a schedule might take place 706 * preempt_enable (after a disable), a schedule might take place
602 * causing an infinite recursion. 707 * causing an infinite recursion.
603 * 708 *
604 * To prevent this, we read the need_recshed flag before 709 * To prevent this, we read the need_resched flag before
605 * disabling preemption. When we want to enable preemption we 710 * disabling preemption. When we want to enable preemption we
606 * check the flag, if it is set, then we call preempt_enable_no_resched. 711 * check the flag, if it is set, then we call preempt_enable_no_resched.
607 * Otherwise, we call preempt_enable. 712 * Otherwise, we call preempt_enable.
608 * 713 *
 609 * The rationale for doing the above is that if need resched is set		 714 * The rationale for doing the above is that if need_resched is set
610 * and we have yet to reschedule, we are either in an atomic location 715 * and we have yet to reschedule, we are either in an atomic location
611 * (where we do not need to check for scheduling) or we are inside 716 * (where we do not need to check for scheduling) or we are inside
612 * the scheduler and do not want to resched. 717 * the scheduler and do not want to resched.
@@ -627,7 +732,7 @@ static inline int ftrace_preempt_disable(void)
627 * 732 *
628 * This is a scheduler safe way to enable preemption and not miss 733 * This is a scheduler safe way to enable preemption and not miss
629 * any preemption checks. The disabled saved the state of preemption. 734 * any preemption checks. The disabled saved the state of preemption.
630 * If resched is set, then we were either inside an atomic or 735 * If resched is set, then we are either inside an atomic or
631 * are inside the scheduler (we would have already scheduled 736 * are inside the scheduler (we would have already scheduled
632 * otherwise). In this case, we do not want to call normal 737 * otherwise). In this case, we do not want to call normal
633 * preempt_enable, but preempt_enable_no_resched instead. 738 * preempt_enable, but preempt_enable_no_resched instead.
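In code, the pairing described above is roughly the following; this assumes the matching ftrace_preempt_enable() helper that the second half of the comment documents, with the returned value carrying the saved state between the two calls:

    int resched;

    resched = ftrace_preempt_disable();
    /* ... write the trace entry; a reschedule here could recurse ... */
    ftrace_preempt_enable(resched);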
@@ -664,4 +769,118 @@ static inline void trace_branch_disable(void)
664} 769}
665#endif /* CONFIG_BRANCH_TRACER */ 770#endif /* CONFIG_BRANCH_TRACER */
666 771
772/* set ring buffers to default size if not already done so */
773int tracing_update_buffers(void);
774
775/* trace event type bit fields, not numeric */
776enum {
777 TRACE_EVENT_TYPE_PRINTF = 1,
778 TRACE_EVENT_TYPE_RAW = 2,
779};
780
781struct ftrace_event_field {
782 struct list_head link;
783 char *name;
784 char *type;
785 int offset;
786 int size;
787};
788
789struct ftrace_event_call {
790 char *name;
791 char *system;
792 struct dentry *dir;
793 int enabled;
794 int (*regfunc)(void);
795 void (*unregfunc)(void);
796 int id;
797 int (*raw_init)(void);
798 int (*show_format)(struct trace_seq *s);
799 int (*define_fields)(void);
800 struct list_head fields;
801 struct filter_pred **preds;
802
803#ifdef CONFIG_EVENT_PROFILE
804 atomic_t profile_count;
805 int (*profile_enable)(struct ftrace_event_call *);
806 void (*profile_disable)(struct ftrace_event_call *);
807#endif
808};
809
810struct event_subsystem {
811 struct list_head list;
812 const char *name;
813 struct dentry *entry;
814 struct filter_pred **preds;
815};
816
817#define events_for_each(event) \
818 for (event = __start_ftrace_events; \
819 (unsigned long)event < (unsigned long)__stop_ftrace_events; \
820 event++)
821
822#define MAX_FILTER_PRED 8
823
824struct filter_pred;
825
826typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
827
828struct filter_pred {
829 filter_pred_fn_t fn;
830 u64 val;
831 char *str_val;
832 int str_len;
833 char *field_name;
834 int offset;
835 int not;
836 int or;
837 int compound;
838 int clear;
839};
840
841int trace_define_field(struct ftrace_event_call *call, char *type,
842 char *name, int offset, int size);
843extern void filter_free_pred(struct filter_pred *pred);
844extern void filter_print_preds(struct filter_pred **preds,
845 struct trace_seq *s);
846extern int filter_parse(char **pbuf, struct filter_pred *pred);
847extern int filter_add_pred(struct ftrace_event_call *call,
848 struct filter_pred *pred);
849extern void filter_free_preds(struct ftrace_event_call *call);
850extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
851extern void filter_free_subsystem_preds(struct event_subsystem *system);
852extern int filter_add_subsystem_pred(struct event_subsystem *system,
853 struct filter_pred *pred);
854
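These filter entry points are driven by the per-event and per-subsystem 'filter' files added in trace_events.c below. Stripped of error paths, the flow is roughly the sketch that follows; buf is the user-supplied expression string.

    /* Illustrative only: parse one predicate string and attach it to an event. */
    static int example_set_filter(struct ftrace_event_call *call, char *buf)
    {
            struct filter_pred *pred;
            char *pbuf = buf;

            pred = kzalloc(sizeof(*pred), GFP_KERNEL);
            if (!pred)
                    return -ENOMEM;

            if (filter_parse(&pbuf, pred) < 0 || filter_add_pred(call, pred)) {
                    filter_free_pred(pred);
                    return -EINVAL;
            }
            /* at record time, filter_match_preds(call, rec) decides visibility */
            return 0;
    }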
855void event_trace_printk(unsigned long ip, const char *fmt, ...);
856extern struct ftrace_event_call __start_ftrace_events[];
857extern struct ftrace_event_call __stop_ftrace_events[];
858
859#define for_each_event(event) \
860 for (event = __start_ftrace_events; \
861 (unsigned long)event < (unsigned long)__stop_ftrace_events; \
862 event++)
863
864extern const char *__start___trace_bprintk_fmt[];
865extern const char *__stop___trace_bprintk_fmt[];
866
867/*
                                         868 * The double __builtin_constant_p is because gcc will give us an error
                                         869 * if we try to initialize the static variable with fmt when it is not a
                                         870 * constant, even though the outer if statement would be optimized out.
871 */
872#define event_trace_printk(ip, fmt, args...) \
873do { \
874 __trace_printk_check_format(fmt, ##args); \
875 tracing_record_cmdline(current); \
876 if (__builtin_constant_p(fmt)) { \
877 static const char *trace_printk_fmt \
878 __attribute__((section("__trace_printk_fmt"))) = \
879 __builtin_constant_p(fmt) ? fmt : NULL; \
880 \
881 __trace_bprintk(ip, trace_printk_fmt, ##args); \
882 } else \
883 __trace_printk(ip, fmt, ##args); \
884} while (0)
885
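Usage mirrors trace_printk(); a hedged sketch of two call sites, where irq and user_fmt are stand-in variables and _THIS_IP_ supplies the caller address. A constant format takes the binary __trace_bprintk() path and records only a pointer to the format string, while a format built at run time falls back to __trace_printk():

    event_trace_printk(_THIS_IP_, "irq %d handled\n", irq);    /* bprintk path */
    event_trace_printk(_THIS_IP_, user_fmt, irq);               /* printk fallback */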
667#endif /* _LINUX_KERNEL_TRACE_H */ 886#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 366c8c333e13..7a30fc4c3642 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -11,6 +11,7 @@
11#include <linux/kallsyms.h> 11#include <linux/kallsyms.h>
12 12
13#include "trace.h" 13#include "trace.h"
14#include "trace_output.h"
14 15
15static struct trace_array *boot_trace; 16static struct trace_array *boot_trace;
16static bool pre_initcalls_finished; 17static bool pre_initcalls_finished;
@@ -27,13 +28,13 @@ void start_boot_trace(void)
27 28
28void enable_boot_trace(void) 29void enable_boot_trace(void)
29{ 30{
30 if (pre_initcalls_finished) 31 if (boot_trace && pre_initcalls_finished)
31 tracing_start_sched_switch_record(); 32 tracing_start_sched_switch_record();
32} 33}
33 34
34void disable_boot_trace(void) 35void disable_boot_trace(void)
35{ 36{
36 if (pre_initcalls_finished) 37 if (boot_trace && pre_initcalls_finished)
37 tracing_stop_sched_switch_record(); 38 tracing_stop_sched_switch_record();
38} 39}
39 40
@@ -42,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr)
42 int cpu; 43 int cpu;
43 boot_trace = tr; 44 boot_trace = tr;
44 45
46 if (!tr)
47 return 0;
48
45 for_each_cpu(cpu, cpu_possible_mask) 49 for_each_cpu(cpu, cpu_possible_mask)
46 tracing_reset(tr, cpu); 50 tracing_reset(tr, cpu);
47 51
@@ -128,10 +132,9 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
128{ 132{
129 struct ring_buffer_event *event; 133 struct ring_buffer_event *event;
130 struct trace_boot_call *entry; 134 struct trace_boot_call *entry;
131 unsigned long irq_flags;
132 struct trace_array *tr = boot_trace; 135 struct trace_array *tr = boot_trace;
133 136
134 if (!pre_initcalls_finished) 137 if (!tr || !pre_initcalls_finished)
135 return; 138 return;
136 139
137 /* Get its name now since this function could 140 /* Get its name now since this function could
@@ -140,18 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
140 sprint_symbol(bt->func, (unsigned long)fn); 143 sprint_symbol(bt->func, (unsigned long)fn);
141 preempt_disable(); 144 preempt_disable();
142 145
143 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 146 event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
144 &irq_flags); 147 sizeof(*entry), 0, 0);
145 if (!event) 148 if (!event)
146 goto out; 149 goto out;
147 entry = ring_buffer_event_data(event); 150 entry = ring_buffer_event_data(event);
148 tracing_generic_entry_update(&entry->ent, 0, 0);
149 entry->ent.type = TRACE_BOOT_CALL;
150 entry->boot_call = *bt; 151 entry->boot_call = *bt;
151 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 152 trace_buffer_unlock_commit(tr, event, 0, 0);
152
153 trace_wake_up();
154
155 out: 153 out:
156 preempt_enable(); 154 preempt_enable();
157} 155}
@@ -160,27 +158,21 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
160{ 158{
161 struct ring_buffer_event *event; 159 struct ring_buffer_event *event;
162 struct trace_boot_ret *entry; 160 struct trace_boot_ret *entry;
163 unsigned long irq_flags;
164 struct trace_array *tr = boot_trace; 161 struct trace_array *tr = boot_trace;
165 162
166 if (!pre_initcalls_finished) 163 if (!tr || !pre_initcalls_finished)
167 return; 164 return;
168 165
169 sprint_symbol(bt->func, (unsigned long)fn); 166 sprint_symbol(bt->func, (unsigned long)fn);
170 preempt_disable(); 167 preempt_disable();
171 168
172 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 169 event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
173 &irq_flags); 170 sizeof(*entry), 0, 0);
174 if (!event) 171 if (!event)
175 goto out; 172 goto out;
176 entry = ring_buffer_event_data(event); 173 entry = ring_buffer_event_data(event);
177 tracing_generic_entry_update(&entry->ent, 0, 0);
178 entry->ent.type = TRACE_BOOT_RET;
179 entry->boot_ret = *bt; 174 entry->boot_ret = *bt;
180 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 175 trace_buffer_unlock_commit(tr, event, 0, 0);
181
182 trace_wake_up();
183
184 out: 176 out:
185 preempt_enable(); 177 preempt_enable();
186} 178}
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 6c00feb3bac7..ad8c22efff41 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -14,12 +14,17 @@
14#include <linux/hash.h> 14#include <linux/hash.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <asm/local.h> 16#include <asm/local.h>
17
17#include "trace.h" 18#include "trace.h"
19#include "trace_stat.h"
20#include "trace_output.h"
18 21
19#ifdef CONFIG_BRANCH_TRACER 22#ifdef CONFIG_BRANCH_TRACER
20 23
24static struct tracer branch_trace;
21static int branch_tracing_enabled __read_mostly; 25static int branch_tracing_enabled __read_mostly;
22static DEFINE_MUTEX(branch_tracing_mutex); 26static DEFINE_MUTEX(branch_tracing_mutex);
27
23static struct trace_array *branch_tracer; 28static struct trace_array *branch_tracer;
24 29
25static void 30static void
@@ -28,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
28 struct trace_array *tr = branch_tracer; 33 struct trace_array *tr = branch_tracer;
29 struct ring_buffer_event *event; 34 struct ring_buffer_event *event;
30 struct trace_branch *entry; 35 struct trace_branch *entry;
31 unsigned long flags, irq_flags; 36 unsigned long flags;
32 int cpu, pc; 37 int cpu, pc;
33 const char *p; 38 const char *p;
34 39
@@ -47,15 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
47 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) 52 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
48 goto out; 53 goto out;
49 54
50 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 55 pc = preempt_count();
51 &irq_flags); 56 event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
57 sizeof(*entry), flags, pc);
52 if (!event) 58 if (!event)
53 goto out; 59 goto out;
54 60
55 pc = preempt_count();
56 entry = ring_buffer_event_data(event); 61 entry = ring_buffer_event_data(event);
57 tracing_generic_entry_update(&entry->ent, flags, pc);
58 entry->ent.type = TRACE_BRANCH;
59 62
60 /* Strip off the path, only save the file */ 63 /* Strip off the path, only save the file */
61 p = f->file + strlen(f->file); 64 p = f->file + strlen(f->file);
@@ -70,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
70 entry->line = f->line; 73 entry->line = f->line;
71 entry->correct = val == expect; 74 entry->correct = val == expect;
72 75
73 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 76 ring_buffer_unlock_commit(tr->buffer, event);
74 77
75 out: 78 out:
76 atomic_dec(&tr->data[cpu]->disabled); 79 atomic_dec(&tr->data[cpu]->disabled);
@@ -88,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
88 91
89int enable_branch_tracing(struct trace_array *tr) 92int enable_branch_tracing(struct trace_array *tr)
90{ 93{
91 int ret = 0;
92
93 mutex_lock(&branch_tracing_mutex); 94 mutex_lock(&branch_tracing_mutex);
94 branch_tracer = tr; 95 branch_tracer = tr;
95 /* 96 /*
@@ -100,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr)
100 branch_tracing_enabled++; 101 branch_tracing_enabled++;
101 mutex_unlock(&branch_tracing_mutex); 102 mutex_unlock(&branch_tracing_mutex);
102 103
103 return ret; 104 return 0;
104} 105}
105 106
106void disable_branch_tracing(void) 107void disable_branch_tracing(void)
@@ -128,11 +129,6 @@ static void stop_branch_trace(struct trace_array *tr)
128 129
129static int branch_trace_init(struct trace_array *tr) 130static int branch_trace_init(struct trace_array *tr)
130{ 131{
131 int cpu;
132
133 for_each_online_cpu(cpu)
134 tracing_reset(tr, cpu);
135
136 start_branch_trace(tr); 132 start_branch_trace(tr);
137 return 0; 133 return 0;
138} 134}
@@ -142,22 +138,53 @@ static void branch_trace_reset(struct trace_array *tr)
142 stop_branch_trace(tr); 138 stop_branch_trace(tr);
143} 139}
144 140
145struct tracer branch_trace __read_mostly = 141static enum print_line_t trace_branch_print(struct trace_iterator *iter,
142 int flags)
143{
144 struct trace_branch *field;
145
146 trace_assign_type(field, iter->ent);
147
148 if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
149 field->correct ? " ok " : " MISS ",
150 field->func,
151 field->file,
152 field->line))
153 return TRACE_TYPE_PARTIAL_LINE;
154
155 return TRACE_TYPE_HANDLED;
156}
157
158
159static struct trace_event trace_branch_event = {
160 .type = TRACE_BRANCH,
161 .trace = trace_branch_print,
162};
163
164static struct tracer branch_trace __read_mostly =
146{ 165{
147 .name = "branch", 166 .name = "branch",
148 .init = branch_trace_init, 167 .init = branch_trace_init,
149 .reset = branch_trace_reset, 168 .reset = branch_trace_reset,
150#ifdef CONFIG_FTRACE_SELFTEST 169#ifdef CONFIG_FTRACE_SELFTEST
151 .selftest = trace_selftest_startup_branch, 170 .selftest = trace_selftest_startup_branch,
152#endif 171#endif /* CONFIG_FTRACE_SELFTEST */
153}; 172};
154 173
155__init static int init_branch_trace(void) 174__init static int init_branch_tracer(void)
156{ 175{
176 int ret;
177
178 ret = register_ftrace_event(&trace_branch_event);
179 if (!ret) {
180 printk(KERN_WARNING "Warning: could not register "
181 "branch events\n");
182 return 1;
183 }
157 return register_tracer(&branch_trace); 184 return register_tracer(&branch_trace);
158} 185}
186device_initcall(init_branch_tracer);
159 187
160device_initcall(init_branch_trace);
161#else 188#else
162static inline 189static inline
163void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) 190void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
@@ -183,66 +210,39 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
183} 210}
184EXPORT_SYMBOL(ftrace_likely_update); 211EXPORT_SYMBOL(ftrace_likely_update);
185 212
186struct ftrace_pointer { 213extern unsigned long __start_annotated_branch_profile[];
187 void *start; 214extern unsigned long __stop_annotated_branch_profile[];
188 void *stop;
189 int hit;
190};
191 215
192static void * 216static int annotated_branch_stat_headers(struct seq_file *m)
193t_next(struct seq_file *m, void *v, loff_t *pos)
194{ 217{
195 const struct ftrace_pointer *f = m->private; 218 seq_printf(m, " correct incorrect %% ");
196 struct ftrace_branch_data *p = v; 219 seq_printf(m, " Function "
197 220 " File Line\n"
198 (*pos)++; 221 " ------- --------- - "
199 222 " -------- "
200 if (v == (void *)1) 223 " ---- ----\n");
201 return f->start; 224 return 0;
202
203 ++p;
204
205 if ((void *)p >= (void *)f->stop)
206 return NULL;
207
208 return p;
209} 225}
210 226
211static void *t_start(struct seq_file *m, loff_t *pos) 227static inline long get_incorrect_percent(struct ftrace_branch_data *p)
212{ 228{
213 void *t = (void *)1; 229 long percent;
214 loff_t l = 0;
215
216 for (; t && l < *pos; t = t_next(m, t, &l))
217 ;
218 230
219 return t; 231 if (p->correct) {
220} 232 percent = p->incorrect * 100;
233 percent /= p->correct + p->incorrect;
234 } else
235 percent = p->incorrect ? 100 : -1;
221 236
222static void t_stop(struct seq_file *m, void *p) 237 return percent;
223{
224} 238}
225 239
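As a worked example of the percentage above: correct = 300 and incorrect = 100 yields 100 * 100 / (300 + 100) = 25, i.e. the annotation was wrong for 25% of the recorded hits; with correct = 0 the result is 100 if any incorrect hits were seen, and -1 (treated as "no data" by the printer) if the branch was never hit at all.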
226static int t_show(struct seq_file *m, void *v) 240static int branch_stat_show(struct seq_file *m, void *v)
227{ 241{
228 const struct ftrace_pointer *fp = m->private;
229 struct ftrace_branch_data *p = v; 242 struct ftrace_branch_data *p = v;
230 const char *f; 243 const char *f;
231 long percent; 244 long percent;
232 245
233 if (v == (void *)1) {
234 if (fp->hit)
235 seq_printf(m, " miss hit %% ");
236 else
237 seq_printf(m, " correct incorrect %% ");
238 seq_printf(m, " Function "
239 " File Line\n"
240 " ------- --------- - "
241 " -------- "
242 " ---- ----\n");
243 return 0;
244 }
245
246 /* Only print the file, not the path */ 246 /* Only print the file, not the path */
247 f = p->file + strlen(p->file); 247 f = p->file + strlen(p->file);
248 while (f >= p->file && *f != '/') 248 while (f >= p->file && *f != '/')
@@ -252,11 +252,7 @@ static int t_show(struct seq_file *m, void *v)
252 /* 252 /*
253 * The miss is overlayed on correct, and hit on incorrect. 253 * The miss is overlayed on correct, and hit on incorrect.
254 */ 254 */
255 if (p->correct) { 255 percent = get_incorrect_percent(p);
256 percent = p->incorrect * 100;
257 percent /= p->correct + p->incorrect;
258 } else
259 percent = p->incorrect ? 100 : -1;
260 256
261 seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); 257 seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
262 if (percent < 0) 258 if (percent < 0)
@@ -267,76 +263,118 @@ static int t_show(struct seq_file *m, void *v)
267 return 0; 263 return 0;
268} 264}
269 265
270static struct seq_operations tracing_likely_seq_ops = { 266static void *annotated_branch_stat_start(void)
271 .start = t_start, 267{
272 .next = t_next, 268 return __start_annotated_branch_profile;
273 .stop = t_stop, 269}
274 .show = t_show, 270
271static void *
272annotated_branch_stat_next(void *v, int idx)
273{
274 struct ftrace_branch_data *p = v;
275
276 ++p;
277
278 if ((void *)p >= (void *)__stop_annotated_branch_profile)
279 return NULL;
280
281 return p;
282}
283
284static int annotated_branch_stat_cmp(void *p1, void *p2)
285{
286 struct ftrace_branch_data *a = p1;
287 struct ftrace_branch_data *b = p2;
288
289 long percent_a, percent_b;
290
291 percent_a = get_incorrect_percent(a);
292 percent_b = get_incorrect_percent(b);
293
294 if (percent_a < percent_b)
295 return -1;
296 if (percent_a > percent_b)
297 return 1;
298 else
299 return 0;
300}
301
302static struct tracer_stat annotated_branch_stats = {
303 .name = "branch_annotated",
304 .stat_start = annotated_branch_stat_start,
305 .stat_next = annotated_branch_stat_next,
306 .stat_cmp = annotated_branch_stat_cmp,
307 .stat_headers = annotated_branch_stat_headers,
308 .stat_show = branch_stat_show
275}; 309};
276 310
277static int tracing_branch_open(struct inode *inode, struct file *file) 311__init static int init_annotated_branch_stats(void)
278{ 312{
279 int ret; 313 int ret;
280 314
281 ret = seq_open(file, &tracing_likely_seq_ops); 315 ret = register_stat_tracer(&annotated_branch_stats);
282 if (!ret) { 316 if (!ret) {
283 struct seq_file *m = file->private_data; 317 printk(KERN_WARNING "Warning: could not register "
284 m->private = (void *)inode->i_private; 318 "annotated branches stats\n");
319 return 1;
285 } 320 }
286 321 return 0;
287 return ret;
288} 322}
289 323fs_initcall(init_annotated_branch_stats);
290static const struct file_operations tracing_branch_fops = {
291 .open = tracing_branch_open,
292 .read = seq_read,
293 .llseek = seq_lseek,
294};
295 324
296#ifdef CONFIG_PROFILE_ALL_BRANCHES 325#ifdef CONFIG_PROFILE_ALL_BRANCHES
326
297extern unsigned long __start_branch_profile[]; 327extern unsigned long __start_branch_profile[];
298extern unsigned long __stop_branch_profile[]; 328extern unsigned long __stop_branch_profile[];
299 329
300static const struct ftrace_pointer ftrace_branch_pos = { 330static int all_branch_stat_headers(struct seq_file *m)
301 .start = __start_branch_profile, 331{
302 .stop = __stop_branch_profile, 332 seq_printf(m, " miss hit %% ");
303 .hit = 1, 333 seq_printf(m, " Function "
304}; 334 " File Line\n"
335 " ------- --------- - "
336 " -------- "
337 " ---- ----\n");
338 return 0;
339}
305 340
306#endif /* CONFIG_PROFILE_ALL_BRANCHES */ 341static void *all_branch_stat_start(void)
342{
343 return __start_branch_profile;
344}
307 345
308extern unsigned long __start_annotated_branch_profile[]; 346static void *
309extern unsigned long __stop_annotated_branch_profile[]; 347all_branch_stat_next(void *v, int idx)
348{
349 struct ftrace_branch_data *p = v;
310 350
311static const struct ftrace_pointer ftrace_annotated_branch_pos = { 351 ++p;
312 .start = __start_annotated_branch_profile,
313 .stop = __stop_annotated_branch_profile,
314};
315 352
316static __init int ftrace_branch_init(void) 353 if ((void *)p >= (void *)__stop_branch_profile)
317{ 354 return NULL;
318 struct dentry *d_tracer;
319 struct dentry *entry;
320 355
321 d_tracer = tracing_init_dentry(); 356 return p;
357}
322 358
323 entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer, 359static struct tracer_stat all_branch_stats = {
324 (void *)&ftrace_annotated_branch_pos, 360 .name = "branch_all",
325 &tracing_branch_fops); 361 .stat_start = all_branch_stat_start,
326 if (!entry) 362 .stat_next = all_branch_stat_next,
327 pr_warning("Could not create debugfs " 363 .stat_headers = all_branch_stat_headers,
328 "'profile_annotatet_branch' entry\n"); 364 .stat_show = branch_stat_show
365};
329 366
330#ifdef CONFIG_PROFILE_ALL_BRANCHES 367__init static int all_annotated_branch_stats(void)
331 entry = debugfs_create_file("profile_branch", 0444, d_tracer, 368{
332 (void *)&ftrace_branch_pos, 369 int ret;
333 &tracing_branch_fops);
334 if (!entry)
335 pr_warning("Could not create debugfs"
336 " 'profile_branch' entry\n");
337#endif
338 370
371 ret = register_stat_tracer(&all_branch_stats);
372 if (!ret) {
373 printk(KERN_WARNING "Warning: could not register "
374 "all branches stats\n");
375 return 1;
376 }
339 return 0; 377 return 0;
340} 378}
341 379fs_initcall(all_annotated_branch_stats);
342device_initcall(ftrace_branch_init); 380#endif /* CONFIG_PROFILE_ALL_BRANCHES */
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
new file mode 100644
index 000000000000..b588fd81f7f9
--- /dev/null
+++ b/kernel/trace/trace_clock.c
@@ -0,0 +1,109 @@
1/*
2 * tracing clocks
3 *
4 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Implements 3 trace clock variants, with differing scalability/precision
7 * tradeoffs:
8 *
9 * - local: CPU-local trace clock
10 * - medium: scalable global clock with some jitter
11 * - global: globally monotonic, serialized clock
12 *
  13 * Tracer plugins will choose a default from these clocks.
14 */
15#include <linux/spinlock.h>
16#include <linux/hardirq.h>
17#include <linux/module.h>
18#include <linux/percpu.h>
19#include <linux/sched.h>
20#include <linux/ktime.h>
21#include <linux/trace_clock.h>
22
23/*
24 * trace_clock_local(): the simplest and least coherent tracing clock.
25 *
26 * Useful for tracing that does not cross to other CPUs nor
27 * does it go through idle events.
28 */
29u64 notrace trace_clock_local(void)
30{
31 unsigned long flags;
32 u64 clock;
33
34 /*
35 * sched_clock() is an architecture implemented, fast, scalable,
36 * lockless clock. It is not guaranteed to be coherent across
37 * CPUs, nor across CPU idle events.
38 */
39 raw_local_irq_save(flags);
40 clock = sched_clock();
41 raw_local_irq_restore(flags);
42
43 return clock;
44}
45
46/*
47 * trace_clock(): 'inbetween' trace clock. Not completely serialized,
48 * but not completely incorrect when crossing CPUs either.
49 *
50 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
51 * jitter between CPUs. So it's a pretty scalable clock, but there
52 * can be offsets in the trace data.
53 */
54u64 notrace trace_clock(void)
55{
56 return cpu_clock(raw_smp_processor_id());
57}
58
59
60/*
61 * trace_clock_global(): special globally coherent trace clock
62 *
63 * It has higher overhead than the other trace clocks but is still
64 * an order of magnitude faster than GTOD derived hardware clocks.
65 *
66 * Used by plugins that need globally coherent timestamps.
67 */
68
69static u64 prev_trace_clock_time;
70
71static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
72 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
73
74u64 notrace trace_clock_global(void)
75{
76 unsigned long flags;
77 int this_cpu;
78 u64 now;
79
80 raw_local_irq_save(flags);
81
82 this_cpu = raw_smp_processor_id();
83 now = cpu_clock(this_cpu);
84 /*
  85	 * If in an NMI context then don't risk lockups and return the
86 * cpu_clock() time:
87 */
88 if (unlikely(in_nmi()))
89 goto out;
90
91 __raw_spin_lock(&trace_clock_lock);
92
93 /*
94 * TODO: if this happens often then maybe we should reset
95 * my_scd->clock to prev_trace_clock_time+1, to make sure
96 * we start ticking with the local clock from now on?
97 */
98 if ((s64)(now - prev_trace_clock_time) < 0)
99 now = prev_trace_clock_time + 1;
100
101 prev_trace_clock_time = now;
102
103 __raw_spin_unlock(&trace_clock_lock);
104
105 out:
106 raw_local_irq_restore(flags);
107
108 return now;
109}
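How a tracer chooses among the three variants is left to the tracer/ring-buffer glue; as one illustrative possibility, keyed off the TRACE_ITER_GLOBAL_CLK iterator flag added to kernel/trace/trace.h earlier in this patch (trace_flags and the flag live there, so a helper like this would sit inside the tracing core):

    #include <linux/trace_clock.h>
    #include "trace.h"              /* trace_flags, TRACE_ITER_GLOBAL_CLK */

    /* Illustrative only: pick a timestamp source for a new trace entry. */
    static u64 example_pick_timestamp(void)
    {
            if (trace_flags & TRACE_ITER_GLOBAL_CLK)
                    return trace_clock_global();    /* serialized, globally monotonic */

            return trace_clock_local();             /* fast, per-CPU, may drift */
    }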
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
new file mode 100644
index 000000000000..22cba9970776
--- /dev/null
+++ b/kernel/trace/trace_event_profile.c
@@ -0,0 +1,31 @@
1/*
2 * trace event based perf counter profiling
3 *
4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
5 *
6 */
7
8#include "trace.h"
9
10int ftrace_profile_enable(int event_id)
11{
12 struct ftrace_event_call *event;
13
14 for_each_event(event) {
15 if (event->id == event_id)
16 return event->profile_enable(event);
17 }
18
19 return -EINVAL;
20}
21
22void ftrace_profile_disable(int event_id)
23{
24 struct ftrace_event_call *event;
25
26 for_each_event(event) {
27 if (event->id == event_id)
28 return event->profile_disable(event);
29 }
30}
31
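From the consumer side (the perf counter glue this file exists for), the intended flow is symmetric; in the sketch below, event_id is a stand-in for the numeric id exposed through each event's 'id' file created later in this patch.

    /* Illustrative only: enable, sample, then disable one trace event by id. */
    static int example_profile_session(int event_id)
    {
            int err;

            err = ftrace_profile_enable(event_id);  /* -EINVAL if no such event */
            if (err)
                    return err;

            /* ... profiling counters accumulate hits for this tracepoint ... */

            ftrace_profile_disable(event_id);
            return 0;
    }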
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
new file mode 100644
index 000000000000..fd78bee71dd7
--- /dev/null
+++ b/kernel/trace/trace_event_types.h
@@ -0,0 +1,173 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM ftrace
3
4/*
5 * We cheat and use the proto type field as the ID
6 * and args as the entry type (minus 'struct')
7 */
8TRACE_EVENT_FORMAT(function, TRACE_FN, ftrace_entry, ignore,
9 TRACE_STRUCT(
10 TRACE_FIELD(unsigned long, ip, ip)
11 TRACE_FIELD(unsigned long, parent_ip, parent_ip)
12 ),
13 TP_RAW_FMT(" %lx <-- %lx")
14);
15
16TRACE_EVENT_FORMAT(funcgraph_entry, TRACE_GRAPH_ENT,
17 ftrace_graph_ent_entry, ignore,
18 TRACE_STRUCT(
19 TRACE_FIELD(unsigned long, graph_ent.func, func)
20 TRACE_FIELD(int, graph_ent.depth, depth)
21 ),
22 TP_RAW_FMT("--> %lx (%d)")
23);
24
25TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
26 ftrace_graph_ret_entry, ignore,
27 TRACE_STRUCT(
28 TRACE_FIELD(unsigned long, ret.func, func)
29 TRACE_FIELD(int, ret.depth, depth)
30 ),
31 TP_RAW_FMT("<-- %lx (%d)")
32);
33
34TRACE_EVENT_FORMAT(wakeup, TRACE_WAKE, ctx_switch_entry, ignore,
35 TRACE_STRUCT(
36 TRACE_FIELD(unsigned int, prev_pid, prev_pid)
37 TRACE_FIELD(unsigned char, prev_prio, prev_prio)
38 TRACE_FIELD(unsigned char, prev_state, prev_state)
39 TRACE_FIELD(unsigned int, next_pid, next_pid)
40 TRACE_FIELD(unsigned char, next_prio, next_prio)
41 TRACE_FIELD(unsigned char, next_state, next_state)
42 TRACE_FIELD(unsigned int, next_cpu, next_cpu)
43 ),
44 TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]")
45);
46
47TRACE_EVENT_FORMAT(context_switch, TRACE_CTX, ctx_switch_entry, ignore,
48 TRACE_STRUCT(
49 TRACE_FIELD(unsigned int, prev_pid, prev_pid)
50 TRACE_FIELD(unsigned char, prev_prio, prev_prio)
51 TRACE_FIELD(unsigned char, prev_state, prev_state)
52 TRACE_FIELD(unsigned int, next_pid, next_pid)
53 TRACE_FIELD(unsigned char, next_prio, next_prio)
54 TRACE_FIELD(unsigned char, next_state, next_state)
55 TRACE_FIELD(unsigned int, next_cpu, next_cpu)
56 ),
57 TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]")
58);
59
60TRACE_EVENT_FORMAT(special, TRACE_SPECIAL, special_entry, ignore,
61 TRACE_STRUCT(
62 TRACE_FIELD(unsigned long, arg1, arg1)
63 TRACE_FIELD(unsigned long, arg2, arg2)
64 TRACE_FIELD(unsigned long, arg3, arg3)
65 ),
66 TP_RAW_FMT("(%08lx) (%08lx) (%08lx)")
67);
68
69/*
70 * Stack-trace entry:
71 */
72
73/* #define FTRACE_STACK_ENTRIES 8 */
74
75TRACE_EVENT_FORMAT(kernel_stack, TRACE_STACK, stack_entry, ignore,
76 TRACE_STRUCT(
77 TRACE_FIELD(unsigned long, caller[0], stack0)
78 TRACE_FIELD(unsigned long, caller[1], stack1)
79 TRACE_FIELD(unsigned long, caller[2], stack2)
80 TRACE_FIELD(unsigned long, caller[3], stack3)
81 TRACE_FIELD(unsigned long, caller[4], stack4)
82 TRACE_FIELD(unsigned long, caller[5], stack5)
83 TRACE_FIELD(unsigned long, caller[6], stack6)
84 TRACE_FIELD(unsigned long, caller[7], stack7)
85 ),
86 TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
87 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
88);
89
90TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
91 TRACE_STRUCT(
92 TRACE_FIELD(unsigned long, caller[0], stack0)
93 TRACE_FIELD(unsigned long, caller[1], stack1)
94 TRACE_FIELD(unsigned long, caller[2], stack2)
95 TRACE_FIELD(unsigned long, caller[3], stack3)
96 TRACE_FIELD(unsigned long, caller[4], stack4)
97 TRACE_FIELD(unsigned long, caller[5], stack5)
98 TRACE_FIELD(unsigned long, caller[6], stack6)
99 TRACE_FIELD(unsigned long, caller[7], stack7)
100 ),
101 TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
102 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
103);
104
105TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
106 TRACE_STRUCT(
107 TRACE_FIELD(unsigned long, ip, ip)
108 TRACE_FIELD(char *, fmt, fmt)
109 TRACE_FIELD_ZERO_CHAR(buf)
110 ),
111 TP_RAW_FMT("%08lx (%d) fmt:%p %s")
112);
113
114TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
115 TRACE_STRUCT(
116 TRACE_FIELD(unsigned long, ip, ip)
117 TRACE_FIELD_ZERO_CHAR(buf)
118 ),
119 TP_RAW_FMT("%08lx (%d) fmt:%p %s")
120);
121
122TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore,
123 TRACE_STRUCT(
124 TRACE_FIELD(unsigned int, line, line)
125 TRACE_FIELD_SPECIAL(char func[TRACE_FUNC_SIZE+1], func, func)
126 TRACE_FIELD_SPECIAL(char file[TRACE_FUNC_SIZE+1], file, file)
127 TRACE_FIELD(char, correct, correct)
128 ),
129 TP_RAW_FMT("%u:%s:%s (%u)")
130);
131
132TRACE_EVENT_FORMAT(hw_branch, TRACE_HW_BRANCHES, hw_branch_entry, ignore,
133 TRACE_STRUCT(
134 TRACE_FIELD(u64, from, from)
135 TRACE_FIELD(u64, to, to)
136 ),
137 TP_RAW_FMT("from: %llx to: %llx")
138);
139
140TRACE_EVENT_FORMAT(power, TRACE_POWER, trace_power, ignore,
141 TRACE_STRUCT(
142 TRACE_FIELD(ktime_t, state_data.stamp, stamp)
143 TRACE_FIELD(ktime_t, state_data.end, end)
144 TRACE_FIELD(int, state_data.type, type)
145 TRACE_FIELD(int, state_data.state, state)
146 ),
147 TP_RAW_FMT("%llx->%llx type:%u state:%u")
148);
149
150TRACE_EVENT_FORMAT(kmem_alloc, TRACE_KMEM_ALLOC, kmemtrace_alloc_entry, ignore,
151 TRACE_STRUCT(
152 TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
153 TRACE_FIELD(unsigned long, call_site, call_site)
154 TRACE_FIELD(const void *, ptr, ptr)
155 TRACE_FIELD(size_t, bytes_req, bytes_req)
156 TRACE_FIELD(size_t, bytes_alloc, bytes_alloc)
157 TRACE_FIELD(gfp_t, gfp_flags, gfp_flags)
158 TRACE_FIELD(int, node, node)
159 ),
160 TP_RAW_FMT("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
161 " flags:%x node:%d")
162);
163
164TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore,
165 TRACE_STRUCT(
166 TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
167 TRACE_FIELD(unsigned long, call_site, call_site)
168 TRACE_FIELD(const void *, ptr, ptr)
169 ),
170 TP_RAW_FMT("type:%u call_site:%lx ptr:%p")
171);
172
173#undef TRACE_SYSTEM
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
new file mode 100644
index 000000000000..64ec4d278ffb
--- /dev/null
+++ b/kernel/trace/trace_events.c
@@ -0,0 +1,824 @@
1/*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
6 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
9 */
10
11#include <linux/debugfs.h>
12#include <linux/uaccess.h>
13#include <linux/module.h>
14#include <linux/ctype.h>
15
16#include "trace_output.h"
17
18#define TRACE_SYSTEM "TRACE_SYSTEM"
19
20static DEFINE_MUTEX(event_mutex);
21
22int trace_define_field(struct ftrace_event_call *call, char *type,
23 char *name, int offset, int size)
24{
25 struct ftrace_event_field *field;
26
27 field = kzalloc(sizeof(*field), GFP_KERNEL);
28 if (!field)
29 goto err;
30
31 field->name = kstrdup(name, GFP_KERNEL);
32 if (!field->name)
33 goto err;
34
35 field->type = kstrdup(type, GFP_KERNEL);
36 if (!field->type)
37 goto err;
38
39 field->offset = offset;
40 field->size = size;
41 list_add(&field->link, &call->fields);
42
43 return 0;
44
45err:
46 if (field) {
47 kfree(field->name);
48 kfree(field->type);
49 }
50 kfree(field);
51
52 return -ENOMEM;
53}
54
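A define_fields callback (see the ftrace_event_call hook in trace.h) is expected to call this helper once per field. The real callback takes no arguments and knows its own event; the sketch below passes the call explicitly and uses a made-up entry layout purely to show the offsetof/sizeof shape.

    /* Hypothetical entry layout and helper; illustrative only. */
    struct example_event_entry {
            struct trace_entry      ent;
            unsigned long           ip;
    };

    static int example_define_ip_field(struct ftrace_event_call *call)
    {
            struct example_event_entry field;

            return trace_define_field(call, "unsigned long", "ip",
                                      offsetof(typeof(field), ip),
                                      sizeof(field.ip));
    }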
55static void ftrace_clear_events(void)
56{
57 struct ftrace_event_call *call = (void *)__start_ftrace_events;
58
59
60 while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
61
62 if (call->enabled) {
63 call->enabled = 0;
64 call->unregfunc();
65 }
66 call++;
67 }
68}
69
70static void ftrace_event_enable_disable(struct ftrace_event_call *call,
71 int enable)
72{
73
74 switch (enable) {
75 case 0:
76 if (call->enabled) {
77 call->enabled = 0;
78 call->unregfunc();
79 }
80 break;
81 case 1:
82 if (!call->enabled) {
83 call->enabled = 1;
84 call->regfunc();
85 }
86 break;
87 }
88}
89
90static int ftrace_set_clr_event(char *buf, int set)
91{
92 struct ftrace_event_call *call = __start_ftrace_events;
93 char *event = NULL, *sub = NULL, *match;
94 int ret = -EINVAL;
95
96 /*
97 * The buf format can be <subsystem>:<event-name>
98 * *:<event-name> means any event by that name.
99 * :<event-name> is the same.
100 *
101 * <subsystem>:* means all events in that subsystem
102 * <subsystem>: means the same.
103 *
104 * <name> (no ':') means all events in a subsystem with
105 * the name <name> or any event that matches <name>
106 */
107
108 match = strsep(&buf, ":");
109 if (buf) {
110 sub = match;
111 event = buf;
112 match = NULL;
113
114 if (!strlen(sub) || strcmp(sub, "*") == 0)
115 sub = NULL;
116 if (!strlen(event) || strcmp(event, "*") == 0)
117 event = NULL;
118 }
119
120 mutex_lock(&event_mutex);
121 for_each_event(call) {
122
123 if (!call->name || !call->regfunc)
124 continue;
125
126 if (match &&
127 strcmp(match, call->name) != 0 &&
128 strcmp(match, call->system) != 0)
129 continue;
130
131 if (sub && strcmp(sub, call->system) != 0)
132 continue;
133
134 if (event && strcmp(event, call->name) != 0)
135 continue;
136
137 ftrace_event_enable_disable(call, set);
138
139 ret = 0;
140 }
141 mutex_unlock(&event_mutex);
142
143 return ret;
144}
145
146/* 128 should be much more than enough */
147#define EVENT_BUF_SIZE 127
148
149static ssize_t
150ftrace_event_write(struct file *file, const char __user *ubuf,
151 size_t cnt, loff_t *ppos)
152{
153 size_t read = 0;
154 int i, set = 1;
155 ssize_t ret;
156 char *buf;
157 char ch;
158
159 if (!cnt || cnt < 0)
160 return 0;
161
162 ret = tracing_update_buffers();
163 if (ret < 0)
164 return ret;
165
166 ret = get_user(ch, ubuf++);
167 if (ret)
168 return ret;
169 read++;
170 cnt--;
171
172 /* skip white space */
173 while (cnt && isspace(ch)) {
174 ret = get_user(ch, ubuf++);
175 if (ret)
176 return ret;
177 read++;
178 cnt--;
179 }
180
181 /* Only white space found? */
182 if (isspace(ch)) {
183 file->f_pos += read;
184 ret = read;
185 return ret;
186 }
187
188 buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
189 if (!buf)
190 return -ENOMEM;
191
192 if (cnt > EVENT_BUF_SIZE)
193 cnt = EVENT_BUF_SIZE;
194
195 i = 0;
196 while (cnt && !isspace(ch)) {
197 if (!i && ch == '!')
198 set = 0;
199 else
200 buf[i++] = ch;
201
202 ret = get_user(ch, ubuf++);
203 if (ret)
204 goto out_free;
205 read++;
206 cnt--;
207 }
208 buf[i] = 0;
209
210 file->f_pos += read;
211
212 ret = ftrace_set_clr_event(buf, set);
213 if (ret)
214 goto out_free;
215
216 ret = read;
217
218 out_free:
219 kfree(buf);
220
221 return ret;
222}
223
224static void *
225t_next(struct seq_file *m, void *v, loff_t *pos)
226{
227 struct ftrace_event_call *call = m->private;
228 struct ftrace_event_call *next = call;
229
230 (*pos)++;
231
232 for (;;) {
233 if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
234 return NULL;
235
236 /*
237 * The ftrace subsystem is for showing formats only.
 238		 * They cannot be enabled or disabled via the event files.
239 */
240 if (call->regfunc)
241 break;
242
243 call++;
244 next = call;
245 }
246
247 m->private = ++next;
248
249 return call;
250}
251
252static void *t_start(struct seq_file *m, loff_t *pos)
253{
254 return t_next(m, NULL, pos);
255}
256
257static void *
258s_next(struct seq_file *m, void *v, loff_t *pos)
259{
260 struct ftrace_event_call *call = m->private;
261 struct ftrace_event_call *next;
262
263 (*pos)++;
264
265 retry:
266 if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
267 return NULL;
268
269 if (!call->enabled) {
270 call++;
271 goto retry;
272 }
273
274 next = call;
275 m->private = ++next;
276
277 return call;
278}
279
280static void *s_start(struct seq_file *m, loff_t *pos)
281{
282 return s_next(m, NULL, pos);
283}
284
285static int t_show(struct seq_file *m, void *v)
286{
287 struct ftrace_event_call *call = v;
288
289 if (strcmp(call->system, TRACE_SYSTEM) != 0)
290 seq_printf(m, "%s:", call->system);
291 seq_printf(m, "%s\n", call->name);
292
293 return 0;
294}
295
296static void t_stop(struct seq_file *m, void *p)
297{
298}
299
300static int
301ftrace_event_seq_open(struct inode *inode, struct file *file)
302{
303 int ret;
304 const struct seq_operations *seq_ops;
305
306 if ((file->f_mode & FMODE_WRITE) &&
307 !(file->f_flags & O_APPEND))
308 ftrace_clear_events();
309
310 seq_ops = inode->i_private;
311 ret = seq_open(file, seq_ops);
312 if (!ret) {
313 struct seq_file *m = file->private_data;
314
315 m->private = __start_ftrace_events;
316 }
317 return ret;
318}
319
320static ssize_t
321event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
322 loff_t *ppos)
323{
324 struct ftrace_event_call *call = filp->private_data;
325 char *buf;
326
327 if (call->enabled)
328 buf = "1\n";
329 else
330 buf = "0\n";
331
332 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
333}
334
335static ssize_t
336event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
337 loff_t *ppos)
338{
339 struct ftrace_event_call *call = filp->private_data;
340 char buf[64];
341 unsigned long val;
342 int ret;
343
344 if (cnt >= sizeof(buf))
345 return -EINVAL;
346
347 if (copy_from_user(&buf, ubuf, cnt))
348 return -EFAULT;
349
350 buf[cnt] = 0;
351
352 ret = strict_strtoul(buf, 10, &val);
353 if (ret < 0)
354 return ret;
355
356 ret = tracing_update_buffers();
357 if (ret < 0)
358 return ret;
359
360 switch (val) {
361 case 0:
362 case 1:
363 mutex_lock(&event_mutex);
364 ftrace_event_enable_disable(call, val);
365 mutex_unlock(&event_mutex);
366 break;
367
368 default:
369 return -EINVAL;
370 }
371
372 *ppos += cnt;
373
374 return cnt;
375}
376
377#undef FIELD
378#define FIELD(type, name) \
379 #type, "common_" #name, offsetof(typeof(field), name), \
380 sizeof(field.name)
381
382static int trace_write_header(struct trace_seq *s)
383{
384 struct trace_entry field;
385
386 /* struct trace_entry */
387 return trace_seq_printf(s,
388 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
389 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
390 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
391 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
392 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
393 "\n",
394 FIELD(unsigned char, type),
395 FIELD(unsigned char, flags),
396 FIELD(unsigned char, preempt_count),
397 FIELD(int, pid),
398 FIELD(int, tgid));
399}
400
401static ssize_t
402event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
403 loff_t *ppos)
404{
405 struct ftrace_event_call *call = filp->private_data;
406 struct trace_seq *s;
407 char *buf;
408 int r;
409
410 if (*ppos)
411 return 0;
412
413 s = kmalloc(sizeof(*s), GFP_KERNEL);
414 if (!s)
415 return -ENOMEM;
416
417 trace_seq_init(s);
418
419 /* If any of the first writes fail, so will the show_format. */
420
421 trace_seq_printf(s, "name: %s\n", call->name);
422 trace_seq_printf(s, "ID: %d\n", call->id);
423 trace_seq_printf(s, "format:\n");
424 trace_write_header(s);
425
426 r = call->show_format(s);
427 if (!r) {
428 /*
429 * ug! The format output is bigger than a PAGE!!
430 */
431 buf = "FORMAT TOO BIG\n";
432 r = simple_read_from_buffer(ubuf, cnt, ppos,
433 buf, strlen(buf));
434 goto out;
435 }
436
437 r = simple_read_from_buffer(ubuf, cnt, ppos,
438 s->buffer, s->len);
439 out:
440 kfree(s);
441 return r;
442}
443
444static ssize_t
445event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
446{
447 struct ftrace_event_call *call = filp->private_data;
448 struct trace_seq *s;
449 int r;
450
451 if (*ppos)
452 return 0;
453
454 s = kmalloc(sizeof(*s), GFP_KERNEL);
455 if (!s)
456 return -ENOMEM;
457
458 trace_seq_init(s);
459 trace_seq_printf(s, "%d\n", call->id);
460
461 r = simple_read_from_buffer(ubuf, cnt, ppos,
462 s->buffer, s->len);
463 kfree(s);
464 return r;
465}
466
467static ssize_t
468event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
469 loff_t *ppos)
470{
471 struct ftrace_event_call *call = filp->private_data;
472 struct trace_seq *s;
473 int r;
474
475 if (*ppos)
476 return 0;
477
478 s = kmalloc(sizeof(*s), GFP_KERNEL);
479 if (!s)
480 return -ENOMEM;
481
482 trace_seq_init(s);
483
484 filter_print_preds(call->preds, s);
485 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
486
487 kfree(s);
488
489 return r;
490}
491
492static ssize_t
493event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
494 loff_t *ppos)
495{
496 struct ftrace_event_call *call = filp->private_data;
497 char buf[64], *pbuf = buf;
498 struct filter_pred *pred;
499 int err;
500
501 if (cnt >= sizeof(buf))
502 return -EINVAL;
503
504 if (copy_from_user(&buf, ubuf, cnt))
505 return -EFAULT;
506
507 pred = kzalloc(sizeof(*pred), GFP_KERNEL);
508 if (!pred)
509 return -ENOMEM;
510
511 err = filter_parse(&pbuf, pred);
512 if (err < 0) {
513 filter_free_pred(pred);
514 return err;
515 }
516
517 if (pred->clear) {
518 filter_free_preds(call);
519 filter_free_pred(pred);
520 return cnt;
521 }
522
523 if (filter_add_pred(call, pred)) {
524 filter_free_pred(pred);
525 return -EINVAL;
526 }
527
528 *ppos += cnt;
529
530 return cnt;
531}
532
533static ssize_t
534subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
535 loff_t *ppos)
536{
537 struct event_subsystem *system = filp->private_data;
538 struct trace_seq *s;
539 int r;
540
541 if (*ppos)
542 return 0;
543
544 s = kmalloc(sizeof(*s), GFP_KERNEL);
545 if (!s)
546 return -ENOMEM;
547
548 trace_seq_init(s);
549
550 filter_print_preds(system->preds, s);
551 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
552
553 kfree(s);
554
555 return r;
556}
557
558static ssize_t
559subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
560 loff_t *ppos)
561{
562 struct event_subsystem *system = filp->private_data;
563 char buf[64], *pbuf = buf;
564 struct filter_pred *pred;
565 int err;
566
567 if (cnt >= sizeof(buf))
568 return -EINVAL;
569
570 if (copy_from_user(&buf, ubuf, cnt))
571 return -EFAULT;
572
573 pred = kzalloc(sizeof(*pred), GFP_KERNEL);
574 if (!pred)
575 return -ENOMEM;
576
577 err = filter_parse(&pbuf, pred);
578 if (err < 0) {
579 filter_free_pred(pred);
580 return err;
581 }
582
583 if (pred->clear) {
584 filter_free_subsystem_preds(system);
585 filter_free_pred(pred);
586 return cnt;
587 }
588
589 if (filter_add_subsystem_pred(system, pred)) {
590 filter_free_subsystem_preds(system);
591 filter_free_pred(pred);
592 return -EINVAL;
593 }
594
595 *ppos += cnt;
596
597 return cnt;
598}
599
600static const struct seq_operations show_event_seq_ops = {
601 .start = t_start,
602 .next = t_next,
603 .show = t_show,
604 .stop = t_stop,
605};
606
607static const struct seq_operations show_set_event_seq_ops = {
608 .start = s_start,
609 .next = s_next,
610 .show = t_show,
611 .stop = t_stop,
612};
613
614static const struct file_operations ftrace_avail_fops = {
615 .open = ftrace_event_seq_open,
616 .read = seq_read,
617 .llseek = seq_lseek,
618 .release = seq_release,
619};
620
621static const struct file_operations ftrace_set_event_fops = {
622 .open = ftrace_event_seq_open,
623 .read = seq_read,
624 .write = ftrace_event_write,
625 .llseek = seq_lseek,
626 .release = seq_release,
627};
628
629static const struct file_operations ftrace_enable_fops = {
630 .open = tracing_open_generic,
631 .read = event_enable_read,
632 .write = event_enable_write,
633};
634
635static const struct file_operations ftrace_event_format_fops = {
636 .open = tracing_open_generic,
637 .read = event_format_read,
638};
639
640static const struct file_operations ftrace_event_id_fops = {
641 .open = tracing_open_generic,
642 .read = event_id_read,
643};
644
645static const struct file_operations ftrace_event_filter_fops = {
646 .open = tracing_open_generic,
647 .read = event_filter_read,
648 .write = event_filter_write,
649};
650
651static const struct file_operations ftrace_subsystem_filter_fops = {
652 .open = tracing_open_generic,
653 .read = subsystem_filter_read,
654 .write = subsystem_filter_write,
655};
656
657static struct dentry *event_trace_events_dir(void)
658{
659 static struct dentry *d_tracer;
660 static struct dentry *d_events;
661
662 if (d_events)
663 return d_events;
664
665 d_tracer = tracing_init_dentry();
666 if (!d_tracer)
667 return NULL;
668
669 d_events = debugfs_create_dir("events", d_tracer);
670 if (!d_events)
671 pr_warning("Could not create debugfs "
672 "'events' directory\n");
673
674 return d_events;
675}
676
677static LIST_HEAD(event_subsystems);
678
679static struct dentry *
680event_subsystem_dir(const char *name, struct dentry *d_events)
681{
682 struct event_subsystem *system;
683
684 /* First see if we did not already create this dir */
685 list_for_each_entry(system, &event_subsystems, list) {
686 if (strcmp(system->name, name) == 0)
687 return system->entry;
688 }
689
690 /* need to create new entry */
691 system = kmalloc(sizeof(*system), GFP_KERNEL);
692 if (!system) {
693 pr_warning("No memory to create event subsystem %s\n",
694 name);
695 return d_events;
696 }
697
698 system->entry = debugfs_create_dir(name, d_events);
699 if (!system->entry) {
700 pr_warning("Could not create event subsystem %s\n",
701 name);
702 kfree(system);
703 return d_events;
704 }
705
706 system->name = name;
707 list_add(&system->list, &event_subsystems);
708
709 system->preds = NULL;
710
711 return system->entry;
712}
713
714static int
715event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
716{
717 struct dentry *entry;
718 int ret;
719
720 /*
721 * If the trace point header did not define TRACE_SYSTEM
722 * then the system would be called "TRACE_SYSTEM".
723 */
724 if (strcmp(call->system, "TRACE_SYSTEM") != 0)
725 d_events = event_subsystem_dir(call->system, d_events);
726
727 if (call->raw_init) {
728 ret = call->raw_init();
729 if (ret < 0) {
730 pr_warning("Could not initialize trace point"
731 " events/%s\n", call->name);
732 return ret;
733 }
734 }
735
736 call->dir = debugfs_create_dir(call->name, d_events);
737 if (!call->dir) {
738 pr_warning("Could not create debugfs "
739 "'%s' directory\n", call->name);
740 return -1;
741 }
742
743 if (call->regfunc) {
744 entry = debugfs_create_file("enable", 0644, call->dir, call,
745 &ftrace_enable_fops);
746 if (!entry)
747 pr_warning("Could not create debugfs "
748 "'%s/enable' entry\n", call->name);
749 }
750
751 if (call->id) {
752 entry = debugfs_create_file("id", 0444, call->dir, call,
753 &ftrace_event_id_fops);
754 if (!entry)
755 pr_warning("Could not create debugfs '%s/id' entry\n",
756 call->name);
757 }
758
759 if (call->define_fields) {
760 ret = call->define_fields();
761 if (ret < 0) {
762 pr_warning("Could not initialize trace point"
763 " events/%s\n", call->name);
764 return ret;
765 }
766 entry = debugfs_create_file("filter", 0644, call->dir, call,
767 &ftrace_event_filter_fops);
768 if (!entry)
769 pr_warning("Could not create debugfs "
770 "'%s/filter' entry\n", call->name);
771 }
772
773 /* A trace may not want to export its format */
774 if (!call->show_format)
775 return 0;
776
777 entry = debugfs_create_file("format", 0444, call->dir, call,
778 &ftrace_event_format_fops);
779 if (!entry)
780 pr_warning("Could not create debugfs "
781 "'%s/format' entry\n", call->name);
782
783 return 0;
784}
785
786static __init int event_trace_init(void)
787{
788 struct ftrace_event_call *call = __start_ftrace_events;
789 struct dentry *d_tracer;
790 struct dentry *entry;
791 struct dentry *d_events;
792
793 d_tracer = tracing_init_dentry();
794 if (!d_tracer)
795 return 0;
796
797 entry = debugfs_create_file("available_events", 0444, d_tracer,
798 (void *)&show_event_seq_ops,
799 &ftrace_avail_fops);
800 if (!entry)
801 pr_warning("Could not create debugfs "
802 "'available_events' entry\n");
803
804 entry = debugfs_create_file("set_event", 0644, d_tracer,
805 (void *)&show_set_event_seq_ops,
806 &ftrace_set_event_fops);
807 if (!entry)
808 pr_warning("Could not create debugfs "
809 "'set_event' entry\n");
810
811 d_events = event_trace_events_dir();
812 if (!d_events)
813 return 0;
814
815 for_each_event(call) {
816 /* The linker may leave blanks */
817 if (!call->name)
818 continue;
819 event_create_dir(call, d_events);
820 }
821
822 return 0;
823}
824fs_initcall(event_trace_init);
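
event_trace_init() wires the whole interface together at boot: available_events lists every compiled-in event, set_event enables or disables events by name, and the events/ directory holds the per-event controls created by event_create_dir(). A sketch of driving the two top-level files from userspace; the paths and the chosen event name are assumptions:

/* Hedged sketch: list available_events, then enable one event through
 * set_event.  Paths and the event name are assumptions. */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/kernel/debug/tracing";
	char path[256], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/available_events", base);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	snprintf(path, sizeof(path), "%s/set_event", base);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("sched_switch\n", f);	/* enable a single event by name */
	fclose(f);
	return 0;
}
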
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
new file mode 100644
index 000000000000..026be412f356
--- /dev/null
+++ b/kernel/trace/trace_events_filter.c
@@ -0,0 +1,427 @@
1/*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21#include <linux/debugfs.h>
22#include <linux/uaccess.h>
23#include <linux/module.h>
24#include <linux/ctype.h>
25
26#include "trace.h"
27#include "trace_output.h"
28
29static int filter_pred_64(struct filter_pred *pred, void *event)
30{
31 u64 *addr = (u64 *)(event + pred->offset);
32 u64 val = (u64)pred->val;
33 int match;
34
35 match = (val == *addr) ^ pred->not;
36
37 return match;
38}
39
40static int filter_pred_32(struct filter_pred *pred, void *event)
41{
42 u32 *addr = (u32 *)(event + pred->offset);
43 u32 val = (u32)pred->val;
44 int match;
45
46 match = (val == *addr) ^ pred->not;
47
48 return match;
49}
50
51static int filter_pred_16(struct filter_pred *pred, void *event)
52{
53 u16 *addr = (u16 *)(event + pred->offset);
54 u16 val = (u16)pred->val;
55 int match;
56
57 match = (val == *addr) ^ pred->not;
58
59 return match;
60}
61
62static int filter_pred_8(struct filter_pred *pred, void *event)
63{
64 u8 *addr = (u8 *)(event + pred->offset);
65 u8 val = (u8)pred->val;
66 int match;
67
68 match = (val == *addr) ^ pred->not;
69
70 return match;
71}
72
73static int filter_pred_string(struct filter_pred *pred, void *event)
74{
75 char *addr = (char *)(event + pred->offset);
76 int cmp, match;
77
78 cmp = strncmp(addr, pred->str_val, pred->str_len);
79
80 match = (!cmp) ^ pred->not;
81
82 return match;
83}
84
85/* return 1 if event matches, 0 otherwise (discard) */
86int filter_match_preds(struct ftrace_event_call *call, void *rec)
87{
88 int i, matched, and_failed = 0;
89 struct filter_pred *pred;
90
91 for (i = 0; i < MAX_FILTER_PRED; i++) {
92 if (call->preds[i]) {
93 pred = call->preds[i];
94 if (and_failed && !pred->or)
95 continue;
96 matched = pred->fn(pred, rec);
97 if (!matched && !pred->or) {
98 and_failed = 1;
99 continue;
100 } else if (matched && pred->or)
101 return 1;
102 } else
103 break;
104 }
105
106 if (and_failed)
107 return 0;
108
109 return 1;
110}
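
The loop above walks the predicate array in order: a predicate flagged "or" accepts the record as soon as it matches, a failing non-"or" predicate only records the failure so a later "or" alternative can still rescue the record, and the event is kept only if no ANDed predicate failed. A standalone sketch of the same short-circuit logic over a toy record; all types, field names and values here are invented for illustration:

/* Hedged sketch: the AND/OR short-circuit logic of filter_match_preds(),
 * reimplemented over a toy record.  Every name below is made up. */
#include <stdio.h>
#include <stddef.h>

struct toy_rec {
	int	pid;
	int	prio;
};

struct toy_pred {
	int	(*fn)(const struct toy_pred *pred, const void *rec);
	size_t	offset;
	int	val;
	int	not;
	int	or;
};

static int pred_int(const struct toy_pred *pred, const void *rec)
{
	int field = *(const int *)((const char *)rec + pred->offset);

	return (field == pred->val) ^ pred->not;
}

static int toy_match(struct toy_pred **preds, int npreds, const void *rec)
{
	int i, matched, and_failed = 0;

	for (i = 0; i < npreds && preds[i]; i++) {
		struct toy_pred *pred = preds[i];

		if (and_failed && !pred->or)
			continue;
		matched = pred->fn(pred, rec);
		if (!matched && !pred->or)
			and_failed = 1;
		else if (matched && pred->or)
			return 1;	/* an OR alternative matched */
	}
	return !and_failed;
}

int main(void)
{
	struct toy_rec rec = { .pid = 1, .prio = 120 };
	struct toy_pred p0 = { pred_int, offsetof(struct toy_rec, pid), 1, 0, 0 };
	struct toy_pred p1 = { pred_int, offsetof(struct toy_rec, prio), 0, 1, 0 };
	struct toy_pred *preds[] = { &p0, &p1 };

	/* "pid == 1 && prio != 0" against { pid = 1, prio = 120 } prints 1 */
	printf("match: %d\n", toy_match(preds, 2, &rec));
	return 0;
}
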
111
112void filter_print_preds(struct filter_pred **preds, struct trace_seq *s)
113{
114 char *field_name;
115 struct filter_pred *pred;
116 int i;
117
118 if (!preds) {
119 trace_seq_printf(s, "none\n");
120 return;
121 }
122
123 for (i = 0; i < MAX_FILTER_PRED; i++) {
124 if (preds[i]) {
125 pred = preds[i];
126 field_name = pred->field_name;
127 if (i)
128 trace_seq_printf(s, pred->or ? "|| " : "&& ");
129 trace_seq_printf(s, "%s ", field_name);
130 trace_seq_printf(s, pred->not ? "!= " : "== ");
131 if (pred->str_val)
132 trace_seq_printf(s, "%s\n", pred->str_val);
133 else
134 trace_seq_printf(s, "%llu\n", pred->val);
135 } else
136 break;
137 }
138}
139
140static struct ftrace_event_field *
141find_event_field(struct ftrace_event_call *call, char *name)
142{
143 struct ftrace_event_field *field;
144
145 list_for_each_entry(field, &call->fields, link) {
146 if (!strcmp(field->name, name))
147 return field;
148 }
149
150 return NULL;
151}
152
153void filter_free_pred(struct filter_pred *pred)
154{
155 if (!pred)
156 return;
157
158 kfree(pred->field_name);
159 kfree(pred->str_val);
160 kfree(pred);
161}
162
163void filter_free_preds(struct ftrace_event_call *call)
164{
165 int i;
166
167 if (call->preds) {
168 for (i = 0; i < MAX_FILTER_PRED; i++)
169 filter_free_pred(call->preds[i]);
170 kfree(call->preds);
171 call->preds = NULL;
172 }
173}
174
175void filter_free_subsystem_preds(struct event_subsystem *system)
176{
177 struct ftrace_event_call *call = __start_ftrace_events;
178 int i;
179
180 if (system->preds) {
181 for (i = 0; i < MAX_FILTER_PRED; i++)
182 filter_free_pred(system->preds[i]);
183 kfree(system->preds);
184 system->preds = NULL;
185 }
186
187 events_for_each(call) {
188 if (!call->name || !call->regfunc)
189 continue;
190
191 if (!strcmp(call->system, system->name))
192 filter_free_preds(call);
193 }
194}
195
196static int __filter_add_pred(struct ftrace_event_call *call,
197 struct filter_pred *pred)
198{
199 int i;
200
201 if (call->preds && !pred->compound)
202 filter_free_preds(call);
203
204 if (!call->preds) {
205 call->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
206 GFP_KERNEL);
207 if (!call->preds)
208 return -ENOMEM;
209 }
210
211 for (i = 0; i < MAX_FILTER_PRED; i++) {
212 if (!call->preds[i]) {
213 call->preds[i] = pred;
214 return 0;
215 }
216 }
217
218 return -ENOMEM;
219}
220
221static int is_string_field(const char *type)
222{
223 if (strchr(type, '[') && strstr(type, "char"))
224 return 1;
225
226 return 0;
227}
228
229int filter_add_pred(struct ftrace_event_call *call, struct filter_pred *pred)
230{
231 struct ftrace_event_field *field;
232
233 field = find_event_field(call, pred->field_name);
234 if (!field)
235 return -EINVAL;
236
237 pred->offset = field->offset;
238
239 if (is_string_field(field->type)) {
240 if (!pred->str_val)
241 return -EINVAL;
242 pred->fn = filter_pred_string;
243 pred->str_len = field->size;
244 return __filter_add_pred(call, pred);
245 } else {
246 if (pred->str_val)
247 return -EINVAL;
248 }
249
250 switch (field->size) {
251 case 8:
252 pred->fn = filter_pred_64;
253 break;
254 case 4:
255 pred->fn = filter_pred_32;
256 break;
257 case 2:
258 pred->fn = filter_pred_16;
259 break;
260 case 1:
261 pred->fn = filter_pred_8;
262 break;
263 default:
264 return -EINVAL;
265 }
266
267 return __filter_add_pred(call, pred);
268}
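
filter_add_pred() above resolves the field name to an offset and then dispatches on the field size: char arrays go through strncmp(), integer fields through a width-specific compare of the raw bytes at that offset. A small userspace sketch of the same width-based dispatch; the record layout is an assumption for illustration only:

/* Hedged sketch: compare a value against the raw bytes of a field at a
 * given offset, dispatching on the field width as filter_add_pred() does.
 * The record layout is invented. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_rec {
	uint16_t	flags;
	uint32_t	pid;
};

static int match_field(const void *rec, size_t offset, size_t size,
		       uint64_t val)
{
	const char *p = (const char *)rec + offset;
	uint64_t field;

	switch (size) {
	case 8: field = *(const uint64_t *)p; break;
	case 4: field = *(const uint32_t *)p; break;
	case 2: field = *(const uint16_t *)p; break;
	case 1: field = *(const uint8_t *)p;  break;
	default:
		return -1;	/* unsupported field width */
	}
	return field == val;
}

int main(void)
{
	struct toy_rec rec = { .flags = 0x3, .pid = 42 };

	printf("match: %d\n", match_field(&rec, offsetof(struct toy_rec, pid),
					  sizeof(rec.pid), 42));
	return 0;
}
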
269
270static struct filter_pred *copy_pred(struct filter_pred *pred)
271{
272 struct filter_pred *new_pred = kmalloc(sizeof(*pred), GFP_KERNEL);
273 if (!new_pred)
274 return NULL;
275
276 memcpy(new_pred, pred, sizeof(*pred));
277
278 if (pred->field_name) {
279 new_pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
280 if (!new_pred->field_name) {
281 kfree(new_pred);
282 return NULL;
283 }
284 }
285
286 if (pred->str_val) {
287 new_pred->str_val = kstrdup(pred->str_val, GFP_KERNEL);
288 if (!new_pred->str_val) {
289 filter_free_pred(new_pred);
290 return NULL;
291 }
292 }
293
294 return new_pred;
295}
296
297int filter_add_subsystem_pred(struct event_subsystem *system,
298 struct filter_pred *pred)
299{
300 struct ftrace_event_call *call = __start_ftrace_events;
301 struct filter_pred *event_pred;
302 int i;
303
304 if (system->preds && !pred->compound)
305 filter_free_subsystem_preds(system);
306
307 if (!system->preds) {
308 system->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
309 GFP_KERNEL);
310 if (!system->preds)
311 return -ENOMEM;
312 }
313
314 for (i = 0; i < MAX_FILTER_PRED; i++) {
315 if (!system->preds[i]) {
316 system->preds[i] = pred;
317 break;
318 }
319 }
320
321 if (i == MAX_FILTER_PRED)
322 return -EINVAL;
323
324 events_for_each(call) {
325 int err;
326
327 if (!call->name || !call->regfunc)
328 continue;
329
330 if (strcmp(call->system, system->name))
331 continue;
332
333 if (!find_event_field(call, pred->field_name))
334 continue;
335
336 event_pred = copy_pred(pred);
337 if (!event_pred)
338 goto oom;
339
340 err = filter_add_pred(call, event_pred);
341 if (err)
342 filter_free_pred(event_pred);
343 if (err == -ENOMEM)
344 goto oom;
345 }
346
347 return 0;
348
349oom:
350 system->preds[i] = NULL;
351 return -ENOMEM;
352}
353
354int filter_parse(char **pbuf, struct filter_pred *pred)
355{
356 char *tmp, *tok, *val_str = NULL;
357 int tok_n = 0;
358
359 /* field ==/!= number, or/and field ==/!= number, number */
360 while ((tok = strsep(pbuf, " \n"))) {
361 if (tok_n == 0) {
362 if (!strcmp(tok, "0")) {
363 pred->clear = 1;
364 return 0;
365 } else if (!strcmp(tok, "&&")) {
366 pred->or = 0;
367 pred->compound = 1;
368 } else if (!strcmp(tok, "||")) {
369 pred->or = 1;
370 pred->compound = 1;
371 } else
372 pred->field_name = tok;
373 tok_n = 1;
374 continue;
375 }
376 if (tok_n == 1) {
377 if (!pred->field_name)
378 pred->field_name = tok;
379 else if (!strcmp(tok, "!="))
380 pred->not = 1;
381 else if (!strcmp(tok, "=="))
382 pred->not = 0;
383 else {
384 pred->field_name = NULL;
385 return -EINVAL;
386 }
387 tok_n = 2;
388 continue;
389 }
390 if (tok_n == 2) {
391 if (pred->compound) {
392 if (!strcmp(tok, "!="))
393 pred->not = 1;
394 else if (!strcmp(tok, "=="))
395 pred->not = 0;
396 else {
397 pred->field_name = NULL;
398 return -EINVAL;
399 }
400 } else {
401 val_str = tok;
402 break; /* done */
403 }
404 tok_n = 3;
405 continue;
406 }
407 if (tok_n == 3) {
408 val_str = tok;
409 break; /* done */
410 }
411 }
412
413 pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
414 if (!pred->field_name)
415 return -ENOMEM;
416
417 pred->val = simple_strtoull(val_str, &tmp, 10);
418 if (tmp == val_str) {
419 pred->str_val = kstrdup(val_str, GFP_KERNEL);
420 if (!pred->str_val)
421 return -ENOMEM;
422 }
423
424 return 0;
425}
426
427
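
filter_parse() above consumes at most one predicate per call: an optional leading "&&" or "||" marks it compound, then come the field name, "==" or "!=", and the value, which is kept as a number when simple_strtoull() can parse it and as a string otherwise; a lone "0" clears the filter. A standalone tokenizer in the same spirit, with error handling and allocation deliberately trimmed, so it is a sketch of the grammar rather than a drop-in parser:

/* Hedged sketch: userspace tokenizer for the one-predicate grammar that
 * filter_parse() accepts.  Simplified on purpose. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_pred {
	char			field[32];
	char			str_val[32];
	unsigned long long	val;
	int			not, or, compound, clear, is_string;
};

static int parse_pred(char *buf, struct toy_pred *p)
{
	char *tok, *end;

	memset(p, 0, sizeof(*p));

	tok = strsep(&buf, " \n");
	if (!tok)
		return -1;
	if (!strcmp(tok, "0")) {		/* clear the whole filter */
		p->clear = 1;
		return 0;
	}
	if (!strcmp(tok, "&&") || !strcmp(tok, "||")) {
		p->compound = 1;
		p->or = (tok[0] == '|');
		tok = strsep(&buf, " \n");	/* field name follows */
		if (!tok)
			return -1;
	}
	snprintf(p->field, sizeof(p->field), "%s", tok);

	tok = strsep(&buf, " \n");		/* "==" or "!=" */
	if (!tok || (strcmp(tok, "==") && strcmp(tok, "!=")))
		return -1;
	p->not = !strcmp(tok, "!=");

	tok = strsep(&buf, " \n");		/* the value */
	if (!tok)
		return -1;
	p->val = strtoull(tok, &end, 10);
	if (end == tok) {			/* not numeric: keep as string */
		p->is_string = 1;
		snprintf(p->str_val, sizeof(p->str_val), "%s", tok);
	}
	return 0;
}

int main(void)
{
	char buf[] = "&& next_comm != idle\n";
	struct toy_pred p;

	if (!parse_pred(buf, &p))
		printf("%s%s %s %s\n", p.compound ? (p.or ? "|| " : "&& ") : "",
		       p.field, p.not ? "!=" : "==",
		       p.is_string ? p.str_val : "(number)");
	return 0;
}
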
diff --git a/kernel/trace/trace_events_stage_1.h b/kernel/trace/trace_events_stage_1.h
new file mode 100644
index 000000000000..38985f9b379c
--- /dev/null
+++ b/kernel/trace/trace_events_stage_1.h
@@ -0,0 +1,39 @@
1/*
2 * Stage 1 of the trace events.
3 *
4 * Override the macros in <trace/trace_event_types.h> to include the following:
5 *
6 * struct ftrace_raw_<call> {
7 * struct trace_entry ent;
8 * <type> <item>;
9 * <type2> <item2>[<len>];
10 * [...]
11 * };
12 *
13 * The <type> <item> is created by the __field(type, item) macro or
14 * the __array(type2, item2, len) macro.
15 * We simply do "type item;", and that will create the fields
16 * in the structure.
17 */
18
19#undef TRACE_FORMAT
20#define TRACE_FORMAT(call, proto, args, fmt)
21
22#undef __array
23#define __array(type, item, len) type item[len];
24
25#undef __field
26#define __field(type, item) type item;
27
28#undef TP_STRUCT__entry
29#define TP_STRUCT__entry(args...) args
30
31#undef TRACE_EVENT
32#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
33 struct ftrace_raw_##name { \
34 struct trace_entry ent; \
35 tstruct \
36 }; \
37 static struct ftrace_event_call event_##name
38
39#include <trace/trace_event_types.h>
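
With the stage-1 definitions above, each TRACE_EVENT() in <trace/trace_event_types.h> expands to nothing more than a struct ftrace_raw_<call> plus a forward declaration of its ftrace_event_call. The same technique in miniature, as a self-contained userspace sketch whose event and fields are invented for illustration:

/* Hedged sketch: the stage-1 technique in miniature, macros that turn a
 * field list into a struct definition.  The event "foo" and its fields
 * are made up. */
#include <stdio.h>

#define __field(type, item)		type item;
#define __array(type, item, len)	type item[len];
#define TP_STRUCT__entry(args...)	args

#define TOY_EVENT(name, tstruct)	\
	struct toy_raw_##name {		\
		tstruct			\
	}

TOY_EVENT(foo,
	TP_STRUCT__entry(
		__field(int, bar)
		__array(char, baz, 16)
	)
);

int main(void)
{
	printf("sizeof(struct toy_raw_foo) = %zu\n",
	       sizeof(struct toy_raw_foo));
	return 0;
}
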
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
new file mode 100644
index 000000000000..30743f7d4110
--- /dev/null
+++ b/kernel/trace/trace_events_stage_2.h
@@ -0,0 +1,176 @@
1/*
2 * Stage 2 of the trace events.
3 *
4 * Override the macros in <trace/trace_event_types.h> to include the following:
5 *
6 * enum print_line_t
7 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
8 * {
9 * struct trace_seq *s = &iter->seq;
10 * struct ftrace_raw_<call> *field; <-- defined in stage 1
11 * struct trace_entry *entry;
12 * int ret;
13 *
14 * entry = iter->ent;
15 *
16 * if (entry->type != event_<call>.id) {
17 * WARN_ON_ONCE(1);
18 * return TRACE_TYPE_UNHANDLED;
19 * }
20 *
21 * field = (typeof(field))entry;
22 *
23 * ret = trace_seq_printf(s, <TP_printk> "\n");
24 * if (!ret)
25 * return TRACE_TYPE_PARTIAL_LINE;
26 *
27 * return TRACE_TYPE_HANDLED;
28 * }
29 *
30 * This is the method used to print the raw event to the trace
31 * output format. Note, this is not needed if the data is read
32 * in binary.
33 */
34
35#undef __entry
36#define __entry field
37
38#undef TP_printk
39#define TP_printk(fmt, args...) fmt "\n", args
40
41#undef TRACE_EVENT
42#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
43enum print_line_t \
44ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
45{ \
46 struct trace_seq *s = &iter->seq; \
47 struct ftrace_raw_##call *field; \
48 struct trace_entry *entry; \
49 int ret; \
50 \
51 entry = iter->ent; \
52 \
53 if (entry->type != event_##call.id) { \
54 WARN_ON_ONCE(1); \
55 return TRACE_TYPE_UNHANDLED; \
56 } \
57 \
58 field = (typeof(field))entry; \
59 \
60 ret = trace_seq_printf(s, #call ": " print); \
61 if (!ret) \
62 return TRACE_TYPE_PARTIAL_LINE; \
63 \
64 return TRACE_TYPE_HANDLED; \
65}
66
67#include <trace/trace_event_types.h>
68
69/*
70 * Set up the format used to describe the trace point.
71 *
72 * int
73 * ftrace_format_##call(struct trace_seq *s)
74 * {
75 * struct ftrace_raw_##call field;
76 * int ret;
77 *
78 * ret = trace_seq_printf(s, #type " " #item ";"
79 * " offset:%u; size:%u;\n",
80 * offsetof(struct ftrace_raw_##call, item),
81 * sizeof(field.type));
82 *
83 * }
84 */
85
86#undef TP_STRUCT__entry
87#define TP_STRUCT__entry(args...) args
88
89#undef __field
90#define __field(type, item) \
91 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
92 "offset:%u;\tsize:%u;\n", \
93 (unsigned int)offsetof(typeof(field), item), \
94 (unsigned int)sizeof(field.item)); \
95 if (!ret) \
96 return 0;
97
98#undef __array
99#define __array(type, item, len) \
100 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
101 "offset:%u;\tsize:%u;\n", \
102 (unsigned int)offsetof(typeof(field), item), \
103 (unsigned int)sizeof(field.item)); \
104 if (!ret) \
105 return 0;
106
107#undef __entry
108#define __entry "REC"
109
110#undef TP_printk
111#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args
112
113#undef TP_fast_assign
114#define TP_fast_assign(args...) args
115
116#undef TRACE_EVENT
117#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
118static int \
119ftrace_format_##call(struct trace_seq *s) \
120{ \
121 struct ftrace_raw_##call field; \
122 int ret; \
123 \
124 tstruct; \
125 \
126 trace_seq_printf(s, "\nprint fmt: " print); \
127 \
128 return ret; \
129}
130
131#include <trace/trace_event_types.h>
132
133#undef __field
134#define __field(type, item) \
135 ret = trace_define_field(event_call, #type, #item, \
136 offsetof(typeof(field), item), \
137 sizeof(field.item)); \
138 if (ret) \
139 return ret;
140
141#undef __array
142#define __array(type, item, len) \
143 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
144 offsetof(typeof(field), item), \
145 sizeof(field.item)); \
146 if (ret) \
147 return ret;
148
149#define __common_field(type, item) \
150 ret = trace_define_field(event_call, #type, "common_" #item, \
151 offsetof(typeof(field.ent), item), \
152 sizeof(field.ent.item)); \
153 if (ret) \
154 return ret;
155
156#undef TRACE_EVENT
157#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
158int \
159ftrace_define_fields_##call(void) \
160{ \
161 struct ftrace_raw_##call field; \
162 struct ftrace_event_call *event_call = &event_##call; \
163 int ret; \
164 \
165 __common_field(unsigned char, type); \
166 __common_field(unsigned char, flags); \
167 __common_field(unsigned char, preempt_count); \
168 __common_field(int, pid); \
169 __common_field(int, tgid); \
170 \
171 tstruct; \
172 \
173 return ret; \
174}
175
176#include <trace/trace_event_types.h>
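
Stage 2 above consumes the same field list three times: once to generate ftrace_raw_output_<call>, once for ftrace_format_<call> (the body of the "format" file) and once for ftrace_define_fields_<call>, by redefining __field/__array and re-including <trace/trace_event_types.h> between passes. A single-file analogue of that multiple-pass trick, using a field-list macro instead of a re-included header; the struct and fields are invented:

/* Hedged sketch: one field list, consumed twice with different per-field
 * macro definitions, mirroring the stage-2 re-include pattern.  All names
 * are made up. */
#include <stdio.h>
#include <stddef.h>

struct toy_entry {
	int	bar;
	char	baz[16];
};

#define TOY_FIELDS(F)		\
	F(int, bar)		\
	F(char[16], baz)

/* First pass: print the field description, as ftrace_format_<call> does. */
static void toy_show_format(void)
{
#define F_PRINT(type, item)						\
	printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", #type, #item, \
	       offsetof(struct toy_entry, item),			\
	       sizeof(((struct toy_entry *)0)->item));
	TOY_FIELDS(F_PRINT)
#undef F_PRINT
}

/* Second pass: "register" each field, as ftrace_define_fields_<call> does. */
static void toy_define_fields(void)
{
#define F_DEFINE(type, item)						\
	printf("define_field(%s, %s)\n", #type, #item);
	TOY_FIELDS(F_DEFINE)
#undef F_DEFINE
}

int main(void)
{
	toy_show_format();
	toy_define_fields();
	return 0;
}
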
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
new file mode 100644
index 000000000000..9d2fa78cecca
--- /dev/null
+++ b/kernel/trace/trace_events_stage_3.h
@@ -0,0 +1,281 @@
1/*
2 * Stage 3 of the trace events.
3 *
4 * Override the macros in <trace/trace_event_types.h> to include the following:
5 *
6 * static void ftrace_event_<call>(proto)
7 * {
8 * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
9 * }
10 *
11 * static int ftrace_reg_event_<call>(void)
12 * {
13 * int ret;
14 *
15 * ret = register_trace_<call>(ftrace_event_<call>);
16 * if (!ret)
17 * pr_info("event trace: Could not activate trace point "
18 * "probe to <call>");
19 * return ret;
20 * }
21 *
22 * static void ftrace_unreg_event_<call>(void)
23 * {
24 * unregister_trace_<call>(ftrace_event_<call>);
25 * }
26 *
27 * For those macros defined with TRACE_FORMAT:
28 *
29 * static struct ftrace_event_call __used
30 * __attribute__((__aligned__(4)))
31 * __attribute__((section("_ftrace_events"))) event_<call> = {
32 * .name = "<call>",
33 * .regfunc = ftrace_reg_event_<call>,
34 * .unregfunc = ftrace_unreg_event_<call>,
35 * }
36 *
37 *
38 * For those macros defined with TRACE_EVENT:
39 *
40 * static struct ftrace_event_call event_<call>;
41 *
42 * static void ftrace_raw_event_<call>(proto)
43 * {
44 * struct ring_buffer_event *event;
45 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
46 * unsigned long irq_flags;
47 * int pc;
48 *
49 * local_save_flags(irq_flags);
50 * pc = preempt_count();
51 *
52 * event = trace_current_buffer_lock_reserve(event_<call>.id,
53 * sizeof(struct ftrace_raw_<call>),
54 * irq_flags, pc);
55 * if (!event)
56 * return;
57 * entry = ring_buffer_event_data(event);
58 *
59 * <assign>; <-- Here we assign the entries by the __field and
60 * __array macros.
61 *
62 * trace_current_buffer_unlock_commit(event, irq_flags, pc);
63 * }
64 *
65 * static int ftrace_raw_reg_event_<call>(void)
66 * {
67 * int ret;
68 *
69 * ret = register_trace_<call>(ftrace_raw_event_<call>);
70 * if (!ret)
71 * pr_info("event trace: Could not activate trace point "
72 * "probe to <call>");
73 * return ret;
74 * }
75 *
76 * static void ftrace_unreg_event_<call>(void)
77 * {
78 * unregister_trace_<call>(ftrace_raw_event_<call>);
79 * }
80 *
81 * static struct trace_event ftrace_event_type_<call> = {
82 * .trace = ftrace_raw_output_<call>, <-- stage 2
83 * };
84 *
85 * static int ftrace_raw_init_event_<call>(void)
86 * {
87 * int id;
88 *
89 * id = register_ftrace_event(&ftrace_event_type_<call>);
90 * if (!id)
91 * return -ENODEV;
92 * event_<call>.id = id;
93 * return 0;
94 * }
95 *
96 * static struct ftrace_event_call __used
97 * __attribute__((__aligned__(4)))
98 * __attribute__((section("_ftrace_events"))) event_<call> = {
99 * .name = "<call>",
100 * .system = "<system>",
101 * .raw_init = ftrace_raw_init_event_<call>,
102 * .regfunc = ftrace_reg_event_<call>,
103 * .unregfunc = ftrace_unreg_event_<call>,
104 * .show_format = ftrace_format_<call>,
105 * }
106 *
107 */
108
109#undef TP_FMT
110#define TP_FMT(fmt, args...) fmt "\n", ##args
111
112#ifdef CONFIG_EVENT_PROFILE
113#define _TRACE_PROFILE(call, proto, args) \
114static void ftrace_profile_##call(proto) \
115{ \
116 extern void perf_tpcounter_event(int); \
117 perf_tpcounter_event(event_##call.id); \
118} \
119 \
120static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
121{ \
122 int ret = 0; \
123 \
124 if (!atomic_inc_return(&call->profile_count)) \
125 ret = register_trace_##call(ftrace_profile_##call); \
126 \
127 return ret; \
128} \
129 \
130static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
131{ \
132 if (atomic_add_negative(-1, &call->profile_count)) \
133 unregister_trace_##call(ftrace_profile_##call); \
134}
135
136#define _TRACE_PROFILE_INIT(call) \
137 .profile_count = ATOMIC_INIT(-1), \
138 .profile_enable = ftrace_profile_enable_##call, \
139 .profile_disable = ftrace_profile_disable_##call,
140
141#else
142#define _TRACE_PROFILE(call, proto, args)
143#define _TRACE_PROFILE_INIT(call)
144#endif
145
146#define _TRACE_FORMAT(call, proto, args, fmt) \
147static void ftrace_event_##call(proto) \
148{ \
149 event_trace_printk(_RET_IP_, #call ": " fmt); \
150} \
151 \
152static int ftrace_reg_event_##call(void) \
153{ \
154 int ret; \
155 \
156 ret = register_trace_##call(ftrace_event_##call); \
157 if (ret) \
158 pr_info("event trace: Could not activate trace point " \
159 "probe to " #call "\n"); \
160 return ret; \
161} \
162 \
163static void ftrace_unreg_event_##call(void) \
164{ \
165 unregister_trace_##call(ftrace_event_##call); \
166} \
167 \
168static struct ftrace_event_call event_##call; \
169 \
170static int ftrace_init_event_##call(void) \
171{ \
172 int id; \
173 \
174 id = register_ftrace_event(NULL); \
175 if (!id) \
176 return -ENODEV; \
177 event_##call.id = id; \
178 return 0; \
179}
180
181#undef TRACE_FORMAT
182#define TRACE_FORMAT(call, proto, args, fmt) \
183_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
184_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
185static struct ftrace_event_call __used \
186__attribute__((__aligned__(4))) \
187__attribute__((section("_ftrace_events"))) event_##call = { \
188 .name = #call, \
189 .system = __stringify(TRACE_SYSTEM), \
190 .raw_init = ftrace_init_event_##call, \
191 .regfunc = ftrace_reg_event_##call, \
192 .unregfunc = ftrace_unreg_event_##call, \
193 _TRACE_PROFILE_INIT(call) \
194}
195
196#undef __entry
197#define __entry entry
198
199#undef TRACE_EVENT
200#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
201_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
202 \
203static struct ftrace_event_call event_##call; \
204 \
205static void ftrace_raw_event_##call(proto) \
206{ \
207 struct ftrace_event_call *call = &event_##call; \
208 struct ring_buffer_event *event; \
209 struct ftrace_raw_##call *entry; \
210 unsigned long irq_flags; \
211 int pc; \
212 \
213 local_save_flags(irq_flags); \
214 pc = preempt_count(); \
215 \
216 event = trace_current_buffer_lock_reserve(event_##call.id, \
217 sizeof(struct ftrace_raw_##call), \
218 irq_flags, pc); \
219 if (!event) \
220 return; \
221 entry = ring_buffer_event_data(event); \
222 \
223 assign; \
224 \
225 if (call->preds && !filter_match_preds(call, entry)) \
226 ring_buffer_event_discard(event); \
227 \
228 trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
229 \
230} \
231 \
232static int ftrace_raw_reg_event_##call(void) \
233{ \
234 int ret; \
235 \
236 ret = register_trace_##call(ftrace_raw_event_##call); \
237 if (ret) \
238 pr_info("event trace: Could not activate trace point " \
239 "probe to " #call "\n"); \
240 return ret; \
241} \
242 \
243static void ftrace_raw_unreg_event_##call(void) \
244{ \
245 unregister_trace_##call(ftrace_raw_event_##call); \
246} \
247 \
248static struct trace_event ftrace_event_type_##call = { \
249 .trace = ftrace_raw_output_##call, \
250}; \
251 \
252static int ftrace_raw_init_event_##call(void) \
253{ \
254 int id; \
255 \
256 id = register_ftrace_event(&ftrace_event_type_##call); \
257 if (!id) \
258 return -ENODEV; \
259 event_##call.id = id; \
260 INIT_LIST_HEAD(&event_##call.fields); \
261 return 0; \
262} \
263 \
264static struct ftrace_event_call __used \
265__attribute__((__aligned__(4))) \
266__attribute__((section("_ftrace_events"))) event_##call = { \
267 .name = #call, \
268 .system = __stringify(TRACE_SYSTEM), \
269 .raw_init = ftrace_raw_init_event_##call, \
270 .regfunc = ftrace_raw_reg_event_##call, \
271 .unregfunc = ftrace_raw_unreg_event_##call, \
272 .show_format = ftrace_format_##call, \
273 .define_fields = ftrace_define_fields_##call, \
274 _TRACE_PROFILE_INIT(call) \
275}
276
277#include <trace/trace_event_types.h>
278
279#undef _TRACE_PROFILE
280#undef _TRACE_PROFILE_INIT
281
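
The stage-3 definitions above drop every event_<call> descriptor into the "_ftrace_events" section so that event_trace_init() can walk them from __start_ftrace_events with for_each_event(). The same collect-by-section pattern can be demonstrated standalone with GCC's section attribute and the __start_/__stop_ symbols the GNU linker synthesizes for sections whose names are valid C identifiers; every name below is invented:

/* Hedged sketch: collect statically declared descriptors in a named
 * section and walk them via linker-provided start/stop symbols, the same
 * pattern the _ftrace_events section relies on.  Requires GCC and the GNU
 * linker; all names are invented. */
#include <stdio.h>

struct toy_event {
	const char *name;
};

#define DEFINE_TOY_EVENT(ident)						\
	static struct toy_event __attribute__((used))			\
	__attribute__((section("toy_events"))) toy_event_##ident = {	\
		.name = #ident,						\
	}

DEFINE_TOY_EVENT(foo);
DEFINE_TOY_EVENT(bar);

/* The GNU linker defines these for any section whose name is a valid
 * C identifier. */
extern struct toy_event __start_toy_events[];
extern struct toy_event __stop_toy_events[];

int main(void)
{
	struct toy_event *ev;

	for (ev = __start_toy_events; ev < __stop_toy_events; ev++)
		printf("event: %s\n", ev->name);
	return 0;
}
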
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
new file mode 100644
index 000000000000..4d9952d3df50
--- /dev/null
+++ b/kernel/trace/trace_export.c
@@ -0,0 +1,102 @@
1/*
2 * trace_export.c - export basic ftrace utilities to user space
3 *
4 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/stringify.h>
7#include <linux/kallsyms.h>
8#include <linux/seq_file.h>
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/fs.h>
15
16#include "trace_output.h"
17
18
19#undef TRACE_STRUCT
20#define TRACE_STRUCT(args...) args
21
22#undef TRACE_FIELD
23#define TRACE_FIELD(type, item, assign) \
24 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
25 "offset:%u;\tsize:%u;\n", \
26 (unsigned int)offsetof(typeof(field), item), \
27 (unsigned int)sizeof(field.item)); \
28 if (!ret) \
29 return 0;
30
31
32#undef TRACE_FIELD_SPECIAL
33#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
34 ret = trace_seq_printf(s, "\tfield special:" #type_item ";\t" \
35 "offset:%u;\tsize:%u;\n", \
36 (unsigned int)offsetof(typeof(field), item), \
37 (unsigned int)sizeof(field.item)); \
38 if (!ret) \
39 return 0;
40
41#undef TRACE_FIELD_ZERO_CHAR
42#define TRACE_FIELD_ZERO_CHAR(item) \
43 ret = trace_seq_printf(s, "\tfield: char " #item ";\t" \
44 "offset:%u;\tsize:0;\n", \
45 (unsigned int)offsetof(typeof(field), item)); \
46 if (!ret) \
47 return 0;
48
49
50#undef TP_RAW_FMT
51#define TP_RAW_FMT(args...) args
52
53#undef TRACE_EVENT_FORMAT
54#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
55static int \
56ftrace_format_##call(struct trace_seq *s) \
57{ \
58 struct args field; \
59 int ret; \
60 \
61 tstruct; \
62 \
63 trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt); \
64 \
65 return ret; \
66}
67
68#include "trace_event_types.h"
69
70#undef TRACE_ZERO_CHAR
71#define TRACE_ZERO_CHAR(arg)
72
73#undef TRACE_FIELD
74#define TRACE_FIELD(type, item, assign)\
75 entry->item = assign;
76
77#undef TRACE_FIELD
78#define TRACE_FIELD(type, item, assign)\
79 entry->item = assign;
80
81#undef TP_CMD
82#define TP_CMD(cmd...) cmd
83
84#undef TRACE_ENTRY
85#define TRACE_ENTRY entry
86
87#undef TRACE_FIELD_SPECIAL
88#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
89 cmd;
90
91#undef TRACE_EVENT_FORMAT
92#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
93 \
94static struct ftrace_event_call __used \
95__attribute__((__aligned__(4))) \
96__attribute__((section("_ftrace_events"))) event_##call = { \
97 .name = #call, \
98 .id = proto, \
99 .system = __stringify(TRACE_SYSTEM), \
100 .show_format = ftrace_format_##call, \
101}
102#include "trace_event_types.h"
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9236d7e25a16..c9a0b7df44ff 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -9,6 +9,7 @@
9 * Copyright (C) 2004-2006 Ingo Molnar 9 * Copyright (C) 2004-2006 Ingo Molnar
10 * Copyright (C) 2004 William Lee Irwin III 10 * Copyright (C) 2004 William Lee Irwin III
11 */ 11 */
12#include <linux/ring_buffer.h>
12#include <linux/debugfs.h> 13#include <linux/debugfs.h>
13#include <linux/uaccess.h> 14#include <linux/uaccess.h>
14#include <linux/ftrace.h> 15#include <linux/ftrace.h>
@@ -16,52 +17,388 @@
16 17
17#include "trace.h" 18#include "trace.h"
18 19
19static void start_function_trace(struct trace_array *tr) 20/* function tracing enabled */
21static int ftrace_function_enabled;
22
23static struct trace_array *func_trace;
24
25static void tracing_start_function_trace(void);
26static void tracing_stop_function_trace(void);
27
28static int function_trace_init(struct trace_array *tr)
20{ 29{
30 func_trace = tr;
21 tr->cpu = get_cpu(); 31 tr->cpu = get_cpu();
22 tracing_reset_online_cpus(tr);
23 put_cpu(); 32 put_cpu();
24 33
25 tracing_start_cmdline_record(); 34 tracing_start_cmdline_record();
26 tracing_start_function_trace(); 35 tracing_start_function_trace();
36 return 0;
27} 37}
28 38
29static void stop_function_trace(struct trace_array *tr) 39static void function_trace_reset(struct trace_array *tr)
30{ 40{
31 tracing_stop_function_trace(); 41 tracing_stop_function_trace();
32 tracing_stop_cmdline_record(); 42 tracing_stop_cmdline_record();
33} 43}
34 44
35static int function_trace_init(struct trace_array *tr) 45static void function_trace_start(struct trace_array *tr)
36{ 46{
37 start_function_trace(tr); 47 tracing_reset_online_cpus(tr);
38 return 0;
39} 48}
40 49
41static void function_trace_reset(struct trace_array *tr) 50static void
51function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
52{
53 struct trace_array *tr = func_trace;
54 struct trace_array_cpu *data;
55 unsigned long flags;
56 long disabled;
57 int cpu, resched;
58 int pc;
59
60 if (unlikely(!ftrace_function_enabled))
61 return;
62
63 pc = preempt_count();
64 resched = ftrace_preempt_disable();
65 local_save_flags(flags);
66 cpu = raw_smp_processor_id();
67 data = tr->data[cpu];
68 disabled = atomic_inc_return(&data->disabled);
69
70 if (likely(disabled == 1))
71 trace_function(tr, ip, parent_ip, flags, pc);
72
73 atomic_dec(&data->disabled);
74 ftrace_preempt_enable(resched);
75}
76
77static void
78function_trace_call(unsigned long ip, unsigned long parent_ip)
42{ 79{
43 stop_function_trace(tr); 80 struct trace_array *tr = func_trace;
81 struct trace_array_cpu *data;
82 unsigned long flags;
83 long disabled;
84 int cpu;
85 int pc;
86
87 if (unlikely(!ftrace_function_enabled))
88 return;
89
90 /*
91 * Need to use raw, since this must be called before the
92 * recursive protection is performed.
93 */
94 local_irq_save(flags);
95 cpu = raw_smp_processor_id();
96 data = tr->data[cpu];
97 disabled = atomic_inc_return(&data->disabled);
98
99 if (likely(disabled == 1)) {
100 pc = preempt_count();
101 trace_function(tr, ip, parent_ip, flags, pc);
102 }
103
104 atomic_dec(&data->disabled);
105 local_irq_restore(flags);
44} 106}
45 107
46static void function_trace_start(struct trace_array *tr) 108static void
109function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
47{ 110{
48 tracing_reset_online_cpus(tr); 111 struct trace_array *tr = func_trace;
112 struct trace_array_cpu *data;
113 unsigned long flags;
114 long disabled;
115 int cpu;
116 int pc;
117
118 if (unlikely(!ftrace_function_enabled))
119 return;
120
121 /*
122 * Need to use raw, since this must be called before the
123 * recursive protection is performed.
124 */
125 local_irq_save(flags);
126 cpu = raw_smp_processor_id();
127 data = tr->data[cpu];
128 disabled = atomic_inc_return(&data->disabled);
129
130 if (likely(disabled == 1)) {
131 pc = preempt_count();
132 trace_function(tr, ip, parent_ip, flags, pc);
133 /*
134 * skip over 5 funcs:
135 * __ftrace_trace_stack,
136 * __trace_stack,
137 * function_stack_trace_call
138 * ftrace_list_func
139 * ftrace_call
140 */
141 __trace_stack(tr, flags, 5, pc);
142 }
143
144 atomic_dec(&data->disabled);
145 local_irq_restore(flags);
146}
147
148
149static struct ftrace_ops trace_ops __read_mostly =
150{
151 .func = function_trace_call,
152};
153
154static struct ftrace_ops trace_stack_ops __read_mostly =
155{
156 .func = function_stack_trace_call,
157};
158
159/* Our two options */
160enum {
161 TRACE_FUNC_OPT_STACK = 0x1,
162};
163
164static struct tracer_opt func_opts[] = {
165#ifdef CONFIG_STACKTRACE
166 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
167#endif
168 { } /* Always set a last empty entry */
169};
170
171static struct tracer_flags func_flags = {
172 .val = 0, /* By default: all flags disabled */
173 .opts = func_opts
174};
175
176static void tracing_start_function_trace(void)
177{
178 ftrace_function_enabled = 0;
179
180 if (trace_flags & TRACE_ITER_PREEMPTONLY)
181 trace_ops.func = function_trace_call_preempt_only;
182 else
183 trace_ops.func = function_trace_call;
184
185 if (func_flags.val & TRACE_FUNC_OPT_STACK)
186 register_ftrace_function(&trace_stack_ops);
187 else
188 register_ftrace_function(&trace_ops);
189
190 ftrace_function_enabled = 1;
191}
192
193static void tracing_stop_function_trace(void)
194{
195 ftrace_function_enabled = 0;
196 /* OK if they are not registered */
197 unregister_ftrace_function(&trace_stack_ops);
198 unregister_ftrace_function(&trace_ops);
199}
200
201static int func_set_flag(u32 old_flags, u32 bit, int set)
202{
203 if (bit == TRACE_FUNC_OPT_STACK) {
204 /* do nothing if already set */
205 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
206 return 0;
207
208 if (set) {
209 unregister_ftrace_function(&trace_ops);
210 register_ftrace_function(&trace_stack_ops);
211 } else {
212 unregister_ftrace_function(&trace_stack_ops);
213 register_ftrace_function(&trace_ops);
214 }
215
216 return 0;
217 }
218
219 return -EINVAL;
49} 220}
50 221
51static struct tracer function_trace __read_mostly = 222static struct tracer function_trace __read_mostly =
52{ 223{
53 .name = "function", 224 .name = "function",
54 .init = function_trace_init, 225 .init = function_trace_init,
55 .reset = function_trace_reset, 226 .reset = function_trace_reset,
56 .start = function_trace_start, 227 .start = function_trace_start,
228 .wait_pipe = poll_wait_pipe,
229 .flags = &func_flags,
230 .set_flag = func_set_flag,
57#ifdef CONFIG_FTRACE_SELFTEST 231#ifdef CONFIG_FTRACE_SELFTEST
58 .selftest = trace_selftest_startup_function, 232 .selftest = trace_selftest_startup_function,
59#endif 233#endif
60}; 234};
61 235
236#ifdef CONFIG_DYNAMIC_FTRACE
237static void
238ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
239{
240 long *count = (long *)data;
241
242 if (tracing_is_on())
243 return;
244
245 if (!*count)
246 return;
247
248 if (*count != -1)
249 (*count)--;
250
251 tracing_on();
252}
253
254static void
255ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
256{
257 long *count = (long *)data;
258
259 if (!tracing_is_on())
260 return;
261
262 if (!*count)
263 return;
264
265 if (*count != -1)
266 (*count)--;
267
268 tracing_off();
269}
270
271static int
272ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
273 struct ftrace_probe_ops *ops, void *data);
274
275static struct ftrace_probe_ops traceon_probe_ops = {
276 .func = ftrace_traceon,
277 .print = ftrace_trace_onoff_print,
278};
279
280static struct ftrace_probe_ops traceoff_probe_ops = {
281 .func = ftrace_traceoff,
282 .print = ftrace_trace_onoff_print,
283};
284
285static int
286ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
287 struct ftrace_probe_ops *ops, void *data)
288{
289 char str[KSYM_SYMBOL_LEN];
290 long count = (long)data;
291
292 kallsyms_lookup(ip, NULL, NULL, NULL, str);
293 seq_printf(m, "%s:", str);
294
295 if (ops == &traceon_probe_ops)
296 seq_printf(m, "traceon");
297 else
298 seq_printf(m, "traceoff");
299
300 if (count == -1)
301 seq_printf(m, ":unlimited\n");
302 else
303 seq_printf(m, ":count=%ld", count);
304 seq_putc(m, '\n');
305
306 return 0;
307}
308
309static int
310ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
311{
312 struct ftrace_probe_ops *ops;
313
314 /* we register both traceon and traceoff to this callback */
315 if (strcmp(cmd, "traceon") == 0)
316 ops = &traceon_probe_ops;
317 else
318 ops = &traceoff_probe_ops;
319
320 unregister_ftrace_function_probe_func(glob, ops);
321
322 return 0;
323}
324
325static int
326ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
327{
328 struct ftrace_probe_ops *ops;
329 void *count = (void *)-1;
330 char *number;
331 int ret;
332
333 /* hash funcs only work with set_ftrace_filter */
334 if (!enable)
335 return -EINVAL;
336
337 if (glob[0] == '!')
338 return ftrace_trace_onoff_unreg(glob+1, cmd, param);
339
340 /* we register both traceon and traceoff to this callback */
341 if (strcmp(cmd, "traceon") == 0)
342 ops = &traceon_probe_ops;
343 else
344 ops = &traceoff_probe_ops;
345
346 if (!param)
347 goto out_reg;
348
349 number = strsep(&param, ":");
350
351 if (!strlen(number))
352 goto out_reg;
353
354 /*
355 * We use the callback data field (which is a pointer)
356 * as our counter.
357 */
358 ret = strict_strtoul(number, 0, (unsigned long *)&count);
359 if (ret)
360 return ret;
361
362 out_reg:
363 ret = register_ftrace_function_probe(glob, ops, count);
364
365 return ret;
366}
367
368static struct ftrace_func_command ftrace_traceon_cmd = {
369 .name = "traceon",
370 .func = ftrace_trace_onoff_callback,
371};
372
373static struct ftrace_func_command ftrace_traceoff_cmd = {
374 .name = "traceoff",
375 .func = ftrace_trace_onoff_callback,
376};
377
378static int __init init_func_cmd_traceon(void)
379{
380 int ret;
381
382 ret = register_ftrace_command(&ftrace_traceoff_cmd);
383 if (ret)
384 return ret;
385
386 ret = register_ftrace_command(&ftrace_traceon_cmd);
387 if (ret)
388 unregister_ftrace_command(&ftrace_traceoff_cmd);
389 return ret;
390}
391#else
392static inline int init_func_cmd_traceon(void)
393{
394 return 0;
395}
396#endif /* CONFIG_DYNAMIC_FTRACE */
397
62static __init int init_function_trace(void) 398static __init int init_function_trace(void)
63{ 399{
400 init_func_cmd_traceon();
64 return register_tracer(&function_trace); 401 return register_tracer(&function_trace);
65} 402}
66
67device_initcall(init_function_trace); 403device_initcall(init_function_trace);
404
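
With CONFIG_DYNAMIC_FTRACE, the traceon/traceoff probes registered above are reached through set_ftrace_filter using the command form "<glob>:traceon|traceoff[:<count>]", with a leading '!' to remove a probe again; the optional count is stored in the probe's data pointer and decremented on each hit. A minimal sketch of arming a one-shot traceoff; the debugfs path and the traced function name are assumptions:

/* Hedged sketch: arm a one-shot traceoff probe through set_ftrace_filter.
 * The path and the function name are assumptions. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/set_ftrace_filter";
	const char *cmd = "schedule:traceoff:1\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}
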
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index dce71a5b51bc..d28687e7b3a7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * Function graph tracer. 3 * Function graph tracer.
4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> 4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which 5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com> 6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 * 7 *
@@ -12,6 +12,12 @@
12#include <linux/fs.h> 12#include <linux/fs.h>
13 13
14#include "trace.h" 14#include "trace.h"
15#include "trace_output.h"
16
17struct fgraph_data {
18 pid_t last_pid;
19 int depth;
20};
15 21
16#define TRACE_GRAPH_INDENT 2 22#define TRACE_GRAPH_INDENT 2
17 23
@@ -20,9 +26,11 @@
20#define TRACE_GRAPH_PRINT_CPU 0x2 26#define TRACE_GRAPH_PRINT_CPU 0x2
21#define TRACE_GRAPH_PRINT_OVERHEAD 0x4 27#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
22#define TRACE_GRAPH_PRINT_PROC 0x8 28#define TRACE_GRAPH_PRINT_PROC 0x8
29#define TRACE_GRAPH_PRINT_DURATION 0x10
30#define TRACE_GRAPH_PRINT_ABS_TIME 0X20
23 31
24static struct tracer_opt trace_opts[] = { 32static struct tracer_opt trace_opts[] = {
25 /* Display overruns ? */ 33 /* Display overruns? (for self-debug purpose) */
26 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, 34 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
27 /* Display CPU ? */ 35 /* Display CPU ? */
28 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, 36 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
@@ -30,23 +38,28 @@ static struct tracer_opt trace_opts[] = {
30 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, 38 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
31 /* Display proc name/pid */ 39 /* Display proc name/pid */
32 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, 40 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
41 /* Display duration of execution */
42 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
43 /* Display absolute time of an entry */
44 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
33 { } /* Empty entry */ 45 { } /* Empty entry */
34}; 46};
35 47
36static struct tracer_flags tracer_flags = { 48static struct tracer_flags tracer_flags = {
37 /* Don't display overruns and proc by default */ 49 /* Don't display overruns and proc by default */
38 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, 50 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
51 TRACE_GRAPH_PRINT_DURATION,
39 .opts = trace_opts 52 .opts = trace_opts
40}; 53};
41 54
42/* pid on the last trace processed */ 55/* pid on the last trace processed */
43static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; 56
44 57
45/* Add a function return address to the trace stack on thread info.*/ 58/* Add a function return address to the trace stack on thread info.*/
46int 59int
47ftrace_push_return_trace(unsigned long ret, unsigned long long time, 60ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
48 unsigned long func, int *depth)
49{ 61{
62 unsigned long long calltime;
50 int index; 63 int index;
51 64
52 if (!current->ret_stack) 65 if (!current->ret_stack)
@@ -58,11 +71,13 @@ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
58 return -EBUSY; 71 return -EBUSY;
59 } 72 }
60 73
74 calltime = trace_clock_local();
75
61 index = ++current->curr_ret_stack; 76 index = ++current->curr_ret_stack;
62 barrier(); 77 barrier();
63 current->ret_stack[index].ret = ret; 78 current->ret_stack[index].ret = ret;
64 current->ret_stack[index].func = func; 79 current->ret_stack[index].func = func;
65 current->ret_stack[index].calltime = time; 80 current->ret_stack[index].calltime = calltime;
66 *depth = index; 81 *depth = index;
67 82
68 return 0; 83 return 0;
@@ -104,7 +119,7 @@ unsigned long ftrace_return_to_handler(void)
104 unsigned long ret; 119 unsigned long ret;
105 120
106 ftrace_pop_return_trace(&trace, &ret); 121 ftrace_pop_return_trace(&trace, &ret);
107 trace.rettime = cpu_clock(raw_smp_processor_id()); 122 trace.rettime = trace_clock_local();
108 ftrace_graph_return(&trace); 123 ftrace_graph_return(&trace);
109 124
110 if (unlikely(!ret)) { 125 if (unlikely(!ret)) {
@@ -119,12 +134,7 @@ unsigned long ftrace_return_to_handler(void)
119 134
120static int graph_trace_init(struct trace_array *tr) 135static int graph_trace_init(struct trace_array *tr)
121{ 136{
122 int cpu, ret; 137 int ret = register_ftrace_graph(&trace_graph_return,
123
124 for_each_online_cpu(cpu)
125 tracing_reset(tr, cpu);
126
127 ret = register_ftrace_graph(&trace_graph_return,
128 &trace_graph_entry); 138 &trace_graph_entry);
129 if (ret) 139 if (ret)
130 return ret; 140 return ret;
@@ -187,15 +197,15 @@ print_graph_cpu(struct trace_seq *s, int cpu)
187static enum print_line_t 197static enum print_line_t
188print_graph_proc(struct trace_seq *s, pid_t pid) 198print_graph_proc(struct trace_seq *s, pid_t pid)
189{ 199{
190 int i; 200 char comm[TASK_COMM_LEN];
191 int ret;
192 int len;
193 char comm[8];
194 int spaces = 0;
195 /* sign + log10(MAX_INT) + '\0' */ 201 /* sign + log10(MAX_INT) + '\0' */
196 char pid_str[11]; 202 char pid_str[11];
203 int spaces = 0;
204 int ret;
205 int len;
206 int i;
197 207
198 strncpy(comm, trace_find_cmdline(pid), 7); 208 trace_find_cmdline(pid, comm);
199 comm[7] = '\0'; 209 comm[7] = '\0';
200 sprintf(pid_str, "%d", pid); 210 sprintf(pid_str, "%d", pid);
201 211
@@ -228,17 +238,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
228 238
229/* If the pid changed since the last trace, output this event */ 239/* If the pid changed since the last trace, output this event */
230static enum print_line_t 240static enum print_line_t
231verif_pid(struct trace_seq *s, pid_t pid, int cpu) 241verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
232{ 242{
233 pid_t prev_pid; 243 pid_t prev_pid;
244 pid_t *last_pid;
234 int ret; 245 int ret;
235 246
236 if (last_pid[cpu] != -1 && last_pid[cpu] == pid) 247 if (!data)
248 return TRACE_TYPE_HANDLED;
249
250 last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
251
252 if (*last_pid == pid)
237 return TRACE_TYPE_HANDLED; 253 return TRACE_TYPE_HANDLED;
238 254
239 prev_pid = last_pid[cpu]; 255 prev_pid = *last_pid;
240 last_pid[cpu] = pid; 256 *last_pid = pid;
241 257
258 if (prev_pid == -1)
259 return TRACE_TYPE_HANDLED;
242/* 260/*
243 * Context-switch trace line: 261 * Context-switch trace line:
244 262
@@ -250,34 +268,34 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu)
250 ret = trace_seq_printf(s, 268 ret = trace_seq_printf(s,
251 " ------------------------------------------\n"); 269 " ------------------------------------------\n");
252 if (!ret) 270 if (!ret)
253 TRACE_TYPE_PARTIAL_LINE; 271 return TRACE_TYPE_PARTIAL_LINE;
254 272
255 ret = print_graph_cpu(s, cpu); 273 ret = print_graph_cpu(s, cpu);
256 if (ret == TRACE_TYPE_PARTIAL_LINE) 274 if (ret == TRACE_TYPE_PARTIAL_LINE)
257 TRACE_TYPE_PARTIAL_LINE; 275 return TRACE_TYPE_PARTIAL_LINE;
258 276
259 ret = print_graph_proc(s, prev_pid); 277 ret = print_graph_proc(s, prev_pid);
260 if (ret == TRACE_TYPE_PARTIAL_LINE) 278 if (ret == TRACE_TYPE_PARTIAL_LINE)
261 TRACE_TYPE_PARTIAL_LINE; 279 return TRACE_TYPE_PARTIAL_LINE;
262 280
263 ret = trace_seq_printf(s, " => "); 281 ret = trace_seq_printf(s, " => ");
264 if (!ret) 282 if (!ret)
265 TRACE_TYPE_PARTIAL_LINE; 283 return TRACE_TYPE_PARTIAL_LINE;
266 284
267 ret = print_graph_proc(s, pid); 285 ret = print_graph_proc(s, pid);
268 if (ret == TRACE_TYPE_PARTIAL_LINE) 286 if (ret == TRACE_TYPE_PARTIAL_LINE)
269 TRACE_TYPE_PARTIAL_LINE; 287 return TRACE_TYPE_PARTIAL_LINE;
270 288
271 ret = trace_seq_printf(s, 289 ret = trace_seq_printf(s,
272 "\n ------------------------------------------\n\n"); 290 "\n ------------------------------------------\n\n");
273 if (!ret) 291 if (!ret)
274 TRACE_TYPE_PARTIAL_LINE; 292 return TRACE_TYPE_PARTIAL_LINE;
275 293
276 return ret; 294 return TRACE_TYPE_HANDLED;
277} 295}
278 296
279static bool 297static struct ftrace_graph_ret_entry *
280trace_branch_is_leaf(struct trace_iterator *iter, 298get_return_for_leaf(struct trace_iterator *iter,
281 struct ftrace_graph_ent_entry *curr) 299 struct ftrace_graph_ent_entry *curr)
282{ 300{
283 struct ring_buffer_iter *ring_iter; 301 struct ring_buffer_iter *ring_iter;
@@ -286,65 +304,123 @@ trace_branch_is_leaf(struct trace_iterator *iter,
286 304
287 ring_iter = iter->buffer_iter[iter->cpu]; 305 ring_iter = iter->buffer_iter[iter->cpu];
288 306
289 if (!ring_iter) 307 /* First peek to compare current entry and the next one */
290 return false; 308 if (ring_iter)
291 309 event = ring_buffer_iter_peek(ring_iter, NULL);
292 event = ring_buffer_iter_peek(ring_iter, NULL); 310 else {
311 /* We need to consume the current entry to see the next one */
312 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
313 event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
314 NULL);
315 }
293 316
294 if (!event) 317 if (!event)
295 return false; 318 return NULL;
296 319
297 next = ring_buffer_event_data(event); 320 next = ring_buffer_event_data(event);
298 321
299 if (next->ent.type != TRACE_GRAPH_RET) 322 if (next->ent.type != TRACE_GRAPH_RET)
300 return false; 323 return NULL;
301 324
302 if (curr->ent.pid != next->ent.pid || 325 if (curr->ent.pid != next->ent.pid ||
303 curr->graph_ent.func != next->ret.func) 326 curr->graph_ent.func != next->ret.func)
304 return false; 327 return NULL;
328
329 /* this is a leaf, now advance the iterator */
330 if (ring_iter)
331 ring_buffer_read(ring_iter, NULL);
332
333 return next;
334}
335
       336/* Signal an overhead of execution time to the output */
337static int
338print_graph_overhead(unsigned long long duration, struct trace_seq *s)
339{
       340	/* If duration is not being displayed, we don't need anything */
341 if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
342 return 1;
343
344 /* Non nested entry or return */
345 if (duration == -1)
346 return trace_seq_printf(s, " ");
347
348 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
349 /* Duration exceeded 100 msecs */
350 if (duration > 100000ULL)
351 return trace_seq_printf(s, "! ");
352
353 /* Duration exceeded 10 msecs */
354 if (duration > 10000ULL)
355 return trace_seq_printf(s, "+ ");
356 }
357
358 return trace_seq_printf(s, " ");
359}
360
361static int print_graph_abs_time(u64 t, struct trace_seq *s)
362{
363 unsigned long usecs_rem;
364
365 usecs_rem = do_div(t, NSEC_PER_SEC);
366 usecs_rem /= 1000;
305 367
306 return true; 368 return trace_seq_printf(s, "%5lu.%06lu | ",
369 (unsigned long)t, usecs_rem);
307} 370}
308 371
309static enum print_line_t 372static enum print_line_t
310print_graph_irq(struct trace_seq *s, unsigned long addr, 373print_graph_irq(struct trace_iterator *iter, unsigned long addr,
311 enum trace_type type, int cpu, pid_t pid) 374 enum trace_type type, int cpu, pid_t pid)
312{ 375{
313 int ret; 376 int ret;
377 struct trace_seq *s = &iter->seq;
314 378
315 if (addr < (unsigned long)__irqentry_text_start || 379 if (addr < (unsigned long)__irqentry_text_start ||
316 addr >= (unsigned long)__irqentry_text_end) 380 addr >= (unsigned long)__irqentry_text_end)
317 return TRACE_TYPE_UNHANDLED; 381 return TRACE_TYPE_UNHANDLED;
318 382
319 if (type == TRACE_GRAPH_ENT) { 383 /* Absolute time */
320 ret = trace_seq_printf(s, "==========> | "); 384 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
321 } else { 385 ret = print_graph_abs_time(iter->ts, s);
322 /* Cpu */ 386 if (!ret)
323 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 387 return TRACE_TYPE_PARTIAL_LINE;
324 ret = print_graph_cpu(s, cpu); 388 }
325 if (ret == TRACE_TYPE_PARTIAL_LINE)
326 return TRACE_TYPE_PARTIAL_LINE;
327 }
328 /* Proc */
329 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
330 ret = print_graph_proc(s, pid);
331 if (ret == TRACE_TYPE_PARTIAL_LINE)
332 return TRACE_TYPE_PARTIAL_LINE;
333 389
334 ret = trace_seq_printf(s, " | "); 390 /* Cpu */
335 if (!ret) 391 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
336 return TRACE_TYPE_PARTIAL_LINE; 392 ret = print_graph_cpu(s, cpu);
337 } 393 if (ret == TRACE_TYPE_PARTIAL_LINE)
394 return TRACE_TYPE_PARTIAL_LINE;
395 }
396 /* Proc */
397 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
398 ret = print_graph_proc(s, pid);
399 if (ret == TRACE_TYPE_PARTIAL_LINE)
400 return TRACE_TYPE_PARTIAL_LINE;
401 ret = trace_seq_printf(s, " | ");
402 if (!ret)
403 return TRACE_TYPE_PARTIAL_LINE;
404 }
338 405
339 /* No overhead */ 406 /* No overhead */
340 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 407 ret = print_graph_overhead(-1, s);
341 ret = trace_seq_printf(s, " "); 408 if (!ret)
342 if (!ret) 409 return TRACE_TYPE_PARTIAL_LINE;
343 return TRACE_TYPE_PARTIAL_LINE; 410
344 } 411 if (type == TRACE_GRAPH_ENT)
412 ret = trace_seq_printf(s, "==========>");
413 else
414 ret = trace_seq_printf(s, "<==========");
415
416 if (!ret)
417 return TRACE_TYPE_PARTIAL_LINE;
418
419 /* Don't close the duration column if haven't one */
420 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
421 trace_seq_printf(s, " |");
422 ret = trace_seq_printf(s, "\n");
345 423
346 ret = trace_seq_printf(s, "<========== |\n");
347 }
348 if (!ret) 424 if (!ret)
349 return TRACE_TYPE_PARTIAL_LINE; 425 return TRACE_TYPE_PARTIAL_LINE;
350 return TRACE_TYPE_HANDLED; 426 return TRACE_TYPE_HANDLED;
@@ -363,7 +439,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
363 sprintf(msecs_str, "%lu", (unsigned long) duration); 439 sprintf(msecs_str, "%lu", (unsigned long) duration);
364 440
365 /* Print msecs */ 441 /* Print msecs */
366 ret = trace_seq_printf(s, msecs_str); 442 ret = trace_seq_printf(s, "%s", msecs_str);
367 if (!ret) 443 if (!ret)
368 return TRACE_TYPE_PARTIAL_LINE; 444 return TRACE_TYPE_PARTIAL_LINE;
369 445
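
One small but worthwhile change in this hunk: msecs_str is now printed through an explicit "%s" rather than being passed as the format string itself, so a '%' that ever ended up in the buffer could not be misread as a conversion specifier. A short userspace illustration of the two patterns (the unsafe call is left commented out on purpose):

#include <stdio.h>

int main(void)
{
        /* Imagine a buffer that unexpectedly contains a '%'. */
        const char msecs_str[] = "100%d";

        /* Safe: the buffer is data and is printed verbatim. */
        printf("%s\n", msecs_str);              /* prints: 100%d */

        /*
         * Unsafe pattern (what the hunk removes): the buffer becomes
         * the format string, so "%d" would consume a nonexistent
         * argument and invoke undefined behaviour.
         *
         * printf(msecs_str);
         */
        return 0;
}
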
@@ -396,52 +472,47 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
396 472
397} 473}
398 474
399/* Signal a overhead of time execution to the output */
400static int
401print_graph_overhead(unsigned long long duration, struct trace_seq *s)
402{
403 /* Duration exceeded 100 msecs */
404 if (duration > 100000ULL)
405 return trace_seq_printf(s, "! ");
406
407 /* Duration exceeded 10 msecs */
408 if (duration > 10000ULL)
409 return trace_seq_printf(s, "+ ");
410
411 return trace_seq_printf(s, " ");
412}
413
414/* Case of a leaf function on its call entry */ 475/* Case of a leaf function on its call entry */
415static enum print_line_t 476static enum print_line_t
416print_graph_entry_leaf(struct trace_iterator *iter, 477print_graph_entry_leaf(struct trace_iterator *iter,
417 struct ftrace_graph_ent_entry *entry, struct trace_seq *s) 478 struct ftrace_graph_ent_entry *entry,
479 struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
418{ 480{
419 struct ftrace_graph_ret_entry *ret_entry; 481 struct fgraph_data *data = iter->private;
420 struct ftrace_graph_ret *graph_ret; 482 struct ftrace_graph_ret *graph_ret;
421 struct ring_buffer_event *event;
422 struct ftrace_graph_ent *call; 483 struct ftrace_graph_ent *call;
423 unsigned long long duration; 484 unsigned long long duration;
424 int ret; 485 int ret;
425 int i; 486 int i;
426 487
427 event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
428 ret_entry = ring_buffer_event_data(event);
429 graph_ret = &ret_entry->ret; 488 graph_ret = &ret_entry->ret;
430 call = &entry->graph_ent; 489 call = &entry->graph_ent;
431 duration = graph_ret->rettime - graph_ret->calltime; 490 duration = graph_ret->rettime - graph_ret->calltime;
432 491
433 /* Overhead */ 492 if (data) {
434 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 493 int cpu = iter->cpu;
435 ret = print_graph_overhead(duration, s); 494 int *depth = &(per_cpu_ptr(data, cpu)->depth);
436 if (!ret) 495
437 return TRACE_TYPE_PARTIAL_LINE; 496 /*
497		 * Comments are displayed at depth + 1. Since
498		 * this is a leaf function, keep the comments
499		 * at this same depth.
500 */
501 *depth = call->depth - 1;
438 } 502 }
439 503
440 /* Duration */ 504 /* Overhead */
441 ret = print_graph_duration(duration, s); 505 ret = print_graph_overhead(duration, s);
442 if (ret == TRACE_TYPE_PARTIAL_LINE) 506 if (!ret)
443 return TRACE_TYPE_PARTIAL_LINE; 507 return TRACE_TYPE_PARTIAL_LINE;
444 508
509 /* Duration */
510 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
511 ret = print_graph_duration(duration, s);
512 if (ret == TRACE_TYPE_PARTIAL_LINE)
513 return TRACE_TYPE_PARTIAL_LINE;
514 }
515
445 /* Function */ 516 /* Function */
446 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 517 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
447 ret = trace_seq_printf(s, " "); 518 ret = trace_seq_printf(s, " ");
@@ -461,33 +532,34 @@ print_graph_entry_leaf(struct trace_iterator *iter,
461} 532}
462 533
463static enum print_line_t 534static enum print_line_t
464print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, 535print_graph_entry_nested(struct trace_iterator *iter,
465 struct trace_seq *s, pid_t pid, int cpu) 536 struct ftrace_graph_ent_entry *entry,
537 struct trace_seq *s, int cpu)
466{ 538{
467 int i;
468 int ret;
469 struct ftrace_graph_ent *call = &entry->graph_ent; 539 struct ftrace_graph_ent *call = &entry->graph_ent;
540 struct fgraph_data *data = iter->private;
541 int ret;
542 int i;
470 543
471 /* No overhead */ 544 if (data) {
472 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 545 int cpu = iter->cpu;
473 ret = trace_seq_printf(s, " "); 546 int *depth = &(per_cpu_ptr(data, cpu)->depth);
474 if (!ret) 547
475 return TRACE_TYPE_PARTIAL_LINE; 548 *depth = call->depth;
476 } 549 }
477 550
478 /* Interrupt */ 551 /* No overhead */
479 ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid); 552 ret = print_graph_overhead(-1, s);
480 if (ret == TRACE_TYPE_UNHANDLED) { 553 if (!ret)
481 /* No time */ 554 return TRACE_TYPE_PARTIAL_LINE;
555
556 /* No time */
557 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
482 ret = trace_seq_printf(s, " | "); 558 ret = trace_seq_printf(s, " | ");
483 if (!ret) 559 if (!ret)
484 return TRACE_TYPE_PARTIAL_LINE; 560 return TRACE_TYPE_PARTIAL_LINE;
485 } else {
486 if (ret == TRACE_TYPE_PARTIAL_LINE)
487 return TRACE_TYPE_PARTIAL_LINE;
488 } 561 }
489 562
490
491 /* Function */ 563 /* Function */
492 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 564 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
493 ret = trace_seq_printf(s, " "); 565 ret = trace_seq_printf(s, " ");
@@ -503,20 +575,40 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
503 if (!ret) 575 if (!ret)
504 return TRACE_TYPE_PARTIAL_LINE; 576 return TRACE_TYPE_PARTIAL_LINE;
505 577
506 return TRACE_TYPE_HANDLED; 578 /*
579 * we already consumed the current entry to check the next one
580 * and see if this is a leaf.
581 */
582 return TRACE_TYPE_NO_CONSUME;
507} 583}
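
print_graph_entry() now asks get_return_for_leaf() whether the very next event is the matching function return, i.e. whether the current call is a leaf. Because that check already consumed the current entry from the ring buffer iterator, the nested path returns TRACE_TYPE_NO_CONSUME so the output loop does not advance a second time. The sketch below only illustrates the peek-ahead leaf test over a plain array iterator; the names graph_event, peek_next() and is_leaf() are hypothetical and the consume bookkeeping is left out.

#include <stdio.h>

enum ev_type { EV_ENT, EV_RET };

struct graph_event {            /* hypothetical stand-in for the ftrace entries */
        enum ev_type type;
        int depth;
};

struct iter {
        const struct graph_event *ev;
        int pos, len;
};

/* Look at the next event without advancing; NULL at end of stream. */
static const struct graph_event *peek_next(const struct iter *it)
{
        return (it->pos + 1 < it->len) ? &it->ev[it->pos + 1] : NULL;
}

/* A call entry is a "leaf" when its matching return follows immediately. */
static int is_leaf(const struct iter *it)
{
        const struct graph_event *next = peek_next(it);

        return next && next->type == EV_RET &&
               next->depth == it->ev[it->pos].depth;
}

int main(void)
{
        const struct graph_event trace[] = {
                { EV_ENT, 0 }, { EV_ENT, 1 }, { EV_RET, 1 }, { EV_RET, 0 },
        };
        struct iter it = { trace, 0, 4 };

        printf("outer call is %s\n", is_leaf(&it) ? "a leaf" : "nested");
        it.pos = 1;
        printf("inner call is %s\n", is_leaf(&it) ? "a leaf" : "nested");
        return 0;
}
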
508 584
509static enum print_line_t 585static enum print_line_t
510print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, 586print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
511 struct trace_iterator *iter, int cpu) 587 int type, unsigned long addr)
512{ 588{
513 int ret; 589 struct fgraph_data *data = iter->private;
514 struct trace_entry *ent = iter->ent; 590 struct trace_entry *ent = iter->ent;
591 int cpu = iter->cpu;
592 int ret;
515 593
516 /* Pid */ 594 /* Pid */
517 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) 595 if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
518 return TRACE_TYPE_PARTIAL_LINE; 596 return TRACE_TYPE_PARTIAL_LINE;
519 597
598 if (type) {
599 /* Interrupt */
600 ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
601 if (ret == TRACE_TYPE_PARTIAL_LINE)
602 return TRACE_TYPE_PARTIAL_LINE;
603 }
604
605 /* Absolute time */
606 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
607 ret = print_graph_abs_time(iter->ts, s);
608 if (!ret)
609 return TRACE_TYPE_PARTIAL_LINE;
610 }
611
520 /* Cpu */ 612 /* Cpu */
521 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 613 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
522 ret = print_graph_cpu(s, cpu); 614 ret = print_graph_cpu(s, cpu);
@@ -535,54 +627,65 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
535 return TRACE_TYPE_PARTIAL_LINE; 627 return TRACE_TYPE_PARTIAL_LINE;
536 } 628 }
537 629
538 if (trace_branch_is_leaf(iter, field)) 630 return 0;
539 return print_graph_entry_leaf(iter, field, s); 631}
632
633static enum print_line_t
634print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
635 struct trace_iterator *iter)
636{
637 int cpu = iter->cpu;
638 struct ftrace_graph_ent *call = &field->graph_ent;
639 struct ftrace_graph_ret_entry *leaf_ret;
640
641 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
642 return TRACE_TYPE_PARTIAL_LINE;
643
644 leaf_ret = get_return_for_leaf(iter, field);
645 if (leaf_ret)
646 return print_graph_entry_leaf(iter, field, leaf_ret, s);
540 else 647 else
541 return print_graph_entry_nested(field, s, iter->ent->pid, cpu); 648 return print_graph_entry_nested(iter, field, s, cpu);
542 649
543} 650}
544 651
545static enum print_line_t 652static enum print_line_t
546print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, 653print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
547 struct trace_entry *ent, int cpu) 654 struct trace_entry *ent, struct trace_iterator *iter)
548{ 655{
549 int i;
550 int ret;
551 unsigned long long duration = trace->rettime - trace->calltime; 656 unsigned long long duration = trace->rettime - trace->calltime;
657 struct fgraph_data *data = iter->private;
658 pid_t pid = ent->pid;
659 int cpu = iter->cpu;
660 int ret;
661 int i;
552 662
553 /* Pid */ 663 if (data) {
554 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) 664 int cpu = iter->cpu;
555 return TRACE_TYPE_PARTIAL_LINE; 665 int *depth = &(per_cpu_ptr(data, cpu)->depth);
556 666
557 /* Cpu */ 667 /*
558 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 668 * Comments display at + 1 to depth. This is the
559 ret = print_graph_cpu(s, cpu); 669 * return from a function, we now want the comments
560 if (ret == TRACE_TYPE_PARTIAL_LINE) 670 * to display at the same level of the bracket.
561 return TRACE_TYPE_PARTIAL_LINE; 671 */
672 *depth = trace->depth - 1;
562 } 673 }
563 674
564 /* Proc */ 675 if (print_graph_prologue(iter, s, 0, 0))
565 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 676 return TRACE_TYPE_PARTIAL_LINE;
566 ret = print_graph_proc(s, ent->pid);
567 if (ret == TRACE_TYPE_PARTIAL_LINE)
568 return TRACE_TYPE_PARTIAL_LINE;
569
570 ret = trace_seq_printf(s, " | ");
571 if (!ret)
572 return TRACE_TYPE_PARTIAL_LINE;
573 }
574 677
575 /* Overhead */ 678 /* Overhead */
576 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 679 ret = print_graph_overhead(duration, s);
577 ret = print_graph_overhead(duration, s); 680 if (!ret)
578 if (!ret) 681 return TRACE_TYPE_PARTIAL_LINE;
579 return TRACE_TYPE_PARTIAL_LINE;
580 }
581 682
582 /* Duration */ 683 /* Duration */
583 ret = print_graph_duration(duration, s); 684 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
584 if (ret == TRACE_TYPE_PARTIAL_LINE) 685 ret = print_graph_duration(duration, s);
585 return TRACE_TYPE_PARTIAL_LINE; 686 if (ret == TRACE_TYPE_PARTIAL_LINE)
687 return TRACE_TYPE_PARTIAL_LINE;
688 }
586 689
587 /* Closing brace */ 690 /* Closing brace */
588 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { 691 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -603,7 +706,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
603 return TRACE_TYPE_PARTIAL_LINE; 706 return TRACE_TYPE_PARTIAL_LINE;
604 } 707 }
605 708
606 ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid); 709 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
607 if (ret == TRACE_TYPE_PARTIAL_LINE) 710 if (ret == TRACE_TYPE_PARTIAL_LINE)
608 return TRACE_TYPE_PARTIAL_LINE; 711 return TRACE_TYPE_PARTIAL_LINE;
609 712
@@ -611,61 +714,73 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
611} 714}
612 715
613static enum print_line_t 716static enum print_line_t
614print_graph_comment(struct print_entry *trace, struct trace_seq *s, 717print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
615 struct trace_entry *ent, struct trace_iterator *iter) 718 struct trace_iterator *iter)
616{ 719{
617 int i; 720 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
721 struct fgraph_data *data = iter->private;
722 struct trace_event *event;
723 int depth = 0;
618 int ret; 724 int ret;
725 int i;
619 726
620 /* Pid */ 727 if (data)
621 if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE) 728 depth = per_cpu_ptr(data, iter->cpu)->depth;
622 return TRACE_TYPE_PARTIAL_LINE;
623
624 /* Cpu */
625 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
626 ret = print_graph_cpu(s, iter->cpu);
627 if (ret == TRACE_TYPE_PARTIAL_LINE)
628 return TRACE_TYPE_PARTIAL_LINE;
629 }
630
631 /* Proc */
632 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
633 ret = print_graph_proc(s, ent->pid);
634 if (ret == TRACE_TYPE_PARTIAL_LINE)
635 return TRACE_TYPE_PARTIAL_LINE;
636 729
637 ret = trace_seq_printf(s, " | "); 730 if (print_graph_prologue(iter, s, 0, 0))
638 if (!ret) 731 return TRACE_TYPE_PARTIAL_LINE;
639 return TRACE_TYPE_PARTIAL_LINE;
640 }
641 732
642 /* No overhead */ 733 /* No overhead */
643 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 734 ret = print_graph_overhead(-1, s);
644 ret = trace_seq_printf(s, " "); 735 if (!ret)
736 return TRACE_TYPE_PARTIAL_LINE;
737
738 /* No time */
739 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
740 ret = trace_seq_printf(s, " | ");
645 if (!ret) 741 if (!ret)
646 return TRACE_TYPE_PARTIAL_LINE; 742 return TRACE_TYPE_PARTIAL_LINE;
647 } 743 }
648 744
649 /* No time */
650 ret = trace_seq_printf(s, " | ");
651 if (!ret)
652 return TRACE_TYPE_PARTIAL_LINE;
653
654 /* Indentation */ 745 /* Indentation */
655 if (trace->depth > 0) 746 if (depth > 0)
656 for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { 747 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
657 ret = trace_seq_printf(s, " "); 748 ret = trace_seq_printf(s, " ");
658 if (!ret) 749 if (!ret)
659 return TRACE_TYPE_PARTIAL_LINE; 750 return TRACE_TYPE_PARTIAL_LINE;
660 } 751 }
661 752
662 /* The comment */ 753 /* The comment */
663 ret = trace_seq_printf(s, "/* %s", trace->buf); 754 ret = trace_seq_printf(s, "/* ");
664 if (!ret) 755 if (!ret)
665 return TRACE_TYPE_PARTIAL_LINE; 756 return TRACE_TYPE_PARTIAL_LINE;
666 757
667 if (ent->flags & TRACE_FLAG_CONT) 758 switch (iter->ent->type) {
668 trace_seq_print_cont(s, iter); 759 case TRACE_BPRINT:
760 ret = trace_print_bprintk_msg_only(iter);
761 if (ret != TRACE_TYPE_HANDLED)
762 return ret;
763 break;
764 case TRACE_PRINT:
765 ret = trace_print_printk_msg_only(iter);
766 if (ret != TRACE_TYPE_HANDLED)
767 return ret;
768 break;
769 default:
770 event = ftrace_find_event(ent->type);
771 if (!event)
772 return TRACE_TYPE_UNHANDLED;
773
774 ret = event->trace(iter, sym_flags);
775 if (ret != TRACE_TYPE_HANDLED)
776 return ret;
777 }
778
779 /* Strip ending newline */
780 if (s->buffer[s->len - 1] == '\n') {
781 s->buffer[s->len - 1] = '\0';
782 s->len--;
783 }
669 784
670 ret = trace_seq_printf(s, " */\n"); 785 ret = trace_seq_printf(s, " */\n");
671 if (!ret) 786 if (!ret)
@@ -678,62 +793,91 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
678enum print_line_t 793enum print_line_t
679print_graph_function(struct trace_iterator *iter) 794print_graph_function(struct trace_iterator *iter)
680{ 795{
681 struct trace_seq *s = &iter->seq;
682 struct trace_entry *entry = iter->ent; 796 struct trace_entry *entry = iter->ent;
797 struct trace_seq *s = &iter->seq;
683 798
684 switch (entry->type) { 799 switch (entry->type) {
685 case TRACE_GRAPH_ENT: { 800 case TRACE_GRAPH_ENT: {
686 struct ftrace_graph_ent_entry *field; 801 struct ftrace_graph_ent_entry *field;
687 trace_assign_type(field, entry); 802 trace_assign_type(field, entry);
688 return print_graph_entry(field, s, iter, 803 return print_graph_entry(field, s, iter);
689 iter->cpu);
690 } 804 }
691 case TRACE_GRAPH_RET: { 805 case TRACE_GRAPH_RET: {
692 struct ftrace_graph_ret_entry *field; 806 struct ftrace_graph_ret_entry *field;
693 trace_assign_type(field, entry); 807 trace_assign_type(field, entry);
694 return print_graph_return(&field->ret, s, entry, iter->cpu); 808 return print_graph_return(&field->ret, s, entry, iter);
695 }
696 case TRACE_PRINT: {
697 struct print_entry *field;
698 trace_assign_type(field, entry);
699 return print_graph_comment(field, s, entry, iter);
700 } 809 }
701 default: 810 default:
702 return TRACE_TYPE_UNHANDLED; 811 return print_graph_comment(s, entry, iter);
703 } 812 }
813
814 return TRACE_TYPE_HANDLED;
704} 815}
705 816
706static void print_graph_headers(struct seq_file *s) 817static void print_graph_headers(struct seq_file *s)
707{ 818{
708 /* 1st line */ 819 /* 1st line */
709 seq_printf(s, "# "); 820 seq_printf(s, "# ");
821 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
822 seq_printf(s, " TIME ");
710 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 823 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
711 seq_printf(s, "CPU "); 824 seq_printf(s, "CPU");
712 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 825 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
713 seq_printf(s, "TASK/PID "); 826 seq_printf(s, " TASK/PID ");
714 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) 827 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
715 seq_printf(s, "OVERHEAD/"); 828 seq_printf(s, " DURATION ");
716 seq_printf(s, "DURATION FUNCTION CALLS\n"); 829 seq_printf(s, " FUNCTION CALLS\n");
717 830
718 /* 2nd line */ 831 /* 2nd line */
719 seq_printf(s, "# "); 832 seq_printf(s, "# ");
833 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
834 seq_printf(s, " | ");
720 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 835 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
721 seq_printf(s, "| "); 836 seq_printf(s, "| ");
722 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 837 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
723 seq_printf(s, "| | "); 838 seq_printf(s, " | | ");
724 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 839 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
725 seq_printf(s, "| "); 840 seq_printf(s, " | | ");
726 seq_printf(s, "| | | | |\n"); 841 seq_printf(s, " | | | |\n");
727 } else 842}
728 seq_printf(s, " | | | | |\n"); 843
844static void graph_trace_open(struct trace_iterator *iter)
845{
846	/* pid and depth of the last trace processed */
847 struct fgraph_data *data = alloc_percpu(struct fgraph_data);
848 int cpu;
849
850 if (!data)
851 pr_warning("function graph tracer: not enough memory\n");
852 else
853 for_each_possible_cpu(cpu) {
854 pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
855 int *depth = &(per_cpu_ptr(data, cpu)->depth);
856 *pid = -1;
857 *depth = 0;
858 }
859
860 iter->private = data;
729} 861}
862
863static void graph_trace_close(struct trace_iterator *iter)
864{
865 free_percpu(iter->private);
866}
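
graph_trace_open() and graph_trace_close() give the tracer per-cpu private state, the last pid printed and the current depth, initialised to -1 and 0 and freed when the iterator goes away. A rough userspace analogue of that lifecycle, using a heap array indexed by a pretend CPU number instead of alloc_percpu() (NR_CPUS here is just an assumption for the sketch):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

struct fgraph_data {            /* mirrors the per-cpu record in the patch */
        pid_t last_pid;
        int depth;
};

#define NR_CPUS 4               /* assumption for the sketch */

static struct fgraph_data *graph_open(void)
{
        struct fgraph_data *data = calloc(NR_CPUS, sizeof(*data));
        int cpu;

        if (!data)
                return NULL;
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                data[cpu].last_pid = -1;        /* no task seen yet */
                data[cpu].depth = 0;
        }
        return data;
}

static void graph_close(struct fgraph_data *data)
{
        free(data);
}

int main(void)
{
        struct fgraph_data *data = graph_open();

        if (data)
                printf("cpu0: last_pid=%d depth=%d\n",
                       (int)data[0].last_pid, data[0].depth);
        graph_close(data);
        return 0;
}
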
867
730static struct tracer graph_trace __read_mostly = { 868static struct tracer graph_trace __read_mostly = {
731 .name = "function_graph", 869 .name = "function_graph",
732 .init = graph_trace_init, 870 .open = graph_trace_open,
733 .reset = graph_trace_reset, 871 .close = graph_trace_close,
872 .wait_pipe = poll_wait_pipe,
873 .init = graph_trace_init,
874 .reset = graph_trace_reset,
734 .print_line = print_graph_function, 875 .print_line = print_graph_function,
735 .print_header = print_graph_headers, 876 .print_header = print_graph_headers,
736 .flags = &tracer_flags, 877 .flags = &tracer_flags,
878#ifdef CONFIG_FTRACE_SELFTEST
879 .selftest = trace_selftest_startup_function_graph,
880#endif
737}; 881};
738 882
739static __init int init_graph_trace(void) 883static __init int init_graph_trace(void)
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 649df22d435f..7bfdf4c2347f 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -1,30 +1,53 @@
1/* 1/*
2 * h/w branch tracer for x86 based on bts 2 * h/w branch tracer for x86 based on bts
3 * 3 *
4 * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com> 4 * Copyright (C) 2008-2009 Intel Corporation.
5 * 5 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
6 */ 6 */
7 7#include <linux/spinlock.h>
8#include <linux/module.h> 8#include <linux/kallsyms.h>
9#include <linux/fs.h>
10#include <linux/debugfs.h> 9#include <linux/debugfs.h>
11#include <linux/ftrace.h> 10#include <linux/ftrace.h>
12#include <linux/kallsyms.h> 11#include <linux/module.h>
12#include <linux/cpu.h>
13#include <linux/smp.h>
14#include <linux/fs.h>
13 15
14#include <asm/ds.h> 16#include <asm/ds.h>
15 17
16#include "trace.h" 18#include "trace.h"
19#include "trace_output.h"
17 20
18 21
19#define SIZEOF_BTS (1 << 13) 22#define SIZEOF_BTS (1 << 13)
20 23
24/*
25 * The tracer lock protects the below per-cpu tracer array.
26 * It needs to be held to:
27 * - start tracing on all cpus
28 * - stop tracing on all cpus
29 * - start tracing on a single hotplug cpu
30 * - stop tracing on a single hotplug cpu
31 * - read the trace from all cpus
32 * - read the trace from a single cpu
33 */
34static DEFINE_SPINLOCK(bts_tracer_lock);
21static DEFINE_PER_CPU(struct bts_tracer *, tracer); 35static DEFINE_PER_CPU(struct bts_tracer *, tracer);
22static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); 36static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
23 37
24#define this_tracer per_cpu(tracer, smp_processor_id()) 38#define this_tracer per_cpu(tracer, smp_processor_id())
25#define this_buffer per_cpu(buffer, smp_processor_id()) 39#define this_buffer per_cpu(buffer, smp_processor_id())
26 40
41static int __read_mostly trace_hw_branches_enabled;
42static struct trace_array *hw_branch_trace __read_mostly;
43
27 44
45/*
46 * Start tracing on the current cpu.
47 * The argument is ignored.
48 *
49 * pre: bts_tracer_lock must be locked.
50 */
28static void bts_trace_start_cpu(void *arg) 51static void bts_trace_start_cpu(void *arg)
29{ 52{
30 if (this_tracer) 53 if (this_tracer)
@@ -42,14 +65,20 @@ static void bts_trace_start_cpu(void *arg)
42 65
43static void bts_trace_start(struct trace_array *tr) 66static void bts_trace_start(struct trace_array *tr)
44{ 67{
45 int cpu; 68 spin_lock(&bts_tracer_lock);
46 69
47 tracing_reset_online_cpus(tr); 70 on_each_cpu(bts_trace_start_cpu, NULL, 1);
71 trace_hw_branches_enabled = 1;
48 72
49 for_each_cpu(cpu, cpu_possible_mask) 73 spin_unlock(&bts_tracer_lock);
50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
51} 74}
52 75
76/*
77 * Stop tracing on the current cpu.
78 * The argument is ignored.
79 *
80 * pre: bts_tracer_lock must be locked.
81 */
53static void bts_trace_stop_cpu(void *arg) 82static void bts_trace_stop_cpu(void *arg)
54{ 83{
55 if (this_tracer) { 84 if (this_tracer) {
@@ -60,26 +89,60 @@ static void bts_trace_stop_cpu(void *arg)
60 89
61static void bts_trace_stop(struct trace_array *tr) 90static void bts_trace_stop(struct trace_array *tr)
62{ 91{
63 int cpu; 92 spin_lock(&bts_tracer_lock);
93
94 trace_hw_branches_enabled = 0;
95 on_each_cpu(bts_trace_stop_cpu, NULL, 1);
96
97 spin_unlock(&bts_tracer_lock);
98}
99
100static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
101 unsigned long action, void *hcpu)
102{
103 unsigned int cpu = (unsigned long)hcpu;
64 104
65 for_each_cpu(cpu, cpu_possible_mask) 105 spin_lock(&bts_tracer_lock);
106
107 if (!trace_hw_branches_enabled)
108 goto out;
109
110 switch (action) {
111 case CPU_ONLINE:
112 case CPU_DOWN_FAILED:
113 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
114 break;
115 case CPU_DOWN_PREPARE:
66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); 116 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
117 break;
118 }
119
120 out:
121 spin_unlock(&bts_tracer_lock);
122 return NOTIFY_DONE;
67} 123}
68 124
125static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
126 .notifier_call = bts_hotcpu_handler
127};
128
69static int bts_trace_init(struct trace_array *tr) 129static int bts_trace_init(struct trace_array *tr)
70{ 130{
71 tracing_reset_online_cpus(tr); 131 hw_branch_trace = tr;
132
72 bts_trace_start(tr); 133 bts_trace_start(tr);
73 134
74 return 0; 135 return 0;
75} 136}
76 137
138static void bts_trace_reset(struct trace_array *tr)
139{
140 bts_trace_stop(tr);
141}
142
77static void bts_trace_print_header(struct seq_file *m) 143static void bts_trace_print_header(struct seq_file *m)
78{ 144{
79 seq_puts(m, 145 seq_puts(m, "# CPU# TO <- FROM\n");
80 "# CPU# FROM TO FUNCTION\n");
81 seq_puts(m,
82 "# | | | |\n");
83} 146}
84 147
85static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) 148static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
@@ -87,15 +150,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
87 struct trace_entry *entry = iter->ent; 150 struct trace_entry *entry = iter->ent;
88 struct trace_seq *seq = &iter->seq; 151 struct trace_seq *seq = &iter->seq;
89 struct hw_branch_entry *it; 152 struct hw_branch_entry *it;
153 unsigned long symflags = TRACE_ITER_SYM_OFFSET;
90 154
91 trace_assign_type(it, entry); 155 trace_assign_type(it, entry);
92 156
93 if (entry->type == TRACE_HW_BRANCHES) { 157 if (entry->type == TRACE_HW_BRANCHES) {
94 if (trace_seq_printf(seq, "%4d ", entry->cpu) && 158 if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
95 trace_seq_printf(seq, "0x%016llx -> 0x%016llx ", 159 seq_print_ip_sym(seq, it->to, symflags) &&
96 it->from, it->to) && 160 trace_seq_printf(seq, "\t <- ") &&
97 (!it->from || 161 seq_print_ip_sym(seq, it->from, symflags) &&
98 seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
99 trace_seq_printf(seq, "\n")) 162 trace_seq_printf(seq, "\n"))
100 return TRACE_TYPE_HANDLED; 163 return TRACE_TYPE_HANDLED;
101 return TRACE_TYPE_PARTIAL_LINE;; 164 return TRACE_TYPE_PARTIAL_LINE;;
@@ -103,26 +166,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
103 return TRACE_TYPE_UNHANDLED; 166 return TRACE_TYPE_UNHANDLED;
104} 167}
105 168
106void trace_hw_branch(struct trace_array *tr, u64 from, u64 to) 169void trace_hw_branch(u64 from, u64 to)
107{ 170{
171 struct trace_array *tr = hw_branch_trace;
108 struct ring_buffer_event *event; 172 struct ring_buffer_event *event;
109 struct hw_branch_entry *entry; 173 struct hw_branch_entry *entry;
110 unsigned long irq; 174 unsigned long irq1;
175 int cpu;
111 176
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); 177 if (unlikely(!tr))
113 if (!event)
114 return; 178 return;
179
180 if (unlikely(!trace_hw_branches_enabled))
181 return;
182
183 local_irq_save(irq1);
184 cpu = raw_smp_processor_id();
185 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
186 goto out;
187
188 event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
189 sizeof(*entry), 0, 0);
190 if (!event)
191 goto out;
115 entry = ring_buffer_event_data(event); 192 entry = ring_buffer_event_data(event);
116 tracing_generic_entry_update(&entry->ent, 0, from); 193 tracing_generic_entry_update(&entry->ent, 0, from);
117 entry->ent.type = TRACE_HW_BRANCHES; 194 entry->ent.type = TRACE_HW_BRANCHES;
118 entry->ent.cpu = smp_processor_id();
119 entry->from = from; 195 entry->from = from;
120 entry->to = to; 196 entry->to = to;
121 ring_buffer_unlock_commit(tr->buffer, event, irq); 197 trace_buffer_unlock_commit(tr, event, 0, 0);
198
199 out:
200 atomic_dec(&tr->data[cpu]->disabled);
201 local_irq_restore(irq1);
122} 202}
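
trace_hw_branch() now guards its own ring-buffer write: interrupts are disabled with local_irq_save() and the per-cpu "disabled" counter is bumped, so a branch record generated while the tracer is already writing one is simply dropped instead of recursing into the buffer. The sketch below shows the same guard shape with a thread-local counter in plain C; it illustrates the pattern only and is not the kernel API.

#include <stdio.h>

/* Per-"cpu" (here: per-thread) recursion guard, analogous to
 * tr->data[cpu]->disabled in the hunk. */
static _Thread_local int disabled;

static void emit_record(const char *what)
{
        printf("record: %s\n", what);
}

static void trace_branch(const char *what)
{
        /* First entry increments 0 -> 1; a nested call sees 2 and bails. */
        if (++disabled != 1)
                goto out;

        emit_record(what);
out:
        disabled--;
}

int main(void)
{
        trace_branch("from -> to");
        return 0;
}
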
123 203
124static void trace_bts_at(struct trace_array *tr, 204static void trace_bts_at(const struct bts_trace *trace, void *at)
125 const struct bts_trace *trace, void *at)
126{ 205{
127 struct bts_struct bts; 206 struct bts_struct bts;
128 int err = 0; 207 int err = 0;
@@ -137,18 +216,29 @@ static void trace_bts_at(struct trace_array *tr,
137 216
138 switch (bts.qualifier) { 217 switch (bts.qualifier) {
139 case BTS_BRANCH: 218 case BTS_BRANCH:
140 trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to); 219 trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
141 break; 220 break;
142 } 221 }
143} 222}
144 223
224/*
225 * Collect the trace on the current cpu and write it into the ftrace buffer.
226 *
227 * pre: bts_tracer_lock must be locked
228 */
145static void trace_bts_cpu(void *arg) 229static void trace_bts_cpu(void *arg)
146{ 230{
147 struct trace_array *tr = (struct trace_array *) arg; 231 struct trace_array *tr = (struct trace_array *) arg;
148 const struct bts_trace *trace; 232 const struct bts_trace *trace;
149 unsigned char *at; 233 unsigned char *at;
150 234
151 if (!this_tracer) 235 if (unlikely(!tr))
236 return;
237
238 if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
239 return;
240
241 if (unlikely(!this_tracer))
152 return; 242 return;
153 243
154 ds_suspend_bts(this_tracer); 244 ds_suspend_bts(this_tracer);
@@ -158,11 +248,11 @@ static void trace_bts_cpu(void *arg)
158 248
159 for (at = trace->ds.top; (void *)at < trace->ds.end; 249 for (at = trace->ds.top; (void *)at < trace->ds.end;
160 at += trace->ds.size) 250 at += trace->ds.size)
161 trace_bts_at(tr, trace, at); 251 trace_bts_at(trace, at);
162 252
163 for (at = trace->ds.begin; (void *)at < trace->ds.top; 253 for (at = trace->ds.begin; (void *)at < trace->ds.top;
164 at += trace->ds.size) 254 at += trace->ds.size)
165 trace_bts_at(tr, trace, at); 255 trace_bts_at(trace, at);
166 256
167out: 257out:
168 ds_resume_bts(this_tracer); 258 ds_resume_bts(this_tracer);
@@ -170,26 +260,43 @@ out:
170 260
171static void trace_bts_prepare(struct trace_iterator *iter) 261static void trace_bts_prepare(struct trace_iterator *iter)
172{ 262{
173 int cpu; 263 spin_lock(&bts_tracer_lock);
264
265 on_each_cpu(trace_bts_cpu, iter->tr, 1);
266
267 spin_unlock(&bts_tracer_lock);
268}
269
270static void trace_bts_close(struct trace_iterator *iter)
271{
272 tracing_reset_online_cpus(iter->tr);
273}
274
275void trace_hw_branch_oops(void)
276{
277 spin_lock(&bts_tracer_lock);
278
279 trace_bts_cpu(hw_branch_trace);
174 280
175 for_each_cpu(cpu, cpu_possible_mask) 281 spin_unlock(&bts_tracer_lock);
176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
177} 282}
178 283
179struct tracer bts_tracer __read_mostly = 284struct tracer bts_tracer __read_mostly =
180{ 285{
181 .name = "hw-branch-tracer", 286 .name = "hw-branch-tracer",
182 .init = bts_trace_init, 287 .init = bts_trace_init,
183 .reset = bts_trace_stop, 288 .reset = bts_trace_reset,
184 .print_header = bts_trace_print_header, 289 .print_header = bts_trace_print_header,
185 .print_line = bts_trace_print_line, 290 .print_line = bts_trace_print_line,
186 .start = bts_trace_start, 291 .start = bts_trace_start,
187 .stop = bts_trace_stop, 292 .stop = bts_trace_stop,
188 .open = trace_bts_prepare 293 .open = trace_bts_prepare,
294 .close = trace_bts_close
189}; 295};
190 296
191__init static int init_bts_trace(void) 297__init static int init_bts_trace(void)
192{ 298{
299 register_hotcpu_notifier(&bts_hotcpu_notifier);
193 return register_tracer(&bts_tracer); 300 return register_tracer(&bts_tracer);
194} 301}
195device_initcall(init_bts_trace); 302device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 62a78d943534..b923d13e2fad 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * trace irqs off criticall timings 2 * trace irqs off critical timings
3 * 3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> 5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
@@ -32,6 +32,8 @@ enum {
32 32
33static int trace_type __read_mostly; 33static int trace_type __read_mostly;
34 34
35static int save_lat_flag;
36
35#ifdef CONFIG_PREEMPT_TRACER 37#ifdef CONFIG_PREEMPT_TRACER
36static inline int 38static inline int
37preempt_trace(void) 39preempt_trace(void)
@@ -95,7 +97,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
95 disabled = atomic_inc_return(&data->disabled); 97 disabled = atomic_inc_return(&data->disabled);
96 98
97 if (likely(disabled == 1)) 99 if (likely(disabled == 1))
98 trace_function(tr, data, ip, parent_ip, flags, preempt_count()); 100 trace_function(tr, ip, parent_ip, flags, preempt_count());
99 101
100 atomic_dec(&data->disabled); 102 atomic_dec(&data->disabled);
101} 103}
@@ -153,7 +155,7 @@ check_critical_timing(struct trace_array *tr,
153 if (!report_latency(delta)) 155 if (!report_latency(delta))
154 goto out_unlock; 156 goto out_unlock;
155 157
156 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); 158 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
157 159
158 latency = nsecs_to_usecs(delta); 160 latency = nsecs_to_usecs(delta);
159 161
@@ -177,7 +179,7 @@ out:
177 data->critical_sequence = max_sequence; 179 data->critical_sequence = max_sequence;
178 data->preempt_timestamp = ftrace_now(cpu); 180 data->preempt_timestamp = ftrace_now(cpu);
179 tracing_reset(tr, cpu); 181 tracing_reset(tr, cpu);
180 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); 182 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
181} 183}
182 184
183static inline void 185static inline void
@@ -210,7 +212,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
210 212
211 local_save_flags(flags); 213 local_save_flags(flags);
212 214
213 trace_function(tr, data, ip, parent_ip, flags, preempt_count()); 215 trace_function(tr, ip, parent_ip, flags, preempt_count());
214 216
215 per_cpu(tracing_cpu, cpu) = 1; 217 per_cpu(tracing_cpu, cpu) = 1;
216 218
@@ -244,7 +246,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
244 atomic_inc(&data->disabled); 246 atomic_inc(&data->disabled);
245 247
246 local_save_flags(flags); 248 local_save_flags(flags);
247 trace_function(tr, data, ip, parent_ip, flags, preempt_count()); 249 trace_function(tr, ip, parent_ip, flags, preempt_count());
248 check_critical_timing(tr, data, parent_ip ? : ip, cpu); 250 check_critical_timing(tr, data, parent_ip ? : ip, cpu);
249 data->critical_start = 0; 251 data->critical_start = 0;
250 atomic_dec(&data->disabled); 252 atomic_dec(&data->disabled);
@@ -353,33 +355,26 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
353} 355}
354#endif /* CONFIG_PREEMPT_TRACER */ 356#endif /* CONFIG_PREEMPT_TRACER */
355 357
356/*
357 * save_tracer_enabled is used to save the state of the tracer_enabled
358 * variable when we disable it when we open a trace output file.
359 */
360static int save_tracer_enabled;
361
362static void start_irqsoff_tracer(struct trace_array *tr) 358static void start_irqsoff_tracer(struct trace_array *tr)
363{ 359{
364 register_ftrace_function(&trace_ops); 360 register_ftrace_function(&trace_ops);
365 if (tracing_is_enabled()) { 361 if (tracing_is_enabled())
366 tracer_enabled = 1; 362 tracer_enabled = 1;
367 save_tracer_enabled = 1; 363 else
368 } else {
369 tracer_enabled = 0; 364 tracer_enabled = 0;
370 save_tracer_enabled = 0;
371 }
372} 365}
373 366
374static void stop_irqsoff_tracer(struct trace_array *tr) 367static void stop_irqsoff_tracer(struct trace_array *tr)
375{ 368{
376 tracer_enabled = 0; 369 tracer_enabled = 0;
377 save_tracer_enabled = 0;
378 unregister_ftrace_function(&trace_ops); 370 unregister_ftrace_function(&trace_ops);
379} 371}
380 372
381static void __irqsoff_tracer_init(struct trace_array *tr) 373static void __irqsoff_tracer_init(struct trace_array *tr)
382{ 374{
375 save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
376 trace_flags |= TRACE_ITER_LATENCY_FMT;
377
383 tracing_max_latency = 0; 378 tracing_max_latency = 0;
384 irqsoff_trace = tr; 379 irqsoff_trace = tr;
385 /* make sure that the tracer is visible */ 380 /* make sure that the tracer is visible */
@@ -390,30 +385,19 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
390static void irqsoff_tracer_reset(struct trace_array *tr) 385static void irqsoff_tracer_reset(struct trace_array *tr)
391{ 386{
392 stop_irqsoff_tracer(tr); 387 stop_irqsoff_tracer(tr);
388
389 if (!save_lat_flag)
390 trace_flags &= ~TRACE_ITER_LATENCY_FMT;
393} 391}
394 392
395static void irqsoff_tracer_start(struct trace_array *tr) 393static void irqsoff_tracer_start(struct trace_array *tr)
396{ 394{
397 tracer_enabled = 1; 395 tracer_enabled = 1;
398 save_tracer_enabled = 1;
399} 396}
400 397
401static void irqsoff_tracer_stop(struct trace_array *tr) 398static void irqsoff_tracer_stop(struct trace_array *tr)
402{ 399{
403 tracer_enabled = 0; 400 tracer_enabled = 0;
404 save_tracer_enabled = 0;
405}
406
407static void irqsoff_tracer_open(struct trace_iterator *iter)
408{
409 /* stop the trace while dumping */
410 tracer_enabled = 0;
411}
412
413static void irqsoff_tracer_close(struct trace_iterator *iter)
414{
415 /* restart tracing */
416 tracer_enabled = save_tracer_enabled;
417} 401}
418 402
419#ifdef CONFIG_IRQSOFF_TRACER 403#ifdef CONFIG_IRQSOFF_TRACER
@@ -431,8 +415,6 @@ static struct tracer irqsoff_tracer __read_mostly =
431 .reset = irqsoff_tracer_reset, 415 .reset = irqsoff_tracer_reset,
432 .start = irqsoff_tracer_start, 416 .start = irqsoff_tracer_start,
433 .stop = irqsoff_tracer_stop, 417 .stop = irqsoff_tracer_stop,
434 .open = irqsoff_tracer_open,
435 .close = irqsoff_tracer_close,
436 .print_max = 1, 418 .print_max = 1,
437#ifdef CONFIG_FTRACE_SELFTEST 419#ifdef CONFIG_FTRACE_SELFTEST
438 .selftest = trace_selftest_startup_irqsoff, 420 .selftest = trace_selftest_startup_irqsoff,
@@ -459,8 +441,6 @@ static struct tracer preemptoff_tracer __read_mostly =
459 .reset = irqsoff_tracer_reset, 441 .reset = irqsoff_tracer_reset,
460 .start = irqsoff_tracer_start, 442 .start = irqsoff_tracer_start,
461 .stop = irqsoff_tracer_stop, 443 .stop = irqsoff_tracer_stop,
462 .open = irqsoff_tracer_open,
463 .close = irqsoff_tracer_close,
464 .print_max = 1, 444 .print_max = 1,
465#ifdef CONFIG_FTRACE_SELFTEST 445#ifdef CONFIG_FTRACE_SELFTEST
466 .selftest = trace_selftest_startup_preemptoff, 446 .selftest = trace_selftest_startup_preemptoff,
@@ -489,8 +469,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
489 .reset = irqsoff_tracer_reset, 469 .reset = irqsoff_tracer_reset,
490 .start = irqsoff_tracer_start, 470 .start = irqsoff_tracer_start,
491 .stop = irqsoff_tracer_stop, 471 .stop = irqsoff_tracer_stop,
492 .open = irqsoff_tracer_open,
493 .close = irqsoff_tracer_close,
494 .print_max = 1, 472 .print_max = 1,
495#ifdef CONFIG_FTRACE_SELFTEST 473#ifdef CONFIG_FTRACE_SELFTEST
496 .selftest = trace_selftest_startup_preemptirqsoff, 474 .selftest = trace_selftest_startup_preemptirqsoff,
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 80e503ef6136..8e37fcddd8b4 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -12,6 +12,7 @@
12#include <asm/atomic.h> 12#include <asm/atomic.h>
13 13
14#include "trace.h" 14#include "trace.h"
15#include "trace_output.h"
15 16
16struct header_iter { 17struct header_iter {
17 struct pci_dev *dev; 18 struct pci_dev *dev;
@@ -183,21 +184,22 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
183 switch (rw->opcode) { 184 switch (rw->opcode) {
184 case MMIO_READ: 185 case MMIO_READ:
185 ret = trace_seq_printf(s, 186 ret = trace_seq_printf(s,
186 "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", 187 "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
187 rw->width, secs, usec_rem, rw->map_id, 188 rw->width, secs, usec_rem, rw->map_id,
188 (unsigned long long)rw->phys, 189 (unsigned long long)rw->phys,
189 rw->value, rw->pc, 0); 190 rw->value, rw->pc, 0);
190 break; 191 break;
191 case MMIO_WRITE: 192 case MMIO_WRITE:
192 ret = trace_seq_printf(s, 193 ret = trace_seq_printf(s,
193 "W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", 194 "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
194 rw->width, secs, usec_rem, rw->map_id, 195 rw->width, secs, usec_rem, rw->map_id,
195 (unsigned long long)rw->phys, 196 (unsigned long long)rw->phys,
196 rw->value, rw->pc, 0); 197 rw->value, rw->pc, 0);
197 break; 198 break;
198 case MMIO_UNKNOWN_OP: 199 case MMIO_UNKNOWN_OP:
199 ret = trace_seq_printf(s, 200 ret = trace_seq_printf(s,
200 "UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n", 201 "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
202 "%02lx 0x%lx %d\n",
201 secs, usec_rem, rw->map_id, 203 secs, usec_rem, rw->map_id,
202 (unsigned long long)rw->phys, 204 (unsigned long long)rw->phys,
203 (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff, 205 (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
@@ -229,14 +231,14 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
229 switch (m->opcode) { 231 switch (m->opcode) {
230 case MMIO_PROBE: 232 case MMIO_PROBE:
231 ret = trace_seq_printf(s, 233 ret = trace_seq_printf(s,
232 "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", 234 "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
233 secs, usec_rem, m->map_id, 235 secs, usec_rem, m->map_id,
234 (unsigned long long)m->phys, m->virt, m->len, 236 (unsigned long long)m->phys, m->virt, m->len,
235 0UL, 0); 237 0UL, 0);
236 break; 238 break;
237 case MMIO_UNPROBE: 239 case MMIO_UNPROBE:
238 ret = trace_seq_printf(s, 240 ret = trace_seq_printf(s,
239 "UNMAP %lu.%06lu %d 0x%lx %d\n", 241 "UNMAP %u.%06lu %d 0x%lx %d\n",
240 secs, usec_rem, m->map_id, 0UL, 0); 242 secs, usec_rem, m->map_id, 0UL, 0);
241 break; 243 break;
242 default: 244 default:
@@ -255,18 +257,15 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
255 const char *msg = print->buf; 257 const char *msg = print->buf;
256 struct trace_seq *s = &iter->seq; 258 struct trace_seq *s = &iter->seq;
257 unsigned long long t = ns2usecs(iter->ts); 259 unsigned long long t = ns2usecs(iter->ts);
258 unsigned long usec_rem = do_div(t, 1000000ULL); 260 unsigned long usec_rem = do_div(t, USEC_PER_SEC);
259 unsigned secs = (unsigned long)t; 261 unsigned secs = (unsigned long)t;
260 int ret; 262 int ret;
261 263
262 /* The trailing newline must be in the message. */ 264 /* The trailing newline must be in the message. */
263 ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg); 265 ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
264 if (!ret) 266 if (!ret)
265 return TRACE_TYPE_PARTIAL_LINE; 267 return TRACE_TYPE_PARTIAL_LINE;
266 268
267 if (entry->flags & TRACE_FLAG_CONT)
268 trace_seq_print_cont(s, iter);
269
270 return TRACE_TYPE_HANDLED; 269 return TRACE_TYPE_HANDLED;
271} 270}
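
The mmiotrace hunks are format-string hygiene: secs is declared unsigned, and the byte values masked out of rw->value are unsigned long, so the conversions are switched to %u and %02lx to match. A short userspace illustration of the kind of mismatch -Wformat reports, assuming gcc or clang with format checking enabled:

#include <stdio.h>

int main(void)
{
        unsigned int secs = 4660;       /* same type as in mmio_print_mark() */
        unsigned long usec_rem = 123456;
        unsigned long byte = 0xab;

        /* Correct: %u for unsigned int, %06lu / %02lx for unsigned long. */
        printf("MARK %u.%06lu byte=%02lx\n", secs, usec_rem, byte);

        /*
         * The old specifiers would have been:
         *   printf("MARK %lu.%06lu byte=%02x\n", secs, usec_rem, byte);
         * where -Wformat warns that %lu expects unsigned long but secs
         * is unsigned int, and %02x expects unsigned int but byte is
         * unsigned long.
         */
        return 0;
}
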
272 271
@@ -308,21 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
308{ 307{
309 struct ring_buffer_event *event; 308 struct ring_buffer_event *event;
310 struct trace_mmiotrace_rw *entry; 309 struct trace_mmiotrace_rw *entry;
311 unsigned long irq_flags; 310 int pc = preempt_count();
312 311
313 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 312 event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
314 &irq_flags); 313 sizeof(*entry), 0, pc);
315 if (!event) { 314 if (!event) {
316 atomic_inc(&dropped_count); 315 atomic_inc(&dropped_count);
317 return; 316 return;
318 } 317 }
319 entry = ring_buffer_event_data(event); 318 entry = ring_buffer_event_data(event);
320 tracing_generic_entry_update(&entry->ent, 0, preempt_count());
321 entry->ent.type = TRACE_MMIO_RW;
322 entry->rw = *rw; 319 entry->rw = *rw;
323 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 320 trace_buffer_unlock_commit(tr, event, 0, pc);
324
325 trace_wake_up();
326} 321}
327 322
328void mmio_trace_rw(struct mmiotrace_rw *rw) 323void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -338,21 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
338{ 333{
339 struct ring_buffer_event *event; 334 struct ring_buffer_event *event;
340 struct trace_mmiotrace_map *entry; 335 struct trace_mmiotrace_map *entry;
341 unsigned long irq_flags; 336 int pc = preempt_count();
342 337
343 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 338 event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
344 &irq_flags); 339 sizeof(*entry), 0, pc);
345 if (!event) { 340 if (!event) {
346 atomic_inc(&dropped_count); 341 atomic_inc(&dropped_count);
347 return; 342 return;
348 } 343 }
349 entry = ring_buffer_event_data(event); 344 entry = ring_buffer_event_data(event);
350 tracing_generic_entry_update(&entry->ent, 0, preempt_count());
351 entry->ent.type = TRACE_MMIO_MAP;
352 entry->map = *map; 345 entry->map = *map;
353 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 346 trace_buffer_unlock_commit(tr, event, 0, pc);
354
355 trace_wake_up();
356} 347}
357 348
358void mmio_trace_mapping(struct mmiotrace_map *map) 349void mmio_trace_mapping(struct mmiotrace_map *map)
@@ -368,5 +359,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
368 359
369int mmio_trace_printk(const char *fmt, va_list args) 360int mmio_trace_printk(const char *fmt, va_list args)
370{ 361{
371 return trace_vprintk(0, -1, fmt, args); 362 return trace_vprintk(0, fmt, args);
372} 363}
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index b9767acd30ac..394f94417e2f 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -47,12 +47,7 @@ static void stop_nop_trace(struct trace_array *tr)
47 47
48static int nop_trace_init(struct trace_array *tr) 48static int nop_trace_init(struct trace_array *tr)
49{ 49{
50 int cpu;
51 ctx_trace = tr; 50 ctx_trace = tr;
52
53 for_each_online_cpu(cpu)
54 tracing_reset(tr, cpu);
55
56 start_nop_trace(tr); 51 start_nop_trace(tr);
57 return 0; 52 return 0;
58} 53}
@@ -96,6 +91,7 @@ struct tracer nop_trace __read_mostly =
96 .name = "nop", 91 .name = "nop",
97 .init = nop_trace_init, 92 .init = nop_trace_init,
98 .reset = nop_trace_reset, 93 .reset = nop_trace_reset,
94 .wait_pipe = poll_wait_pipe,
99#ifdef CONFIG_FTRACE_SELFTEST 95#ifdef CONFIG_FTRACE_SELFTEST
100 .selftest = trace_selftest_startup_nop, 96 .selftest = trace_selftest_startup_nop,
101#endif 97#endif
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
new file mode 100644
index 000000000000..d72b9a63b247
--- /dev/null
+++ b/kernel/trace/trace_output.c
@@ -0,0 +1,1017 @@
1/*
2 * trace_output.c
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/mutex.h>
10#include <linux/ftrace.h>
11
12#include "trace_output.h"
13
14/* must be a power of 2 */
15#define EVENT_HASHSIZE 128
16
17static DEFINE_MUTEX(trace_event_mutex);
18static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
19
20static int next_event_type = __TRACE_LAST_TYPE + 1;
21
22enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
23{
24 struct trace_seq *s = &iter->seq;
25 struct trace_entry *entry = iter->ent;
26 struct bprint_entry *field;
27 int ret;
28
29 trace_assign_type(field, entry);
30
31 ret = trace_seq_bprintf(s, field->fmt, field->buf);
32 if (!ret)
33 return TRACE_TYPE_PARTIAL_LINE;
34
35 return TRACE_TYPE_HANDLED;
36}
37
38enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
39{
40 struct trace_seq *s = &iter->seq;
41 struct trace_entry *entry = iter->ent;
42 struct print_entry *field;
43 int ret;
44
45 trace_assign_type(field, entry);
46
47 ret = trace_seq_printf(s, "%s", field->buf);
48 if (!ret)
49 return TRACE_TYPE_PARTIAL_LINE;
50
51 return TRACE_TYPE_HANDLED;
52}
53
54/**
55 * trace_seq_printf - sequence printing of trace information
56 * @s: trace sequence descriptor
57 * @fmt: printf format string
58 *
59 * The tracer may use either sequence operations or its own
61 * copy to user routines. To simplify formatting of a trace,
61 * trace_seq_printf is used to store strings into a special
62 * buffer (@s). Then the output may be either used by
63 * the sequencer or pulled into another buffer.
64 */
65int
66trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
67{
68 int len = (PAGE_SIZE - 1) - s->len;
69 va_list ap;
70 int ret;
71
72 if (!len)
73 return 0;
74
75 va_start(ap, fmt);
76 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
77 va_end(ap);
78
79 /* If we can't write it all, don't bother writing anything */
80 if (ret >= len)
81 return 0;
82
83 s->len += ret;
84
85 return len;
86}
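
trace_seq_printf() is an all-or-nothing append into a page-sized buffer: if the formatted text would not fit into the space left, nothing is kept and 0 is returned, which is exactly the condition the graph and output code turn into TRACE_TYPE_PARTIAL_LINE. A minimal userspace sketch of the same contract, with the buffer shrunk so the rejection path is easy to trigger:

#include <stdarg.h>
#include <stdio.h>

#define SEQ_SIZE 32             /* stand-in for PAGE_SIZE - 1 */

struct seq {
        char buf[SEQ_SIZE + 1];
        int len;
};

/* Append formatted text; keep nothing and return 0 if it cannot fit
 * completely, mirroring trace_seq_printf()'s contract. */
static int seq_printf(struct seq *s, const char *fmt, ...)
{
        int room = SEQ_SIZE - s->len;
        va_list ap;
        int ret;

        if (!room)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buf + s->len, room + 1, fmt, ap);
        va_end(ap);

        if (ret < 0 || ret > room) {    /* error or would be truncated */
                s->buf[s->len] = '\0';  /* discard the partial write */
                return 0;
        }
        s->len += ret;
        return room;
}

int main(void)
{
        struct seq s = { "", 0 };

        printf("short append returns %d\n",
               seq_printf(&s, "%5lu.%06lu | ", 4660UL, 123456UL));
        printf("oversized append returns %d\n",
               seq_printf(&s, "%s", "this line is much too long to fit"));
        printf("buffer: \"%s\"\n", s.buf);
        return 0;
}
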
87
88int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
89{
90 int len = (PAGE_SIZE - 1) - s->len;
91 int ret;
92
93 if (!len)
94 return 0;
95
96 ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
97
98 /* If we can't write it all, don't bother writing anything */
99 if (ret >= len)
100 return 0;
101
102 s->len += ret;
103
104 return len;
105}
106
107/**
108 * trace_seq_puts - trace sequence printing of simple string
109 * @s: trace sequence descriptor
110 * @str: simple string to record
111 *
112 * The tracer may use either the sequence operations or its own
113 * copy to user routines. This function records a simple string
114 * into a special buffer (@s) for later retrieval by a sequencer
115 * or other mechanism.
116 */
117int trace_seq_puts(struct trace_seq *s, const char *str)
118{
119 int len = strlen(str);
120
121 if (len > ((PAGE_SIZE - 1) - s->len))
122 return 0;
123
124 memcpy(s->buffer + s->len, str, len);
125 s->len += len;
126
127 return len;
128}
129
130int trace_seq_putc(struct trace_seq *s, unsigned char c)
131{
132 if (s->len >= (PAGE_SIZE - 1))
133 return 0;
134
135 s->buffer[s->len++] = c;
136
137 return 1;
138}
139
140int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
141{
142 if (len > ((PAGE_SIZE - 1) - s->len))
143 return 0;
144
145 memcpy(s->buffer + s->len, mem, len);
146 s->len += len;
147
148 return len;
149}
150
151int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
152{
153 unsigned char hex[HEX_CHARS];
154 const unsigned char *data = mem;
155 int i, j;
156
157#ifdef __BIG_ENDIAN
158 for (i = 0, j = 0; i < len; i++) {
159#else
160 for (i = len-1, j = 0; i >= 0; i--) {
161#endif
162 hex[j++] = hex_asc_hi(data[i]);
163 hex[j++] = hex_asc_lo(data[i]);
164 }
165 hex[j++] = ' ';
166
167 return trace_seq_putmem(s, hex, j);
168}
169
170void *trace_seq_reserve(struct trace_seq *s, size_t len)
171{
172 void *ret;
173
174 if (len > ((PAGE_SIZE - 1) - s->len))
175 return NULL;
176
177 ret = s->buffer + s->len;
178 s->len += len;
179
180 return ret;
181}
182
183int trace_seq_path(struct trace_seq *s, struct path *path)
184{
185 unsigned char *p;
186
187 if (s->len >= (PAGE_SIZE - 1))
188 return 0;
189 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
190 if (!IS_ERR(p)) {
191 p = mangle_path(s->buffer + s->len, p, "\n");
192 if (p) {
193 s->len = p - s->buffer;
194 return 1;
195 }
196 } else {
197 s->buffer[s->len++] = '?';
198 return 1;
199 }
200
201 return 0;
202}
203
204#ifdef CONFIG_KRETPROBES
205static inline const char *kretprobed(const char *name)
206{
207 static const char tramp_name[] = "kretprobe_trampoline";
208 int size = sizeof(tramp_name);
209
210 if (strncmp(tramp_name, name, size) == 0)
211 return "[unknown/kretprobe'd]";
212 return name;
213}
214#else
215static inline const char *kretprobed(const char *name)
216{
217 return name;
218}
219#endif /* CONFIG_KRETPROBES */
220
221static int
222seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
223{
224#ifdef CONFIG_KALLSYMS
225 char str[KSYM_SYMBOL_LEN];
226 const char *name;
227
228 kallsyms_lookup(address, NULL, NULL, NULL, str);
229
230 name = kretprobed(str);
231
232 return trace_seq_printf(s, fmt, name);
233#endif
234 return 1;
235}
236
237static int
238seq_print_sym_offset(struct trace_seq *s, const char *fmt,
239 unsigned long address)
240{
241#ifdef CONFIG_KALLSYMS
242 char str[KSYM_SYMBOL_LEN];
243 const char *name;
244
245 sprint_symbol(str, address);
246 name = kretprobed(str);
247
248 return trace_seq_printf(s, fmt, name);
249#endif
250 return 1;
251}
252
253#ifndef CONFIG_64BIT
254# define IP_FMT "%08lx"
255#else
256# define IP_FMT "%016lx"
257#endif
258
259int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
260 unsigned long ip, unsigned long sym_flags)
261{
262 struct file *file = NULL;
263 unsigned long vmstart = 0;
264 int ret = 1;
265
266 if (mm) {
267 const struct vm_area_struct *vma;
268
269 down_read(&mm->mmap_sem);
270 vma = find_vma(mm, ip);
271 if (vma) {
272 file = vma->vm_file;
273 vmstart = vma->vm_start;
274 }
275 if (file) {
276 ret = trace_seq_path(s, &file->f_path);
277 if (ret)
278 ret = trace_seq_printf(s, "[+0x%lx]",
279 ip - vmstart);
280 }
281 up_read(&mm->mmap_sem);
282 }
283 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
284 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
285 return ret;
286}
287
288int
289seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
290 unsigned long sym_flags)
291{
292 struct mm_struct *mm = NULL;
293 int ret = 1;
294 unsigned int i;
295
296 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
297 struct task_struct *task;
298 /*
299 * we do the lookup on the thread group leader,
300 * since individual threads might have already quit!
301 */
302 rcu_read_lock();
303 task = find_task_by_vpid(entry->ent.tgid);
304 if (task)
305 mm = get_task_mm(task);
306 rcu_read_unlock();
307 }
308
309 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
310 unsigned long ip = entry->caller[i];
311
312 if (ip == ULONG_MAX || !ret)
313 break;
314 if (i && ret)
315 ret = trace_seq_puts(s, " <- ");
316 if (!ip) {
317 if (ret)
318 ret = trace_seq_puts(s, "??");
319 continue;
320 }
321 if (!ret)
322 break;
323 if (ret)
324 ret = seq_print_user_ip(s, mm, ip, sym_flags);
325 }
326
327 if (mm)
328 mmput(mm);
329 return ret;
330}
331
332int
333seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
334{
335 int ret;
336
337 if (!ip)
338 return trace_seq_printf(s, "0");
339
340 if (sym_flags & TRACE_ITER_SYM_OFFSET)
341 ret = seq_print_sym_offset(s, "%s", ip);
342 else
343 ret = seq_print_sym_short(s, "%s", ip);
344
345 if (!ret)
346 return 0;
347
348 if (sym_flags & TRACE_ITER_SYM_ADDR)
349 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
350 return ret;
351}
352
353static int
354lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
355{
356 int hardirq, softirq;
357 char comm[TASK_COMM_LEN];
358
359 trace_find_cmdline(entry->pid, comm);
360 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
361 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
362
363 if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
364 comm, entry->pid, cpu,
365 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
366 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
367 'X' : '.',
368 (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
369 'N' : '.',
370 (hardirq && softirq) ? 'H' :
371 hardirq ? 'h' : softirq ? 's' : '.'))
372 return 0;
373
374 if (entry->preempt_count)
375 return trace_seq_printf(s, "%x", entry->preempt_count);
376 return trace_seq_puts(s, ".");
377}
378
379static unsigned long preempt_mark_thresh = 100;
380
381static int
382lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
383 unsigned long rel_usecs)
384{
385 return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
386 rel_usecs > preempt_mark_thresh ? '!' :
387 rel_usecs > 1 ? '+' : ' ');
388}
389
390int trace_print_context(struct trace_iterator *iter)
391{
392 struct trace_seq *s = &iter->seq;
393 struct trace_entry *entry = iter->ent;
394 unsigned long long t = ns2usecs(iter->ts);
395 unsigned long usec_rem = do_div(t, USEC_PER_SEC);
396 unsigned long secs = (unsigned long)t;
397 char comm[TASK_COMM_LEN];
398
399 trace_find_cmdline(entry->pid, comm);
400
401 return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
402 comm, entry->pid, iter->cpu, secs, usec_rem);
403}
404
405int trace_print_lat_context(struct trace_iterator *iter)
406{
407 u64 next_ts;
408 int ret;
409 struct trace_seq *s = &iter->seq;
410 struct trace_entry *entry = iter->ent,
411 *next_entry = trace_find_next_entry(iter, NULL,
412 &next_ts);
413 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
414 unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
415 unsigned long rel_usecs;
416
417 if (!next_entry)
418 next_ts = iter->ts;
419 rel_usecs = ns2usecs(next_ts - iter->ts);
420
421 if (verbose) {
422 char comm[TASK_COMM_LEN];
423
424 trace_find_cmdline(entry->pid, comm);
425
426 ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
427 " %ld.%03ldms (+%ld.%03ldms): ", comm,
428 entry->pid, iter->cpu, entry->flags,
429 entry->preempt_count, iter->idx,
430 ns2usecs(iter->ts),
431 abs_usecs / USEC_PER_MSEC,
432 abs_usecs % USEC_PER_MSEC,
433 rel_usecs / USEC_PER_MSEC,
434 rel_usecs % USEC_PER_MSEC);
435 } else {
436 ret = lat_print_generic(s, entry, iter->cpu);
437 if (ret)
438 ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
439 }
440
441 return ret;
442}
443
444static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
445
446static int task_state_char(unsigned long state)
447{
448 int bit = state ? __ffs(state) + 1 : 0;
449
450 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
451}
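
task_state_char() folds a task state bitmask down to a single character: the index of the lowest set bit plus one (zero meaning running) selects a position in TASK_STATE_TO_CHAR_STR, and anything beyond the known states prints '?'. A small userspace sketch of the same mapping; the state string here is a hypothetical stand-in since TASK_STATE_TO_CHAR_STR lives in the kernel headers.

#include <stdio.h>

/* Hypothetical stand-in for TASK_STATE_TO_CHAR_STR. */
static const char state_to_char[] = "RSDTtZX";

static int task_state_char(unsigned long state)
{
        /* ffsl() is one-based, which matches __ffs(state) + 1 above. */
        int bit = state ? __builtin_ffsl((long)state) : 0;

        return bit < (int)sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

int main(void)
{
        printf("state 0x000 -> %c\n", task_state_char(0));      /* running */
        printf("state 0x001 -> %c\n", task_state_char(1));      /* sleeping */
        printf("state 0x002 -> %c\n", task_state_char(2));      /* disk sleep */
        printf("state 0x100 -> %c\n", task_state_char(0x100));  /* unknown */
        return 0;
}
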
452
453/**
454 * ftrace_find_event - find a registered event
455 * @type: the type of event to look for
456 *
457 * Returns an event of type @type otherwise NULL
458 */
459struct trace_event *ftrace_find_event(int type)
460{
461 struct trace_event *event;
462 struct hlist_node *n;
463 unsigned key;
464
465 key = type & (EVENT_HASHSIZE - 1);
466
467 hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
468 if (event->type == type)
469 return event;
470 }
471
472 return NULL;
473}
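
ftrace_find_event() resolves an event type number to its output callbacks through a small hash table; because EVENT_HASHSIZE is a power of two, the bucket index is simply type & (EVENT_HASHSIZE - 1). A tiny userspace sketch of that bucket selection (the hlist/RCU machinery is left out):

#include <stdio.h>

#define EVENT_HASHSIZE 128      /* must be a power of two for the mask trick */

/* For a power-of-two table size, masking with (size - 1) is equivalent
 * to type % size but avoids a division. */
static unsigned int event_bucket(int type)
{
        return (unsigned int)type & (EVENT_HASHSIZE - 1);
}

int main(void)
{
        int types[] = { 1, 14, 127, 128, 300 };
        unsigned int i;

        for (i = 0; i < sizeof(types) / sizeof(types[0]); i++)
                printf("type %3d -> bucket %u\n", types[i], event_bucket(types[i]));
        return 0;
}
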
474
475/**
476 * register_ftrace_event - register output for an event type
477 * @event: the event type to register
478 *
479 * Event types are stored in a hash and this hash is used to
480 * find a way to print an event. If the @event->type is set
481 * then it will use that type, otherwise it will assign a
482 * type to use.
483 *
484 * If you assign your own type, please make sure it is added
485 * to the trace_type enum in trace.h, to avoid collisions
486 * with the dynamic types.
487 *
488 * Returns the event type number or zero on error.
489 */
490int register_ftrace_event(struct trace_event *event)
491{
492 unsigned key;
493 int ret = 0;
494
495 mutex_lock(&trace_event_mutex);
496
497 if (!event) {
498 ret = next_event_type++;
499 goto out;
500 }
501
502 if (!event->type)
503 event->type = next_event_type++;
504 else if (event->type > __TRACE_LAST_TYPE) {
505 printk(KERN_WARNING "Need to add type to trace.h\n");
506 WARN_ON(1);
507 }
508
509 if (ftrace_find_event(event->type))
510 goto out;
511
512 if (event->trace == NULL)
513 event->trace = trace_nop_print;
514 if (event->raw == NULL)
515 event->raw = trace_nop_print;
516 if (event->hex == NULL)
517 event->hex = trace_nop_print;
518 if (event->binary == NULL)
519 event->binary = trace_nop_print;
520
521 key = event->type & (EVENT_HASHSIZE - 1);
522
523 hlist_add_head_rcu(&event->node, &event_hash[key]);
524
525 ret = event->type;
526 out:
527 mutex_unlock(&trace_event_mutex);
528
529 return ret;
530}
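For illustration, a hedged sketch of how a tracer would plug into this registration path. The names below (my_event_print, my_trace_event, my_event_init) are hypothetical and not part of this patch; the real in-tree users are the static trace_*_event definitions later in this file.

/* Hypothetical example -- not part of this patch. */
static enum print_line_t my_event_print(struct trace_iterator *iter, int flags)
{
	/* Emit one line for the entry; signal a partial line if the seq buffer fills. */
	if (!trace_seq_printf(&iter->seq, "my event\n"))
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static struct trace_event my_trace_event = {
	.type	= 0,		/* 0: let register_ftrace_event() assign a dynamic type */
	.trace	= my_event_print,
	/* .raw, .hex and .binary left NULL: they default to trace_nop_print */
};

static __init int my_event_init(void)
{
	/* register_ftrace_event() returns the (possibly assigned) type, or 0 on failure. */
	if (!register_ftrace_event(&my_trace_event))
		pr_warning("my_trace_event failed to register\n");
	return 0;
}
device_initcall(my_event_init);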
531
532/**
533 * unregister_ftrace_event - remove a no longer used event
534 * @event: the event to remove
535 */
536int unregister_ftrace_event(struct trace_event *event)
537{
538 mutex_lock(&trace_event_mutex);
539 hlist_del(&event->node);
540 mutex_unlock(&trace_event_mutex);
541
542 return 0;
543}
544
545/*
546 * Standard events
547 */
548
549enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
550{
551 return TRACE_TYPE_HANDLED;
552}
553
554/* TRACE_FN */
555static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
556{
557 struct ftrace_entry *field;
558 struct trace_seq *s = &iter->seq;
559
560 trace_assign_type(field, iter->ent);
561
562 if (!seq_print_ip_sym(s, field->ip, flags))
563 goto partial;
564
565 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
566 if (!trace_seq_printf(s, " <-"))
567 goto partial;
568 if (!seq_print_ip_sym(s,
569 field->parent_ip,
570 flags))
571 goto partial;
572 }
573 if (!trace_seq_printf(s, "\n"))
574 goto partial;
575
576 return TRACE_TYPE_HANDLED;
577
578 partial:
579 return TRACE_TYPE_PARTIAL_LINE;
580}
581
582static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
583{
584 struct ftrace_entry *field;
585
586 trace_assign_type(field, iter->ent);
587
588 if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
589 field->ip,
590 field->parent_ip))
591 return TRACE_TYPE_PARTIAL_LINE;
592
593 return TRACE_TYPE_HANDLED;
594}
595
596static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
597{
598 struct ftrace_entry *field;
599 struct trace_seq *s = &iter->seq;
600
601 trace_assign_type(field, iter->ent);
602
603 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
604 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
605
606 return TRACE_TYPE_HANDLED;
607}
608
609static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
610{
611 struct ftrace_entry *field;
612 struct trace_seq *s = &iter->seq;
613
614 trace_assign_type(field, iter->ent);
615
616 SEQ_PUT_FIELD_RET(s, field->ip);
617 SEQ_PUT_FIELD_RET(s, field->parent_ip);
618
619 return TRACE_TYPE_HANDLED;
620}
621
622static struct trace_event trace_fn_event = {
623 .type = TRACE_FN,
624 .trace = trace_fn_trace,
625 .raw = trace_fn_raw,
626 .hex = trace_fn_hex,
627 .binary = trace_fn_bin,
628};
629
630/* TRACE_CTX and TRACE_WAKE */
631static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
632 char *delim)
633{
634 struct ctx_switch_entry *field;
635 char comm[TASK_COMM_LEN];
636 int S, T;
637
638
639 trace_assign_type(field, iter->ent);
640
641 T = task_state_char(field->next_state);
642 S = task_state_char(field->prev_state);
643 trace_find_cmdline(field->next_pid, comm);
644 if (!trace_seq_printf(&iter->seq,
645 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
646 field->prev_pid,
647 field->prev_prio,
648 S, delim,
649 field->next_cpu,
650 field->next_pid,
651 field->next_prio,
652 T, comm))
653 return TRACE_TYPE_PARTIAL_LINE;
654
655 return TRACE_TYPE_HANDLED;
656}
657
658static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
659{
660 return trace_ctxwake_print(iter, "==>");
661}
662
663static enum print_line_t trace_wake_print(struct trace_iterator *iter,
664 int flags)
665{
666 return trace_ctxwake_print(iter, " +");
667}
668
669static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
670{
671 struct ctx_switch_entry *field;
672 int T;
673
674 trace_assign_type(field, iter->ent);
675
676 if (!S)
677 S = task_state_char(field->prev_state);
678 T = task_state_char(field->next_state);
679 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
680 field->prev_pid,
681 field->prev_prio,
682 S,
683 field->next_cpu,
684 field->next_pid,
685 field->next_prio,
686 T))
687 return TRACE_TYPE_PARTIAL_LINE;
688
689 return TRACE_TYPE_HANDLED;
690}
691
692static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
693{
694 return trace_ctxwake_raw(iter, 0);
695}
696
697static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
698{
699 return trace_ctxwake_raw(iter, '+');
700}
701
702
703static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
704{
705 struct ctx_switch_entry *field;
706 struct trace_seq *s = &iter->seq;
707 int T;
708
709 trace_assign_type(field, iter->ent);
710
711 if (!S)
712 S = task_state_char(field->prev_state);
713 T = task_state_char(field->next_state);
714
715 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
716 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
717 SEQ_PUT_HEX_FIELD_RET(s, S);
718 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
719 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
720 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
721 SEQ_PUT_HEX_FIELD_RET(s, T);
722
723 return TRACE_TYPE_HANDLED;
724}
725
726static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
727{
728 return trace_ctxwake_hex(iter, 0);
729}
730
731static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
732{
733 return trace_ctxwake_hex(iter, '+');
734}
735
736static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
737 int flags)
738{
739 struct ctx_switch_entry *field;
740 struct trace_seq *s = &iter->seq;
741
742 trace_assign_type(field, iter->ent);
743
744 SEQ_PUT_FIELD_RET(s, field->prev_pid);
745 SEQ_PUT_FIELD_RET(s, field->prev_prio);
746 SEQ_PUT_FIELD_RET(s, field->prev_state);
747 SEQ_PUT_FIELD_RET(s, field->next_pid);
748 SEQ_PUT_FIELD_RET(s, field->next_prio);
749 SEQ_PUT_FIELD_RET(s, field->next_state);
750
751 return TRACE_TYPE_HANDLED;
752}
753
754static struct trace_event trace_ctx_event = {
755 .type = TRACE_CTX,
756 .trace = trace_ctx_print,
757 .raw = trace_ctx_raw,
758 .hex = trace_ctx_hex,
759 .binary = trace_ctxwake_bin,
760};
761
762static struct trace_event trace_wake_event = {
763 .type = TRACE_WAKE,
764 .trace = trace_wake_print,
765 .raw = trace_wake_raw,
766 .hex = trace_wake_hex,
767 .binary = trace_ctxwake_bin,
768};
769
770/* TRACE_SPECIAL */
771static enum print_line_t trace_special_print(struct trace_iterator *iter,
772 int flags)
773{
774 struct special_entry *field;
775
776 trace_assign_type(field, iter->ent);
777
778 if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
779 field->arg1,
780 field->arg2,
781 field->arg3))
782 return TRACE_TYPE_PARTIAL_LINE;
783
784 return TRACE_TYPE_HANDLED;
785}
786
787static enum print_line_t trace_special_hex(struct trace_iterator *iter,
788 int flags)
789{
790 struct special_entry *field;
791 struct trace_seq *s = &iter->seq;
792
793 trace_assign_type(field, iter->ent);
794
795 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
796 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
797 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
798
799 return TRACE_TYPE_HANDLED;
800}
801
802static enum print_line_t trace_special_bin(struct trace_iterator *iter,
803 int flags)
804{
805 struct special_entry *field;
806 struct trace_seq *s = &iter->seq;
807
808 trace_assign_type(field, iter->ent);
809
810 SEQ_PUT_FIELD_RET(s, field->arg1);
811 SEQ_PUT_FIELD_RET(s, field->arg2);
812 SEQ_PUT_FIELD_RET(s, field->arg3);
813
814 return TRACE_TYPE_HANDLED;
815}
816
817static struct trace_event trace_special_event = {
818 .type = TRACE_SPECIAL,
819 .trace = trace_special_print,
820 .raw = trace_special_print,
821 .hex = trace_special_hex,
822 .binary = trace_special_bin,
823};
824
825/* TRACE_STACK */
826
827static enum print_line_t trace_stack_print(struct trace_iterator *iter,
828 int flags)
829{
830 struct stack_entry *field;
831 struct trace_seq *s = &iter->seq;
832 int i;
833
834 trace_assign_type(field, iter->ent);
835
836 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
837 if (i) {
838 if (!trace_seq_puts(s, " <= "))
839 goto partial;
840 }
841
842 if (!seq_print_ip_sym(s, field->caller[i], flags))
843 goto partial;
844 }
845 if (!trace_seq_puts(s, "\n"))
846 goto partial;
847
848 return TRACE_TYPE_HANDLED;
849
850 partial:
851 return TRACE_TYPE_PARTIAL_LINE;
852}
853
854static struct trace_event trace_stack_event = {
855 .type = TRACE_STACK,
856 .trace = trace_stack_print,
857 .raw = trace_special_print,
858 .hex = trace_special_hex,
859 .binary = trace_special_bin,
860};
861
862/* TRACE_USER_STACK */
863static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
864 int flags)
865{
866 struct userstack_entry *field;
867 struct trace_seq *s = &iter->seq;
868
869 trace_assign_type(field, iter->ent);
870
871 if (!seq_print_userip_objs(field, s, flags))
872 goto partial;
873
874 if (!trace_seq_putc(s, '\n'))
875 goto partial;
876
877 return TRACE_TYPE_HANDLED;
878
879 partial:
880 return TRACE_TYPE_PARTIAL_LINE;
881}
882
883static struct trace_event trace_user_stack_event = {
884 .type = TRACE_USER_STACK,
885 .trace = trace_user_stack_print,
886 .raw = trace_special_print,
887 .hex = trace_special_hex,
888 .binary = trace_special_bin,
889};
890
891/* TRACE_BPRINT */
892static enum print_line_t
893trace_bprint_print(struct trace_iterator *iter, int flags)
894{
895 struct trace_entry *entry = iter->ent;
896 struct trace_seq *s = &iter->seq;
897 struct bprint_entry *field;
898
899 trace_assign_type(field, entry);
900
901 if (!seq_print_ip_sym(s, field->ip, flags))
902 goto partial;
903
904 if (!trace_seq_puts(s, ": "))
905 goto partial;
906
907 if (!trace_seq_bprintf(s, field->fmt, field->buf))
908 goto partial;
909
910 return TRACE_TYPE_HANDLED;
911
912 partial:
913 return TRACE_TYPE_PARTIAL_LINE;
914}
915
916
917static enum print_line_t
918trace_bprint_raw(struct trace_iterator *iter, int flags)
919{
920 struct bprint_entry *field;
921 struct trace_seq *s = &iter->seq;
922
923 trace_assign_type(field, iter->ent);
924
925 if (!trace_seq_printf(s, ": %lx : ", field->ip))
926 goto partial;
927
928 if (!trace_seq_bprintf(s, field->fmt, field->buf))
929 goto partial;
930
931 return TRACE_TYPE_HANDLED;
932
933 partial:
934 return TRACE_TYPE_PARTIAL_LINE;
935}
936
937
938static struct trace_event trace_bprint_event = {
939 .type = TRACE_BPRINT,
940 .trace = trace_bprint_print,
941 .raw = trace_bprint_raw,
942};
943
944/* TRACE_PRINT */
945static enum print_line_t trace_print_print(struct trace_iterator *iter,
946 int flags)
947{
948 struct print_entry *field;
949 struct trace_seq *s = &iter->seq;
950
951 trace_assign_type(field, iter->ent);
952
953 if (!seq_print_ip_sym(s, field->ip, flags))
954 goto partial;
955
956 if (!trace_seq_printf(s, ": %s", field->buf))
957 goto partial;
958
959 return TRACE_TYPE_HANDLED;
960
961 partial:
962 return TRACE_TYPE_PARTIAL_LINE;
963}
964
965static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
966{
967 struct print_entry *field;
968
969 trace_assign_type(field, iter->ent);
970
971 if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
972 goto partial;
973
974 return TRACE_TYPE_HANDLED;
975
976 partial:
977 return TRACE_TYPE_PARTIAL_LINE;
978}
979
980static struct trace_event trace_print_event = {
981 .type = TRACE_PRINT,
982 .trace = trace_print_print,
983 .raw = trace_print_raw,
984};
985
986
987static struct trace_event *events[] __initdata = {
988 &trace_fn_event,
989 &trace_ctx_event,
990 &trace_wake_event,
991 &trace_special_event,
992 &trace_stack_event,
993 &trace_user_stack_event,
994 &trace_bprint_event,
995 &trace_print_event,
996 NULL
997};
998
999__init static int init_events(void)
1000{
1001 struct trace_event *event;
1002 int i, ret;
1003
1004 for (i = 0; events[i]; i++) {
1005 event = events[i];
1006
1007 ret = register_ftrace_event(event);
1008 if (!ret) {
1009 printk(KERN_WARNING "event %d failed to register\n",
1010 event->type);
1011 WARN_ON_ONCE(1);
1012 }
1013 }
1014
1015 return 0;
1016}
1017device_initcall(init_events);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
new file mode 100644
index 000000000000..e0bde39c2dd9
--- /dev/null
+++ b/kernel/trace/trace_output.h
@@ -0,0 +1,71 @@
1#ifndef __TRACE_EVENTS_H
2#define __TRACE_EVENTS_H
3
4#include "trace.h"
5
6typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
7 int flags);
8
9struct trace_event {
10 struct hlist_node node;
11 int type;
12 trace_print_func trace;
13 trace_print_func raw;
14 trace_print_func hex;
15 trace_print_func binary;
16};
17
18extern enum print_line_t
19trace_print_bprintk_msg_only(struct trace_iterator *iter);
20extern enum print_line_t
21trace_print_printk_msg_only(struct trace_iterator *iter);
22
23extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
24 __attribute__ ((format (printf, 2, 3)));
25extern int
26trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
27extern int
28seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
29 unsigned long sym_flags);
30extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
31 size_t cnt);
32extern int trace_seq_puts(struct trace_seq *s, const char *str);
33extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
34extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
35extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
36 size_t len);
37extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
38extern int trace_seq_path(struct trace_seq *s, struct path *path);
39extern int seq_print_userip_objs(const struct userstack_entry *entry,
40 struct trace_seq *s, unsigned long sym_flags);
41extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
42 unsigned long ip, unsigned long sym_flags);
43
44extern int trace_print_context(struct trace_iterator *iter);
45extern int trace_print_lat_context(struct trace_iterator *iter);
46
47extern struct trace_event *ftrace_find_event(int type);
48extern int register_ftrace_event(struct trace_event *event);
49extern int unregister_ftrace_event(struct trace_event *event);
50
51extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
52 int flags);
53
54#define MAX_MEMHEX_BYTES 8
55#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
56
57#define SEQ_PUT_FIELD_RET(s, x) \
58do { \
59 if (!trace_seq_putmem(s, &(x), sizeof(x))) \
60 return TRACE_TYPE_PARTIAL_LINE; \
61} while (0)
62
63#define SEQ_PUT_HEX_FIELD_RET(s, x) \
64do { \
65 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
66 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
67 return TRACE_TYPE_PARTIAL_LINE; \
68} while (0)
69
70#endif
71
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 7bda248daf55..bae791ebcc51 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -11,15 +11,113 @@
11 11
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/ftrace.h> 14#include <trace/power.h>
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/module.h> 16#include <linux/module.h>
17 17
18#include "trace.h" 18#include "trace.h"
19#include "trace_output.h"
19 20
20static struct trace_array *power_trace; 21static struct trace_array *power_trace;
21static int __read_mostly trace_power_enabled; 22static int __read_mostly trace_power_enabled;
22 23
24static void probe_power_start(struct power_trace *it, unsigned int type,
25 unsigned int level)
26{
27 if (!trace_power_enabled)
28 return;
29
30 memset(it, 0, sizeof(struct power_trace));
31 it->state = level;
32 it->type = type;
33 it->stamp = ktime_get();
34}
35
36
37static void probe_power_end(struct power_trace *it)
38{
39 struct ring_buffer_event *event;
40 struct trace_power *entry;
41 struct trace_array_cpu *data;
42 struct trace_array *tr = power_trace;
43
44 if (!trace_power_enabled)
45 return;
46
47 preempt_disable();
48 it->end = ktime_get();
49 data = tr->data[smp_processor_id()];
50
51 event = trace_buffer_lock_reserve(tr, TRACE_POWER,
52 sizeof(*entry), 0, 0);
53 if (!event)
54 goto out;
55 entry = ring_buffer_event_data(event);
56 entry->state_data = *it;
57 trace_buffer_unlock_commit(tr, event, 0, 0);
58 out:
59 preempt_enable();
60}
61
62static void probe_power_mark(struct power_trace *it, unsigned int type,
63 unsigned int level)
64{
65 struct ring_buffer_event *event;
66 struct trace_power *entry;
67 struct trace_array_cpu *data;
68 struct trace_array *tr = power_trace;
69
70 if (!trace_power_enabled)
71 return;
72
73 memset(it, 0, sizeof(struct power_trace));
74 it->state = level;
75 it->type = type;
76 it->stamp = ktime_get();
77 preempt_disable();
78 it->end = it->stamp;
79 data = tr->data[smp_processor_id()];
80
81 event = trace_buffer_lock_reserve(tr, TRACE_POWER,
82 sizeof(*entry), 0, 0);
83 if (!event)
84 goto out;
85 entry = ring_buffer_event_data(event);
86 entry->state_data = *it;
87 trace_buffer_unlock_commit(tr, event, 0, 0);
88 out:
89 preempt_enable();
90}
91
92static int tracing_power_register(void)
93{
94 int ret;
95
96 ret = register_trace_power_start(probe_power_start);
97 if (ret) {
98 pr_info("power trace: Couldn't activate tracepoint"
99 " probe to trace_power_start\n");
100 return ret;
101 }
102 ret = register_trace_power_end(probe_power_end);
103 if (ret) {
104 pr_info("power trace: Couldn't activate tracepoint"
105 " probe to trace_power_end\n");
106 goto fail_start;
107 }
108 ret = register_trace_power_mark(probe_power_mark);
109 if (ret) {
110 pr_info("power trace: Couldn't activate tracepoint"
111 " probe to trace_power_mark\n");
112 goto fail_end;
113 }
114 return ret;
115fail_end:
116 unregister_trace_power_end(probe_power_end);
117fail_start:
118 unregister_trace_power_start(probe_power_start);
119 return ret;
120}
23 121
24static void start_power_trace(struct trace_array *tr) 122static void start_power_trace(struct trace_array *tr)
25{ 123{
@@ -31,6 +129,14 @@ static void stop_power_trace(struct trace_array *tr)
31 trace_power_enabled = 0; 129 trace_power_enabled = 0;
32} 130}
33 131
132static void power_trace_reset(struct trace_array *tr)
133{
134 trace_power_enabled = 0;
135 unregister_trace_power_start(probe_power_start);
136 unregister_trace_power_end(probe_power_end);
137 unregister_trace_power_mark(probe_power_mark);
138}
139
34 140
35static int power_trace_init(struct trace_array *tr) 141static int power_trace_init(struct trace_array *tr)
36{ 142{
@@ -38,6 +144,7 @@ static int power_trace_init(struct trace_array *tr)
38 power_trace = tr; 144 power_trace = tr;
39 145
40 trace_power_enabled = 1; 146 trace_power_enabled = 1;
147 tracing_power_register();
41 148
42 for_each_cpu(cpu, cpu_possible_mask) 149 for_each_cpu(cpu, cpu_possible_mask)
43 tracing_reset(tr, cpu); 150 tracing_reset(tr, cpu);
@@ -85,7 +192,7 @@ static struct tracer power_tracer __read_mostly =
85 .init = power_trace_init, 192 .init = power_trace_init,
86 .start = start_power_trace, 193 .start = start_power_trace,
87 .stop = stop_power_trace, 194 .stop = stop_power_trace,
88 .reset = stop_power_trace, 195 .reset = power_trace_reset,
89 .print_line = power_print_line, 196 .print_line = power_print_line,
90}; 197};
91 198
@@ -94,86 +201,3 @@ static int init_power_trace(void)
94 return register_tracer(&power_tracer); 201 return register_tracer(&power_tracer);
95} 202}
96device_initcall(init_power_trace); 203device_initcall(init_power_trace);
97
98void trace_power_start(struct power_trace *it, unsigned int type,
99 unsigned int level)
100{
101 if (!trace_power_enabled)
102 return;
103
104 memset(it, 0, sizeof(struct power_trace));
105 it->state = level;
106 it->type = type;
107 it->stamp = ktime_get();
108}
109EXPORT_SYMBOL_GPL(trace_power_start);
110
111
112void trace_power_end(struct power_trace *it)
113{
114 struct ring_buffer_event *event;
115 struct trace_power *entry;
116 struct trace_array_cpu *data;
117 unsigned long irq_flags;
118 struct trace_array *tr = power_trace;
119
120 if (!trace_power_enabled)
121 return;
122
123 preempt_disable();
124 it->end = ktime_get();
125 data = tr->data[smp_processor_id()];
126
127 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
128 &irq_flags);
129 if (!event)
130 goto out;
131 entry = ring_buffer_event_data(event);
132 tracing_generic_entry_update(&entry->ent, 0, 0);
133 entry->ent.type = TRACE_POWER;
134 entry->state_data = *it;
135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
136
137 trace_wake_up();
138
139 out:
140 preempt_enable();
141}
142EXPORT_SYMBOL_GPL(trace_power_end);
143
144void trace_power_mark(struct power_trace *it, unsigned int type,
145 unsigned int level)
146{
147 struct ring_buffer_event *event;
148 struct trace_power *entry;
149 struct trace_array_cpu *data;
150 unsigned long irq_flags;
151 struct trace_array *tr = power_trace;
152
153 if (!trace_power_enabled)
154 return;
155
156 memset(it, 0, sizeof(struct power_trace));
157 it->state = level;
158 it->type = type;
159 it->stamp = ktime_get();
160 preempt_disable();
161 it->end = it->stamp;
162 data = tr->data[smp_processor_id()];
163
164 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
165 &irq_flags);
166 if (!event)
167 goto out;
168 entry = ring_buffer_event_data(event);
169 tracing_generic_entry_update(&entry->ent, 0, 0);
170 entry->ent.type = TRACE_POWER;
171 entry->state_data = *it;
172 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
173
174 trace_wake_up();
175
176 out:
177 preempt_enable();
178}
179EXPORT_SYMBOL_GPL(trace_power_mark);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
new file mode 100644
index 000000000000..eb81556107fe
--- /dev/null
+++ b/kernel/trace/trace_printk.c
@@ -0,0 +1,270 @@
1/*
2 * trace binary printk
3 *
4 * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
5 *
6 */
7#include <linux/seq_file.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/kernel.h>
11#include <linux/ftrace.h>
12#include <linux/string.h>
13#include <linux/module.h>
14#include <linux/marker.h>
15#include <linux/mutex.h>
16#include <linux/ctype.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/fs.h>
20
21#include "trace.h"
22
23#ifdef CONFIG_MODULES
24
25/*
26 * A module's trace_printk() format strings are saved in struct trace_bprintk_fmt
27 * entries, which are queued on trace_bprintk_fmt_list.
28 */
29static LIST_HEAD(trace_bprintk_fmt_list);
30
31/* serialize accesses to trace_bprintk_fmt_list */
32static DEFINE_MUTEX(btrace_mutex);
33
34struct trace_bprintk_fmt {
35 struct list_head list;
36 char fmt[0];
37};
38
39static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
40{
41 struct trace_bprintk_fmt *pos;
42 list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
43 if (!strcmp(pos->fmt, fmt))
44 return pos;
45 }
46 return NULL;
47}
48
49static
50void hold_module_trace_bprintk_format(const char **start, const char **end)
51{
52 const char **iter;
53
54 mutex_lock(&btrace_mutex);
55 for (iter = start; iter < end; iter++) {
56 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
57 if (tb_fmt) {
58 *iter = tb_fmt->fmt;
59 continue;
60 }
61
62 tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt)
63 + strlen(*iter) + 1, GFP_KERNEL);
64 if (tb_fmt) {
65 list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
66 strcpy(tb_fmt->fmt, *iter);
67 *iter = tb_fmt->fmt;
68 } else
69 *iter = NULL;
70 }
71 mutex_unlock(&btrace_mutex);
72}
73
74static int module_trace_bprintk_format_notify(struct notifier_block *self,
75 unsigned long val, void *data)
76{
77 struct module *mod = data;
78 if (mod->num_trace_bprintk_fmt) {
79 const char **start = mod->trace_bprintk_fmt_start;
80 const char **end = start + mod->num_trace_bprintk_fmt;
81
82 if (val == MODULE_STATE_COMING)
83 hold_module_trace_bprintk_format(start, end);
84 }
85 return 0;
86}
87
88#else /* !CONFIG_MODULES */
89__init static int
90module_trace_bprintk_format_notify(struct notifier_block *self,
91 unsigned long val, void *data)
92{
93 return 0;
94}
95#endif /* CONFIG_MODULES */
96
97
98__initdata_or_module static
99struct notifier_block module_trace_bprintk_format_nb = {
100 .notifier_call = module_trace_bprintk_format_notify,
101};
102
103int __trace_bprintk(unsigned long ip, const char *fmt, ...)
104 {
105 int ret;
106 va_list ap;
107
108 if (unlikely(!fmt))
109 return 0;
110
111 if (!(trace_flags & TRACE_ITER_PRINTK))
112 return 0;
113
114 va_start(ap, fmt);
115 ret = trace_vbprintk(ip, fmt, ap);
116 va_end(ap);
117 return ret;
118}
119EXPORT_SYMBOL_GPL(__trace_bprintk);
120
121int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
122 {
123 if (unlikely(!fmt))
124 return 0;
125
126 if (!(trace_flags & TRACE_ITER_PRINTK))
127 return 0;
128
129 return trace_vbprintk(ip, fmt, ap);
130}
131EXPORT_SYMBOL_GPL(__ftrace_vbprintk);
132
133int __trace_printk(unsigned long ip, const char *fmt, ...)
134{
135 int ret;
136 va_list ap;
137
138 if (!(trace_flags & TRACE_ITER_PRINTK))
139 return 0;
140
141 va_start(ap, fmt);
142 ret = trace_vprintk(ip, fmt, ap);
143 va_end(ap);
144 return ret;
145}
146EXPORT_SYMBOL_GPL(__trace_printk);
147
148int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
149{
150 if (!(trace_flags & TRACE_ITER_PRINTK))
151 return 0;
152
153 return trace_vprintk(ip, fmt, ap);
154}
155EXPORT_SYMBOL_GPL(__ftrace_vprintk);
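A hedged usage sketch (not part of this patch): callers normally reach the two exports above through the trace_printk() wrapper macro, which is assumed here to pass the call site's address via _THIS_IP_; a direct call looks roughly like this, with my_probe purely hypothetical:

/* Hypothetical caller, for illustration only. */
static void my_probe(int cpu, unsigned long long delay_ns)
{
	/* The ip argument tags the record with this call site for the output code. */
	__trace_printk(_THIS_IP_, "cpu=%d delay=%llu ns\n", cpu, delay_ns);
}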
156
157static void *
158t_next(struct seq_file *m, void *v, loff_t *pos)
159{
160 const char **fmt = m->private;
161 const char **next = fmt;
162
163 (*pos)++;
164
165 if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
166 return NULL;
167
168 next = fmt;
169 m->private = ++next;
170
171 return fmt;
172}
173
174static void *t_start(struct seq_file *m, loff_t *pos)
175{
176 return t_next(m, NULL, pos);
177}
178
179static int t_show(struct seq_file *m, void *v)
180{
181 const char **fmt = v;
182 const char *str = *fmt;
183 int i;
184
185 seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
186
187 /*
188 * Tabs and new lines need to be converted.
189 */
190 for (i = 0; str[i]; i++) {
191 switch (str[i]) {
192 case '\n':
193 seq_puts(m, "\\n");
194 break;
195 case '\t':
196 seq_puts(m, "\\t");
197 break;
198 case '\\':
199 seq_puts(m, "\\\\");
200 break;
201 case '"':
202 seq_puts(m, "\\\"");
203 break;
204 default:
205 seq_putc(m, str[i]);
206 }
207 }
208 seq_puts(m, "\"\n");
209
210 return 0;
211}
212
213static void t_stop(struct seq_file *m, void *p)
214{
215}
216
217static const struct seq_operations show_format_seq_ops = {
218 .start = t_start,
219 .next = t_next,
220 .show = t_show,
221 .stop = t_stop,
222};
223
224static int
225ftrace_formats_open(struct inode *inode, struct file *file)
226{
227 int ret;
228
229 ret = seq_open(file, &show_format_seq_ops);
230 if (!ret) {
231 struct seq_file *m = file->private_data;
232
233 m->private = __start___trace_bprintk_fmt;
234 }
235 return ret;
236}
237
238static const struct file_operations ftrace_formats_fops = {
239 .open = ftrace_formats_open,
240 .read = seq_read,
241 .llseek = seq_lseek,
242 .release = seq_release,
243};
244
245static __init int init_trace_printk_function_export(void)
246{
247 struct dentry *d_tracer;
248 struct dentry *entry;
249
250 d_tracer = tracing_init_dentry();
251 if (!d_tracer)
252 return 0;
253
254 entry = debugfs_create_file("printk_formats", 0444, d_tracer,
255 NULL, &ftrace_formats_fops);
256 if (!entry)
257 pr_warning("Could not create debugfs "
258 "'printk_formats' entry\n");
259
260 return 0;
261}
262
263fs_initcall(init_trace_printk_function_export);
264
265static __init int init_trace_printk(void)
266{
267 return register_module_notifier(&module_trace_bprintk_format_nb);
268}
269
270early_initcall(init_trace_printk);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index df175cb4564f..de35f200abd3 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -18,6 +18,7 @@ static struct trace_array *ctx_trace;
18static int __read_mostly tracer_enabled; 18static int __read_mostly tracer_enabled;
19static int sched_ref; 19static int sched_ref;
20static DEFINE_MUTEX(sched_register_mutex); 20static DEFINE_MUTEX(sched_register_mutex);
21static int sched_stopped;
21 22
22static void 23static void
23probe_sched_switch(struct rq *__rq, struct task_struct *prev, 24probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -28,7 +29,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
28 int cpu; 29 int cpu;
29 int pc; 30 int pc;
30 31
31 if (!sched_ref) 32 if (!sched_ref || sched_stopped)
32 return; 33 return;
33 34
34 tracing_record_cmdline(prev); 35 tracing_record_cmdline(prev);
@@ -43,7 +44,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
43 data = ctx_trace->data[cpu]; 44 data = ctx_trace->data[cpu];
44 45
45 if (likely(!atomic_read(&data->disabled))) 46 if (likely(!atomic_read(&data->disabled)))
46 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); 47 tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
47 48
48 local_irq_restore(flags); 49 local_irq_restore(flags);
49} 50}
@@ -66,7 +67,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
66 data = ctx_trace->data[cpu]; 67 data = ctx_trace->data[cpu];
67 68
68 if (likely(!atomic_read(&data->disabled))) 69 if (likely(!atomic_read(&data->disabled)))
69 tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, 70 tracing_sched_wakeup_trace(ctx_trace, wakee, current,
70 flags, pc); 71 flags, pc);
71 72
72 local_irq_restore(flags); 73 local_irq_restore(flags);
@@ -93,7 +94,7 @@ static int tracing_sched_register(void)
93 ret = register_trace_sched_switch(probe_sched_switch); 94 ret = register_trace_sched_switch(probe_sched_switch);
94 if (ret) { 95 if (ret) {
95 pr_info("sched trace: Couldn't activate tracepoint" 96 pr_info("sched trace: Couldn't activate tracepoint"
96 " probe to kernel_sched_schedule\n"); 97 " probe to kernel_sched_switch\n");
97 goto fail_deprobe_wake_new; 98 goto fail_deprobe_wake_new;
98 } 99 }
99 100
@@ -185,12 +186,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
185 ctx_trace = tr; 186 ctx_trace = tr;
186} 187}
187 188
188static void start_sched_trace(struct trace_array *tr)
189{
190 tracing_reset_online_cpus(tr);
191 tracing_start_sched_switch_record();
192}
193
194static void stop_sched_trace(struct trace_array *tr) 189static void stop_sched_trace(struct trace_array *tr)
195{ 190{
196 tracing_stop_sched_switch_record(); 191 tracing_stop_sched_switch_record();
@@ -199,7 +194,8 @@ static void stop_sched_trace(struct trace_array *tr)
199static int sched_switch_trace_init(struct trace_array *tr) 194static int sched_switch_trace_init(struct trace_array *tr)
200{ 195{
201 ctx_trace = tr; 196 ctx_trace = tr;
202 start_sched_trace(tr); 197 tracing_reset_online_cpus(tr);
198 tracing_start_sched_switch_record();
203 return 0; 199 return 0;
204} 200}
205 201
@@ -211,13 +207,12 @@ static void sched_switch_trace_reset(struct trace_array *tr)
211 207
212static void sched_switch_trace_start(struct trace_array *tr) 208static void sched_switch_trace_start(struct trace_array *tr)
213{ 209{
214 tracing_reset_online_cpus(tr); 210 sched_stopped = 0;
215 tracing_start_sched_switch();
216} 211}
217 212
218static void sched_switch_trace_stop(struct trace_array *tr) 213static void sched_switch_trace_stop(struct trace_array *tr)
219{ 214{
220 tracing_stop_sched_switch(); 215 sched_stopped = 1;
221} 216}
222 217
223static struct tracer sched_switch_trace __read_mostly = 218static struct tracer sched_switch_trace __read_mostly =
@@ -227,6 +222,7 @@ static struct tracer sched_switch_trace __read_mostly =
227 .reset = sched_switch_trace_reset, 222 .reset = sched_switch_trace_reset,
228 .start = sched_switch_trace_start, 223 .start = sched_switch_trace_start,
229 .stop = sched_switch_trace_stop, 224 .stop = sched_switch_trace_stop,
225 .wait_pipe = poll_wait_pipe,
230#ifdef CONFIG_FTRACE_SELFTEST 226#ifdef CONFIG_FTRACE_SELFTEST
231 .selftest = trace_selftest_startup_sched_switch, 227 .selftest = trace_selftest_startup_sched_switch,
232#endif 228#endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 42ae1e77b6b3..3c5ad6b2ec84 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -25,12 +25,15 @@ static int __read_mostly tracer_enabled;
25static struct task_struct *wakeup_task; 25static struct task_struct *wakeup_task;
26static int wakeup_cpu; 26static int wakeup_cpu;
27static unsigned wakeup_prio = -1; 27static unsigned wakeup_prio = -1;
28static int wakeup_rt;
28 29
29static raw_spinlock_t wakeup_lock = 30static raw_spinlock_t wakeup_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 31 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
31 32
32static void __wakeup_reset(struct trace_array *tr); 33static void __wakeup_reset(struct trace_array *tr);
33 34
35static int save_lat_flag;
36
34#ifdef CONFIG_FUNCTION_TRACER 37#ifdef CONFIG_FUNCTION_TRACER
35/* 38/*
36 * irqsoff uses its own tracer function to keep the overhead down: 39 * irqsoff uses its own tracer function to keep the overhead down:
@@ -71,7 +74,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
71 if (task_cpu(wakeup_task) != cpu) 74 if (task_cpu(wakeup_task) != cpu)
72 goto unlock; 75 goto unlock;
73 76
74 trace_function(tr, data, ip, parent_ip, flags, pc); 77 trace_function(tr, ip, parent_ip, flags, pc);
75 78
76 unlock: 79 unlock:
77 __raw_spin_unlock(&wakeup_lock); 80 __raw_spin_unlock(&wakeup_lock);
@@ -151,7 +154,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
151 if (unlikely(!tracer_enabled || next != wakeup_task)) 154 if (unlikely(!tracer_enabled || next != wakeup_task))
152 goto out_unlock; 155 goto out_unlock;
153 156
154 trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 157 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
158 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
155 159
156 /* 160 /*
157 * usecs conversion is slow so we try to delay the conversion 161 * usecs conversion is slow so we try to delay the conversion
@@ -182,13 +186,10 @@ out:
182 186
183static void __wakeup_reset(struct trace_array *tr) 187static void __wakeup_reset(struct trace_array *tr)
184{ 188{
185 struct trace_array_cpu *data;
186 int cpu; 189 int cpu;
187 190
188 for_each_possible_cpu(cpu) { 191 for_each_possible_cpu(cpu)
189 data = tr->data[cpu];
190 tracing_reset(tr, cpu); 192 tracing_reset(tr, cpu);
191 }
192 193
193 wakeup_cpu = -1; 194 wakeup_cpu = -1;
194 wakeup_prio = -1; 195 wakeup_prio = -1;
@@ -213,6 +214,7 @@ static void wakeup_reset(struct trace_array *tr)
213static void 214static void
214probe_wakeup(struct rq *rq, struct task_struct *p, int success) 215probe_wakeup(struct rq *rq, struct task_struct *p, int success)
215{ 216{
217 struct trace_array_cpu *data;
216 int cpu = smp_processor_id(); 218 int cpu = smp_processor_id();
217 unsigned long flags; 219 unsigned long flags;
218 long disabled; 220 long disabled;
@@ -224,7 +226,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
224 tracing_record_cmdline(p); 226 tracing_record_cmdline(p);
225 tracing_record_cmdline(current); 227 tracing_record_cmdline(current);
226 228
227 if (likely(!rt_task(p)) || 229 if ((wakeup_rt && !rt_task(p)) ||
228 p->prio >= wakeup_prio || 230 p->prio >= wakeup_prio ||
229 p->prio >= current->prio) 231 p->prio >= current->prio)
230 return; 232 return;
@@ -252,9 +254,10 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
252 254
253 local_save_flags(flags); 255 local_save_flags(flags);
254 256
255 wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); 257 data = wakeup_trace->data[wakeup_cpu];
256 trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], 258 data->preempt_timestamp = ftrace_now(cpu);
257 CALLER_ADDR1, CALLER_ADDR2, flags, pc); 259 tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
260 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
258 261
259out_locked: 262out_locked:
260 __raw_spin_unlock(&wakeup_lock); 263 __raw_spin_unlock(&wakeup_lock);
@@ -262,12 +265,6 @@ out:
262 atomic_dec(&wakeup_trace->data[cpu]->disabled); 265 atomic_dec(&wakeup_trace->data[cpu]->disabled);
263} 266}
264 267
265/*
266 * save_tracer_enabled is used to save the state of the tracer_enabled
267 * variable when we disable it when we open a trace output file.
268 */
269static int save_tracer_enabled;
270
271static void start_wakeup_tracer(struct trace_array *tr) 268static void start_wakeup_tracer(struct trace_array *tr)
272{ 269{
273 int ret; 270 int ret;
@@ -289,7 +286,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
289 ret = register_trace_sched_switch(probe_wakeup_sched_switch); 286 ret = register_trace_sched_switch(probe_wakeup_sched_switch);
290 if (ret) { 287 if (ret) {
291 pr_info("sched trace: Couldn't activate tracepoint" 288 pr_info("sched trace: Couldn't activate tracepoint"
292 " probe to kernel_sched_schedule\n"); 289 " probe to kernel_sched_switch\n");
293 goto fail_deprobe_wake_new; 290 goto fail_deprobe_wake_new;
294 } 291 }
295 292
@@ -306,13 +303,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
306 303
307 register_ftrace_function(&trace_ops); 304 register_ftrace_function(&trace_ops);
308 305
309 if (tracing_is_enabled()) { 306 if (tracing_is_enabled())
310 tracer_enabled = 1; 307 tracer_enabled = 1;
311 save_tracer_enabled = 1; 308 else
312 } else {
313 tracer_enabled = 0; 309 tracer_enabled = 0;
314 save_tracer_enabled = 0;
315 }
316 310
317 return; 311 return;
318fail_deprobe_wake_new: 312fail_deprobe_wake_new:
@@ -324,54 +318,54 @@ fail_deprobe:
324static void stop_wakeup_tracer(struct trace_array *tr) 318static void stop_wakeup_tracer(struct trace_array *tr)
325{ 319{
326 tracer_enabled = 0; 320 tracer_enabled = 0;
327 save_tracer_enabled = 0;
328 unregister_ftrace_function(&trace_ops); 321 unregister_ftrace_function(&trace_ops);
329 unregister_trace_sched_switch(probe_wakeup_sched_switch); 322 unregister_trace_sched_switch(probe_wakeup_sched_switch);
330 unregister_trace_sched_wakeup_new(probe_wakeup); 323 unregister_trace_sched_wakeup_new(probe_wakeup);
331 unregister_trace_sched_wakeup(probe_wakeup); 324 unregister_trace_sched_wakeup(probe_wakeup);
332} 325}
333 326
334static int wakeup_tracer_init(struct trace_array *tr) 327static int __wakeup_tracer_init(struct trace_array *tr)
335{ 328{
329 save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
330 trace_flags |= TRACE_ITER_LATENCY_FMT;
331
336 tracing_max_latency = 0; 332 tracing_max_latency = 0;
337 wakeup_trace = tr; 333 wakeup_trace = tr;
338 start_wakeup_tracer(tr); 334 start_wakeup_tracer(tr);
339 return 0; 335 return 0;
340} 336}
341 337
338static int wakeup_tracer_init(struct trace_array *tr)
339{
340 wakeup_rt = 0;
341 return __wakeup_tracer_init(tr);
342}
343
344static int wakeup_rt_tracer_init(struct trace_array *tr)
345{
346 wakeup_rt = 1;
347 return __wakeup_tracer_init(tr);
348}
349
342static void wakeup_tracer_reset(struct trace_array *tr) 350static void wakeup_tracer_reset(struct trace_array *tr)
343{ 351{
344 stop_wakeup_tracer(tr); 352 stop_wakeup_tracer(tr);
345 /* make sure we put back any tasks we are tracing */ 353 /* make sure we put back any tasks we are tracing */
346 wakeup_reset(tr); 354 wakeup_reset(tr);
355
356 if (!save_lat_flag)
357 trace_flags &= ~TRACE_ITER_LATENCY_FMT;
347} 358}
348 359
349static void wakeup_tracer_start(struct trace_array *tr) 360static void wakeup_tracer_start(struct trace_array *tr)
350{ 361{
351 wakeup_reset(tr); 362 wakeup_reset(tr);
352 tracer_enabled = 1; 363 tracer_enabled = 1;
353 save_tracer_enabled = 1;
354} 364}
355 365
356static void wakeup_tracer_stop(struct trace_array *tr) 366static void wakeup_tracer_stop(struct trace_array *tr)
357{ 367{
358 tracer_enabled = 0; 368 tracer_enabled = 0;
359 save_tracer_enabled = 0;
360}
361
362static void wakeup_tracer_open(struct trace_iterator *iter)
363{
364 /* stop the trace while dumping */
365 tracer_enabled = 0;
366}
367
368static void wakeup_tracer_close(struct trace_iterator *iter)
369{
370 /* forget about any processes we were recording */
371 if (save_tracer_enabled) {
372 wakeup_reset(iter->tr);
373 tracer_enabled = 1;
374 }
375} 369}
376 370
377static struct tracer wakeup_tracer __read_mostly = 371static struct tracer wakeup_tracer __read_mostly =
@@ -381,8 +375,20 @@ static struct tracer wakeup_tracer __read_mostly =
381 .reset = wakeup_tracer_reset, 375 .reset = wakeup_tracer_reset,
382 .start = wakeup_tracer_start, 376 .start = wakeup_tracer_start,
383 .stop = wakeup_tracer_stop, 377 .stop = wakeup_tracer_stop,
384 .open = wakeup_tracer_open, 378 .print_max = 1,
385 .close = wakeup_tracer_close, 379#ifdef CONFIG_FTRACE_SELFTEST
380 .selftest = trace_selftest_startup_wakeup,
381#endif
382};
383
384static struct tracer wakeup_rt_tracer __read_mostly =
385{
386 .name = "wakeup_rt",
387 .init = wakeup_rt_tracer_init,
388 .reset = wakeup_tracer_reset,
389 .start = wakeup_tracer_start,
390 .stop = wakeup_tracer_stop,
391 .wait_pipe = poll_wait_pipe,
386 .print_max = 1, 392 .print_max = 1,
387#ifdef CONFIG_FTRACE_SELFTEST 393#ifdef CONFIG_FTRACE_SELFTEST
388 .selftest = trace_selftest_startup_wakeup, 394 .selftest = trace_selftest_startup_wakeup,
@@ -397,6 +403,10 @@ __init static int init_wakeup_tracer(void)
397 if (ret) 403 if (ret)
398 return ret; 404 return ret;
399 405
406 ret = register_tracer(&wakeup_rt_tracer);
407 if (ret)
408 return ret;
409
400 return 0; 410 return 0;
401} 411}
402device_initcall(init_wakeup_tracer); 412device_initcall(init_wakeup_tracer);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index bc8e80a86bca..08f4eb2763d1 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1,5 +1,6 @@
1/* Include in trace.c */ 1/* Include in trace.c */
2 2
3#include <linux/stringify.h>
3#include <linux/kthread.h> 4#include <linux/kthread.h>
4#include <linux/delay.h> 5#include <linux/delay.h>
5 6
@@ -9,11 +10,12 @@ static inline int trace_valid_entry(struct trace_entry *entry)
9 case TRACE_FN: 10 case TRACE_FN:
10 case TRACE_CTX: 11 case TRACE_CTX:
11 case TRACE_WAKE: 12 case TRACE_WAKE:
12 case TRACE_CONT:
13 case TRACE_STACK: 13 case TRACE_STACK:
14 case TRACE_PRINT: 14 case TRACE_PRINT:
15 case TRACE_SPECIAL: 15 case TRACE_SPECIAL:
16 case TRACE_BRANCH: 16 case TRACE_BRANCH:
17 case TRACE_GRAPH_ENT:
18 case TRACE_GRAPH_RET:
17 return 1; 19 return 1;
18 } 20 }
19 return 0; 21 return 0;
@@ -99,9 +101,6 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
99 101
100#ifdef CONFIG_DYNAMIC_FTRACE 102#ifdef CONFIG_DYNAMIC_FTRACE
101 103
102#define __STR(x) #x
103#define STR(x) __STR(x)
104
105/* Test dynamic code modification and ftrace filters */ 104/* Test dynamic code modification and ftrace filters */
106int trace_selftest_startup_dynamic_tracing(struct tracer *trace, 105int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
107 struct trace_array *tr, 106 struct trace_array *tr,
@@ -125,17 +124,17 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
125 func(); 124 func();
126 125
127 /* 126 /*
128 * Some archs *cough*PowerPC*cough* add charachters to the 127 * Some archs *cough*PowerPC*cough* add characters to the
129 * start of the function names. We simply put a '*' to 128 * start of the function names. We simply put a '*' to
130 * accomodate them. 129 * accommodate them.
131 */ 130 */
132 func_name = "*" STR(DYN_FTRACE_TEST_NAME); 131 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
133 132
134 /* filter only on our function */ 133 /* filter only on our function */
135 ftrace_set_filter(func_name, strlen(func_name), 1); 134 ftrace_set_filter(func_name, strlen(func_name), 1);
136 135
137 /* enable tracing */ 136 /* enable tracing */
138 ret = trace->init(tr); 137 ret = tracer_init(trace, tr);
139 if (ret) { 138 if (ret) {
140 warn_failed_init_tracer(trace, ret); 139 warn_failed_init_tracer(trace, ret);
141 goto out; 140 goto out;
@@ -209,7 +208,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
209 ftrace_enabled = 1; 208 ftrace_enabled = 1;
210 tracer_enabled = 1; 209 tracer_enabled = 1;
211 210
212 ret = trace->init(tr); 211 ret = tracer_init(trace, tr);
213 if (ret) { 212 if (ret) {
214 warn_failed_init_tracer(trace, ret); 213 warn_failed_init_tracer(trace, ret);
215 goto out; 214 goto out;
@@ -247,6 +246,90 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
247} 246}
248#endif /* CONFIG_FUNCTION_TRACER */ 247#endif /* CONFIG_FUNCTION_TRACER */
249 248
249
250#ifdef CONFIG_FUNCTION_GRAPH_TRACER
251
252/* Maximum number of functions to trace before diagnosing a hang */
253#define GRAPH_MAX_FUNC_TEST 100000000
254
255static void __ftrace_dump(bool disable_tracing);
256static unsigned int graph_hang_thresh;
257
258/* Wrap the real function entry probe to avoid possible hanging */
259static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
260{
261 /* This is harmlessly racy, we want to approximately detect a hang */
262 if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
263 ftrace_graph_stop();
264 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
265 if (ftrace_dump_on_oops)
266 __ftrace_dump(false);
267 return 0;
268 }
269
270 return trace_graph_entry(trace);
271}
272
273/*
274 * Pretty much the same than for the function tracer from which the selftest
275 * has been borrowed.
276 */
277int
278trace_selftest_startup_function_graph(struct tracer *trace,
279 struct trace_array *tr)
280{
281 int ret;
282 unsigned long count;
283
284 /*
285 * Simulate the init() callback but we attach a watchdog callback
286 * to detect and recover from possible hangs
287 */
288 tracing_reset_online_cpus(tr);
289 ret = register_ftrace_graph(&trace_graph_return,
290 &trace_graph_entry_watchdog);
291 if (ret) {
292 warn_failed_init_tracer(trace, ret);
293 goto out;
294 }
295 tracing_start_cmdline_record();
296
297 /* Sleep for a 1/10 of a second */
298 msleep(100);
299
300 /* Have we just recovered from a hang? */
301 if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
302 tracing_selftest_disabled = true;
303 ret = -1;
304 goto out;
305 }
306
307 tracing_stop();
308
309 /* check the trace buffer */
310 ret = trace_test_buffer(tr, &count);
311
312 trace->reset(tr);
313 tracing_start();
314
315 if (!ret && !count) {
316 printk(KERN_CONT ".. no entries found ..");
317 ret = -1;
318 goto out;
319 }
320
321 /* Don't test dynamic tracing, the function tracer already did */
322
323out:
324 /* Stop it if we failed */
325 if (ret)
326 ftrace_graph_stop();
327
328 return ret;
329}
330#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
331
332
250#ifdef CONFIG_IRQSOFF_TRACER 333#ifdef CONFIG_IRQSOFF_TRACER
251int 334int
252trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) 335trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
@@ -256,7 +339,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
256 int ret; 339 int ret;
257 340
258 /* start the tracing */ 341 /* start the tracing */
259 ret = trace->init(tr); 342 ret = tracer_init(trace, tr);
260 if (ret) { 343 if (ret) {
261 warn_failed_init_tracer(trace, ret); 344 warn_failed_init_tracer(trace, ret);
262 return ret; 345 return ret;
@@ -268,6 +351,14 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
268 local_irq_disable(); 351 local_irq_disable();
269 udelay(100); 352 udelay(100);
270 local_irq_enable(); 353 local_irq_enable();
354
355 /*
356 * Stop the tracer to avoid a warning subsequent
357 * to buffer flipping failure because tracing_stop()
358 * disables the tr and max buffers, making flipping impossible
359 * in case of parallel max irqs off latencies.
360 */
361 trace->stop(tr);
271 /* stop the tracing. */ 362 /* stop the tracing. */
272 tracing_stop(); 363 tracing_stop();
273 /* check both trace buffers */ 364 /* check both trace buffers */
@@ -310,7 +401,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
310 } 401 }
311 402
312 /* start the tracing */ 403 /* start the tracing */
313 ret = trace->init(tr); 404 ret = tracer_init(trace, tr);
314 if (ret) { 405 if (ret) {
315 warn_failed_init_tracer(trace, ret); 406 warn_failed_init_tracer(trace, ret);
316 return ret; 407 return ret;
@@ -322,6 +413,14 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
322 preempt_disable(); 413 preempt_disable();
323 udelay(100); 414 udelay(100);
324 preempt_enable(); 415 preempt_enable();
416
417 /*
418 * Stop the tracer to avoid a warning subsequent
419 * to buffer flipping failure because tracing_stop()
420 * disables the tr and max buffers, making flipping impossible
421 * in case of parallel max preempt off latencies.
422 */
423 trace->stop(tr);
325 /* stop the tracing. */ 424 /* stop the tracing. */
326 tracing_stop(); 425 tracing_stop();
327 /* check both trace buffers */ 426 /* check both trace buffers */
@@ -364,10 +463,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
364 } 463 }
365 464
366 /* start the tracing */ 465 /* start the tracing */
367 ret = trace->init(tr); 466 ret = tracer_init(trace, tr);
368 if (ret) { 467 if (ret) {
369 warn_failed_init_tracer(trace, ret); 468 warn_failed_init_tracer(trace, ret);
370 goto out; 469 goto out_no_start;
371 } 470 }
372 471
373 /* reset the max latency */ 472 /* reset the max latency */
@@ -381,31 +480,35 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
381 /* reverse the order of preempt vs irqs */ 480 /* reverse the order of preempt vs irqs */
382 local_irq_enable(); 481 local_irq_enable();
383 482
483 /*
484 * Stop the tracer to avoid a warning subsequent
485 * to buffer flipping failure because tracing_stop()
486 * disables the tr and max buffers, making flipping impossible
487 * in case of parallel max irqs/preempt off latencies.
488 */
489 trace->stop(tr);
384 /* stop the tracing. */ 490 /* stop the tracing. */
385 tracing_stop(); 491 tracing_stop();
386 /* check both trace buffers */ 492 /* check both trace buffers */
387 ret = trace_test_buffer(tr, NULL); 493 ret = trace_test_buffer(tr, NULL);
388 if (ret) { 494 if (ret)
389 tracing_start();
390 goto out; 495 goto out;
391 }
392 496
393 ret = trace_test_buffer(&max_tr, &count); 497 ret = trace_test_buffer(&max_tr, &count);
394 if (ret) { 498 if (ret)
395 tracing_start();
396 goto out; 499 goto out;
397 }
398 500
399 if (!ret && !count) { 501 if (!ret && !count) {
400 printk(KERN_CONT ".. no entries found .."); 502 printk(KERN_CONT ".. no entries found ..");
401 ret = -1; 503 ret = -1;
402 tracing_start();
403 goto out; 504 goto out;
404 } 505 }
405 506
406 /* do the test by disabling interrupts first this time */ 507 /* do the test by disabling interrupts first this time */
407 tracing_max_latency = 0; 508 tracing_max_latency = 0;
408 tracing_start(); 509 tracing_start();
510 trace->start(tr);
511
409 preempt_disable(); 512 preempt_disable();
410 local_irq_disable(); 513 local_irq_disable();
411 udelay(100); 514 udelay(100);
@@ -413,6 +516,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
413 /* reverse the order of preempt vs irqs */ 516 /* reverse the order of preempt vs irqs */
414 local_irq_enable(); 517 local_irq_enable();
415 518
519 trace->stop(tr);
416 /* stop the tracing. */ 520 /* stop the tracing. */
417 tracing_stop(); 521 tracing_stop();
418 /* check both trace buffers */ 522 /* check both trace buffers */
@@ -428,9 +532,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
428 goto out; 532 goto out;
429 } 533 }
430 534
431 out: 535out:
432 trace->reset(tr);
433 tracing_start(); 536 tracing_start();
537out_no_start:
538 trace->reset(tr);
434 tracing_max_latency = save_max; 539 tracing_max_latency = save_max;
435 540
436 return ret; 541 return ret;
@@ -496,7 +601,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
496 wait_for_completion(&isrt); 601 wait_for_completion(&isrt);
497 602
498 /* start the tracing */ 603 /* start the tracing */
499 ret = trace->init(tr); 604 ret = tracer_init(trace, tr);
500 if (ret) { 605 if (ret) {
501 warn_failed_init_tracer(trace, ret); 606 warn_failed_init_tracer(trace, ret);
502 return ret; 607 return ret;
@@ -557,7 +662,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
557 int ret; 662 int ret;
558 663
559 /* start the tracing */ 664 /* start the tracing */
560 ret = trace->init(tr); 665 ret = tracer_init(trace, tr);
561 if (ret) { 666 if (ret) {
562 warn_failed_init_tracer(trace, ret); 667 warn_failed_init_tracer(trace, ret);
563 return ret; 668 return ret;
@@ -589,10 +694,10 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
589 int ret; 694 int ret;
590 695
591 /* start the tracing */ 696 /* start the tracing */
592 ret = trace->init(tr); 697 ret = tracer_init(trace, tr);
593 if (ret) { 698 if (ret) {
594 warn_failed_init_tracer(trace, ret); 699 warn_failed_init_tracer(trace, ret);
595 return 0; 700 return ret;
596 } 701 }
597 702
598 /* Sleep for a 1/10 of a second */ 703 /* Sleep for a 1/10 of a second */
@@ -604,6 +709,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
604 trace->reset(tr); 709 trace->reset(tr);
605 tracing_start(); 710 tracing_start();
606 711
712 if (!ret && !count) {
713 printk(KERN_CONT ".. no entries found ..");
714 ret = -1;
715 }
716
607 return ret; 717 return ret;
608} 718}
609#endif /* CONFIG_SYSPROF_TRACER */ 719#endif /* CONFIG_SYSPROF_TRACER */
@@ -616,7 +726,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
616 int ret; 726 int ret;
617 727
618 /* start the tracing */ 728 /* start the tracing */
619 ret = trace->init(tr); 729 ret = tracer_init(trace, tr);
620 if (ret) { 730 if (ret) {
621 warn_failed_init_tracer(trace, ret); 731 warn_failed_init_tracer(trace, ret);
622 return ret; 732 return ret;
@@ -631,6 +741,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
631 trace->reset(tr); 741 trace->reset(tr);
632 tracing_start(); 742 tracing_start();
633 743
744 if (!ret && !count) {
745 printk(KERN_CONT ".. no entries found ..");
746 ret = -1;
747 }
748
634 return ret; 749 return ret;
635} 750}
636#endif /* CONFIG_BRANCH_TRACER */ 751#endif /* CONFIG_BRANCH_TRACER */
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d0871bc0aca5..c750f65f9661 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -245,16 +245,31 @@ static int trace_lookup_stack(struct seq_file *m, long i)
245#endif 245#endif
246} 246}
247 247
248static void print_disabled(struct seq_file *m)
249{
250 seq_puts(m, "#\n"
251 "# Stack tracer disabled\n"
252 "#\n"
253 "# To enable the stack tracer, either add 'stacktrace' to the\n"
254 "# kernel command line\n"
255 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
256 "#\n");
257}
258
248static int t_show(struct seq_file *m, void *v) 259static int t_show(struct seq_file *m, void *v)
249{ 260{
250 long i; 261 long i;
251 int size; 262 int size;
252 263
253 if (v == SEQ_START_TOKEN) { 264 if (v == SEQ_START_TOKEN) {
254 seq_printf(m, " Depth Size Location" 265 seq_printf(m, " Depth Size Location"
255 " (%d entries)\n" 266 " (%d entries)\n"
256 " ----- ---- --------\n", 267 " ----- ---- --------\n",
257 max_stack_trace.nr_entries); 268 max_stack_trace.nr_entries);
269
270 if (!stack_tracer_enabled && !max_stack_size)
271 print_disabled(m);
272
258 return 0; 273 return 0;
259 } 274 }
260 275
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
new file mode 100644
index 000000000000..acdebd771a93
--- /dev/null
+++ b/kernel/trace/trace_stat.c
@@ -0,0 +1,326 @@
1/*
2 * Infrastructure for statistic tracing (histogram output).
3 *
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 *
6 * Based on the code from trace_branch.c which is
7 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
8 *
9 */
10
11
12#include <linux/list.h>
13#include <linux/debugfs.h>
14#include "trace_stat.h"
15#include "trace.h"
16
17
18/* List of stat entries from a tracer */
19struct trace_stat_list {
20 struct list_head list;
21 void *stat;
22};
23
24/* A stat session is the stats output in one file */
25struct tracer_stat_session {
26 struct list_head session_list;
27 struct tracer_stat *ts;
28 struct list_head stat_list;
29 struct mutex stat_mutex;
30 struct dentry *file;
31};
32
33/* All of the sessions currently in use. Each stat file embeds one session */
34static LIST_HEAD(all_stat_sessions);
35static DEFINE_MUTEX(all_stat_sessions_mutex);
36
37/* The root directory for all stat files */
38static struct dentry *stat_dir;
39
40
41static void reset_stat_session(struct tracer_stat_session *session)
42{
43 struct trace_stat_list *node, *next;
44
45 list_for_each_entry_safe(node, next, &session->stat_list, list)
46 kfree(node);
47
48 INIT_LIST_HEAD(&session->stat_list);
49}
50
51static void destroy_session(struct tracer_stat_session *session)
52{
53 debugfs_remove(session->file);
54 reset_stat_session(session);
55 mutex_destroy(&session->stat_mutex);
56 kfree(session);
57}
58
59/*
60 * For tracers that don't provide a stat_cmp callback.
61 * This default forces an immediate insertion at the tail of
62 * the list.
63 */
64static int dummy_cmp(void *p1, void *p2)
65{
66 return 1;
67}
68
69/*
70 * Initialize the stat list at each trace_stat file opening.
71 * All of this copying and sorting is required at each opening,
72 * since the stats could have changed between two file sessions.
73 */
74static int stat_seq_init(struct tracer_stat_session *session)
75{
76 struct trace_stat_list *iter_entry, *new_entry;
77 struct tracer_stat *ts = session->ts;
78 void *stat;
79 int ret = 0;
80 int i;
81
82 mutex_lock(&session->stat_mutex);
83 reset_stat_session(session);
84
85 if (!ts->stat_cmp)
86 ts->stat_cmp = dummy_cmp;
87
88 stat = ts->stat_start();
89 if (!stat)
90 goto exit;
91
92 /*
93 * The first entry. Actually this is the second, but the first
94 * one (the stat_list head) is pointless.
95 */
96 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
97 if (!new_entry) {
98 ret = -ENOMEM;
99 goto exit;
100 }
101
102 INIT_LIST_HEAD(&new_entry->list);
103
104 list_add(&new_entry->list, &session->stat_list);
105
106 new_entry->stat = stat;
107
108 /*
109 * Iterate over the tracer stat entries and store them in a sorted
110 * list.
111 */
112 for (i = 1; ; i++) {
113 stat = ts->stat_next(stat, i);
114
115 /* End of insertion */
116 if (!stat)
117 break;
118
119 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
120 if (!new_entry) {
121 ret = -ENOMEM;
122 goto exit_free_list;
123 }
124
125 INIT_LIST_HEAD(&new_entry->list);
126 new_entry->stat = stat;
127
128 list_for_each_entry_reverse(iter_entry, &session->stat_list,
129 list) {
130
131				/* Insert in descending sort order */
132 if (ts->stat_cmp(iter_entry->stat,
133 new_entry->stat) >= 0) {
134
135 list_add(&new_entry->list, &iter_entry->list);
136 break;
137 }
138 }
139
140		/* The current largest value: goes at the head of the list */
141 if (list_empty(&new_entry->list))
142 list_add(&new_entry->list, &session->stat_list);
143 }
144exit:
145 mutex_unlock(&session->stat_mutex);
146 return ret;
147
148exit_free_list:
149 reset_stat_session(session);
150 mutex_unlock(&session->stat_mutex);
151 return ret;
152}
153
154
155static void *stat_seq_start(struct seq_file *s, loff_t *pos)
156{
157 struct tracer_stat_session *session = s->private;
158
159	/* Prevent a tracer switch or stat_list modification */
160 mutex_lock(&session->stat_mutex);
161
162 /* If we are in the beginning of the file, print the headers */
163 if (!*pos && session->ts->stat_headers)
164 return SEQ_START_TOKEN;
165
166 return seq_list_start(&session->stat_list, *pos);
167}
168
169static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
170{
171 struct tracer_stat_session *session = s->private;
172
173 if (p == SEQ_START_TOKEN)
174 return seq_list_start(&session->stat_list, *pos);
175
176 return seq_list_next(p, &session->stat_list, pos);
177}
178
179static void stat_seq_stop(struct seq_file *s, void *p)
180{
181 struct tracer_stat_session *session = s->private;
182 mutex_unlock(&session->stat_mutex);
183}
184
185static int stat_seq_show(struct seq_file *s, void *v)
186{
187 struct tracer_stat_session *session = s->private;
188 struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
189
190 if (v == SEQ_START_TOKEN)
191 return session->ts->stat_headers(s);
192
193 return session->ts->stat_show(s, l->stat);
194}
195
196static const struct seq_operations trace_stat_seq_ops = {
197 .start = stat_seq_start,
198 .next = stat_seq_next,
199 .stop = stat_seq_stop,
200 .show = stat_seq_show
201};
202
203/* The session stat is refilled and resorted at each stat file opening */
204static int tracing_stat_open(struct inode *inode, struct file *file)
205{
206 int ret;
207
208 struct tracer_stat_session *session = inode->i_private;
209
210 ret = seq_open(file, &trace_stat_seq_ops);
211 if (!ret) {
212 struct seq_file *m = file->private_data;
213 m->private = session;
214 ret = stat_seq_init(session);
215 }
216
217 return ret;
218}
219
220/*
221 * Avoid consuming memory with our now useless list.
222 */
223static int tracing_stat_release(struct inode *i, struct file *f)
224{
225 struct tracer_stat_session *session = i->i_private;
226
227 mutex_lock(&session->stat_mutex);
228 reset_stat_session(session);
229 mutex_unlock(&session->stat_mutex);
230
231 return 0;
232}
233
234static const struct file_operations tracing_stat_fops = {
235 .open = tracing_stat_open,
236 .read = seq_read,
237 .llseek = seq_lseek,
238 .release = tracing_stat_release
239};
240
241static int tracing_stat_init(void)
242{
243 struct dentry *d_tracing;
244
245 d_tracing = tracing_init_dentry();
246
247 stat_dir = debugfs_create_dir("trace_stat", d_tracing);
248 if (!stat_dir)
249 pr_warning("Could not create debugfs "
250 "'trace_stat' entry\n");
251 return 0;
252}
253
254static int init_stat_file(struct tracer_stat_session *session)
255{
256 if (!stat_dir && tracing_stat_init())
257 return -ENODEV;
258
259 session->file = debugfs_create_file(session->ts->name, 0644,
260 stat_dir,
261 session, &tracing_stat_fops);
262 if (!session->file)
263 return -ENOMEM;
264 return 0;
265}
266
267int register_stat_tracer(struct tracer_stat *trace)
268{
269 struct tracer_stat_session *session, *node, *tmp;
270 int ret;
271
272 if (!trace)
273 return -EINVAL;
274
275 if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
276 return -EINVAL;
277
278 /* Already registered? */
279 mutex_lock(&all_stat_sessions_mutex);
280 list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
281 if (node->ts == trace) {
282 mutex_unlock(&all_stat_sessions_mutex);
283 return -EINVAL;
284 }
285 }
286 mutex_unlock(&all_stat_sessions_mutex);
287
288 /* Init the session */
289 session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL);
290 if (!session)
291 return -ENOMEM;
292
293 session->ts = trace;
294 INIT_LIST_HEAD(&session->session_list);
295 INIT_LIST_HEAD(&session->stat_list);
296 mutex_init(&session->stat_mutex);
297 session->file = NULL;
298
299 ret = init_stat_file(session);
300 if (ret) {
301 destroy_session(session);
302 return ret;
303 }
304
305 /* Register */
306 mutex_lock(&all_stat_sessions_mutex);
307 list_add_tail(&session->session_list, &all_stat_sessions);
308 mutex_unlock(&all_stat_sessions_mutex);
309
310 return 0;
311}
312
313void unregister_stat_tracer(struct tracer_stat *trace)
314{
315 struct tracer_stat_session *node, *tmp;
316
317 mutex_lock(&all_stat_sessions_mutex);
318 list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
319 if (node->ts == trace) {
320 list_del(&node->session_list);
321 destroy_session(node);
322 break;
323 }
324 }
325 mutex_unlock(&all_stat_sessions_mutex);
326}
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
new file mode 100644
index 000000000000..202274cf7f3d
--- /dev/null
+++ b/kernel/trace/trace_stat.h
@@ -0,0 +1,31 @@
1#ifndef __TRACE_STAT_H
2#define __TRACE_STAT_H
3
4#include <linux/seq_file.h>
5
6/*
7 * If you want to provide a stat file (one-shot statistics), fill
8 * an iterator with stat_start/stat_next and a stat_show callback.
9 * The other callbacks are optional.
10 */
11struct tracer_stat {
12 /* The name of your stat file */
13 const char *name;
14 /* Iteration over statistic entries */
15 void *(*stat_start)(void);
16 void *(*stat_next)(void *prev, int idx);
17 /* Compare two entries for stats sorting */
18 int (*stat_cmp)(void *p1, void *p2);
19 /* Print a stat entry */
20 int (*stat_show)(struct seq_file *s, void *p);
21 /* Print the headers of your stat entries */
22 int (*stat_headers)(struct seq_file *s);
23};
24
25/*
26 * Create or destroy a stat file
27 */
28extern int register_stat_tracer(struct tracer_stat *trace);
29extern void unregister_stat_tracer(struct tracer_stat *trace);
30
31#endif /* __TRACE_STAT_H */
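
The header above is the whole contract a statistical tracer has to fulfil; the workqueue tracer added later in this series is its first user. A minimal consumer could look roughly like the sketch below; it is illustrative only and not part of this patch, and the my_counter type, the my_counters[] array and the my_* helpers are hypothetical names.

/* Illustrative tracer_stat user, assuming a pre-existing my_counters[] array. */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include "trace_stat.h"

struct my_counter {
	const char	*name;
	unsigned long	hits;
};

static struct my_counter my_counters[16];	/* hypothetical data source */

static void *my_stat_start(void)
{
	return &my_counters[0];
}

static void *my_stat_next(void *prev, int idx)
{
	if (idx >= ARRAY_SIZE(my_counters))
		return NULL;
	return &my_counters[idx];
}

/* Positive when p1 should come first: gives a descending sort by hits. */
static int my_stat_cmp(void *p1, void *p2)
{
	struct my_counter *a = p1, *b = p2;

	return a->hits > b->hits ? 1 : (a->hits < b->hits ? -1 : 0);
}

static int my_stat_show(struct seq_file *s, void *p)
{
	struct my_counter *c = p;

	seq_printf(s, "%-16s %lu\n", c->name, c->hits);
	return 0;
}

static int my_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# NAME             HITS\n");
	return 0;
}

static struct tracer_stat my_stats = {
	.name		= "my_counters",
	.stat_start	= my_stat_start,
	.stat_next	= my_stat_next,
	.stat_cmp	= my_stat_cmp,
	.stat_show	= my_stat_show,
	.stat_headers	= my_stat_headers,
};

Calling register_stat_tracer(&my_stats) then creates trace_stat/my_counters under the tracing debugfs directory, refilled and resorted at every open, and unregister_stat_tracer(&my_stats) removes it again, exactly as trace_workqueue.c does for its "workqueues" file.
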
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
new file mode 100644
index 000000000000..a2a3af29c943
--- /dev/null
+++ b/kernel/trace/trace_syscalls.c
@@ -0,0 +1,250 @@
1#include <linux/kernel.h>
2#include <linux/ftrace.h>
3#include <asm/syscall.h>
4
5#include "trace_output.h"
6#include "trace.h"
7
8/* Keep a counter of the syscall tracing users */
9static int refcount;
10
11/* Prevent races when toggling the thread flags */
12static DEFINE_MUTEX(syscall_trace_lock);
13
14/* Option to display the parameter types */
15enum {
16 TRACE_SYSCALLS_OPT_TYPES = 0x1,
17};
18
19static struct tracer_opt syscalls_opts[] = {
20 { TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
21 { }
22};
23
24static struct tracer_flags syscalls_flags = {
25	.val = 0, /* By default: no parameter types */
26 .opts = syscalls_opts
27};
28
29enum print_line_t
30print_syscall_enter(struct trace_iterator *iter, int flags)
31{
32 struct trace_seq *s = &iter->seq;
33 struct trace_entry *ent = iter->ent;
34 struct syscall_trace_enter *trace;
35 struct syscall_metadata *entry;
36 int i, ret, syscall;
37
38 trace_assign_type(trace, ent);
39
40 syscall = trace->nr;
41
42 entry = syscall_nr_to_meta(syscall);
43 if (!entry)
44 goto end;
45
46 ret = trace_seq_printf(s, "%s(", entry->name);
47 if (!ret)
48 return TRACE_TYPE_PARTIAL_LINE;
49
50 for (i = 0; i < entry->nb_args; i++) {
51 /* parameter types */
52 if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
53 ret = trace_seq_printf(s, "%s ", entry->types[i]);
54 if (!ret)
55 return TRACE_TYPE_PARTIAL_LINE;
56 }
57 /* parameter values */
58 ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
59 trace->args[i],
60 i == entry->nb_args - 1 ? ")" : ",");
61 if (!ret)
62 return TRACE_TYPE_PARTIAL_LINE;
63 }
64
65end:
66 trace_seq_printf(s, "\n");
67 return TRACE_TYPE_HANDLED;
68}
69
70enum print_line_t
71print_syscall_exit(struct trace_iterator *iter, int flags)
72{
73 struct trace_seq *s = &iter->seq;
74 struct trace_entry *ent = iter->ent;
75 struct syscall_trace_exit *trace;
76 int syscall;
77 struct syscall_metadata *entry;
78 int ret;
79
80 trace_assign_type(trace, ent);
81
82 syscall = trace->nr;
83
84 entry = syscall_nr_to_meta(syscall);
85 if (!entry) {
86 trace_seq_printf(s, "\n");
87 return TRACE_TYPE_HANDLED;
88 }
89
90 ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
91 trace->ret);
92 if (!ret)
93 return TRACE_TYPE_PARTIAL_LINE;
94
95 return TRACE_TYPE_HANDLED;
96}
97
98void start_ftrace_syscalls(void)
99{
100 unsigned long flags;
101 struct task_struct *g, *t;
102
103 mutex_lock(&syscall_trace_lock);
104
105 /* Don't enable the flag on the tasks twice */
106 if (++refcount != 1)
107 goto unlock;
108
109 arch_init_ftrace_syscalls();
110 read_lock_irqsave(&tasklist_lock, flags);
111
112 do_each_thread(g, t) {
113 set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
114 } while_each_thread(g, t);
115
116 read_unlock_irqrestore(&tasklist_lock, flags);
117
118unlock:
119 mutex_unlock(&syscall_trace_lock);
120}
121
122void stop_ftrace_syscalls(void)
123{
124 unsigned long flags;
125 struct task_struct *g, *t;
126
127 mutex_lock(&syscall_trace_lock);
128
129 /* There are perhaps still some users */
130 if (--refcount)
131 goto unlock;
132
133 read_lock_irqsave(&tasklist_lock, flags);
134
135 do_each_thread(g, t) {
136 clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
137 } while_each_thread(g, t);
138
139 read_unlock_irqrestore(&tasklist_lock, flags);
140
141unlock:
142 mutex_unlock(&syscall_trace_lock);
143}
144
145void ftrace_syscall_enter(struct pt_regs *regs)
146{
147 struct syscall_trace_enter *entry;
148 struct syscall_metadata *sys_data;
149 struct ring_buffer_event *event;
150 int size;
151 int syscall_nr;
152
153 syscall_nr = syscall_get_nr(current, regs);
154
155 sys_data = syscall_nr_to_meta(syscall_nr);
156 if (!sys_data)
157 return;
158
159 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
160
161 event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size,
162 0, 0);
163 if (!event)
164 return;
165
166 entry = ring_buffer_event_data(event);
167 entry->nr = syscall_nr;
168 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
169
170 trace_current_buffer_unlock_commit(event, 0, 0);
171 trace_wake_up();
172}
173
174void ftrace_syscall_exit(struct pt_regs *regs)
175{
176 struct syscall_trace_exit *entry;
177 struct syscall_metadata *sys_data;
178 struct ring_buffer_event *event;
179 int syscall_nr;
180
181 syscall_nr = syscall_get_nr(current, regs);
182
183 sys_data = syscall_nr_to_meta(syscall_nr);
184 if (!sys_data)
185 return;
186
187 event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT,
188 sizeof(*entry), 0, 0);
189 if (!event)
190 return;
191
192 entry = ring_buffer_event_data(event);
193 entry->nr = syscall_nr;
194 entry->ret = syscall_get_return_value(current, regs);
195
196 trace_current_buffer_unlock_commit(event, 0, 0);
197 trace_wake_up();
198}
199
200static int init_syscall_tracer(struct trace_array *tr)
201{
202 start_ftrace_syscalls();
203
204 return 0;
205}
206
207static void reset_syscall_tracer(struct trace_array *tr)
208{
209 stop_ftrace_syscalls();
210 tracing_reset_online_cpus(tr);
211}
212
213static struct trace_event syscall_enter_event = {
214 .type = TRACE_SYSCALL_ENTER,
215 .trace = print_syscall_enter,
216};
217
218static struct trace_event syscall_exit_event = {
219 .type = TRACE_SYSCALL_EXIT,
220 .trace = print_syscall_exit,
221};
222
223static struct tracer syscall_tracer __read_mostly = {
224 .name = "syscall",
225 .init = init_syscall_tracer,
226 .reset = reset_syscall_tracer,
227 .flags = &syscalls_flags,
228};
229
230__init int register_ftrace_syscalls(void)
231{
232 int ret;
233
234 ret = register_ftrace_event(&syscall_enter_event);
235 if (!ret) {
236 printk(KERN_WARNING "event %d failed to register\n",
237 syscall_enter_event.type);
238 WARN_ON_ONCE(1);
239 }
240
241 ret = register_ftrace_event(&syscall_exit_event);
242 if (!ret) {
243 printk(KERN_WARNING "event %d failed to register\n",
244 syscall_exit_event.type);
245 WARN_ON_ONCE(1);
246 }
247
248 return register_tracer(&syscall_tracer);
249}
250device_initcall(register_ftrace_syscalls);
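
print_syscall_enter() and ftrace_syscall_enter() above rely on per-syscall metadata that the architecture sets up via arch_init_ftrace_syscalls() and hands back through syscall_nr_to_meta(); its definition lives outside this hunk. Judging only from the fields dereferenced here, that metadata has roughly the following shape (an inference for orientation, not the actual declaration):

struct syscall_metadata {
	const char	*name;		/* printed as the syscall name */
	int		nb_args;	/* number of arguments */
	const char	**types;	/* argument type strings, shown only with the syscall_arg_type option */
	const char	**args;		/* argument names */
};

The ring-buffer records themselves then only carry the raw values: struct syscall_trace_enter holds the syscall number plus the arguments fetched with syscall_get_arguments(), and struct syscall_trace_exit the number plus the value from syscall_get_return_value().
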
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index eaca5ad803ff..91fd19c2149f 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
88 } 88 }
89} 89}
90 90
91const static struct stacktrace_ops backtrace_ops = { 91static const struct stacktrace_ops backtrace_ops = {
92 .warning = backtrace_warning, 92 .warning = backtrace_warning,
93 .warning_symbol = backtrace_warning_symbol, 93 .warning_symbol = backtrace_warning_symbol,
94 .stack = backtrace_stack, 94 .stack = backtrace_stack,
@@ -226,15 +226,6 @@ static void stop_stack_timers(void)
226 stop_stack_timer(cpu); 226 stop_stack_timer(cpu);
227} 227}
228 228
229static void start_stack_trace(struct trace_array *tr)
230{
231 mutex_lock(&sample_timer_lock);
232 tracing_reset_online_cpus(tr);
233 start_stack_timers();
234 tracer_enabled = 1;
235 mutex_unlock(&sample_timer_lock);
236}
237
238static void stop_stack_trace(struct trace_array *tr) 229static void stop_stack_trace(struct trace_array *tr)
239{ 230{
240 mutex_lock(&sample_timer_lock); 231 mutex_lock(&sample_timer_lock);
@@ -247,12 +238,18 @@ static int stack_trace_init(struct trace_array *tr)
247{ 238{
248 sysprof_trace = tr; 239 sysprof_trace = tr;
249 240
250 start_stack_trace(tr); 241 tracing_start_cmdline_record();
242
243 mutex_lock(&sample_timer_lock);
244 start_stack_timers();
245 tracer_enabled = 1;
246 mutex_unlock(&sample_timer_lock);
251 return 0; 247 return 0;
252} 248}
253 249
254static void stack_trace_reset(struct trace_array *tr) 250static void stack_trace_reset(struct trace_array *tr)
255{ 251{
252 tracing_stop_cmdline_record();
256 stop_stack_trace(tr); 253 stop_stack_trace(tr);
257} 254}
258 255
@@ -317,7 +314,7 @@ sysprof_sample_write(struct file *filp, const char __user *ubuf,
317 return cnt; 314 return cnt;
318} 315}
319 316
320static struct file_operations sysprof_sample_fops = { 317static const struct file_operations sysprof_sample_fops = {
321 .read = sysprof_sample_read, 318 .read = sysprof_sample_read,
322 .write = sysprof_sample_write, 319 .write = sysprof_sample_write,
323}; 320};
@@ -330,5 +327,5 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
330 d_tracer, NULL, &sysprof_sample_fops); 327 d_tracer, NULL, &sysprof_sample_fops);
331 if (entry) 328 if (entry)
332 return; 329 return;
333 pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n"); 330 pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
334} 331}
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
new file mode 100644
index 000000000000..797201e4a137
--- /dev/null
+++ b/kernel/trace/trace_workqueue.c
@@ -0,0 +1,288 @@
1/*
2 * Workqueue statistical tracer.
3 *
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 *
6 */
7
8
9#include <trace/workqueue.h>
10#include <linux/list.h>
11#include <linux/percpu.h>
12#include "trace_stat.h"
13#include "trace.h"
14
15
16/* A cpu workqueue thread */
17struct cpu_workqueue_stats {
18 struct list_head list;
19/* Useful to know if we print the cpu headers */
20 bool first_entry;
21 int cpu;
22 pid_t pid;
23/* Can be inserted from interrupt or user context, so it needs to be atomic */
24 atomic_t inserted;
25/*
26 * Doesn't need to be atomic: work items are serialized in a single workqueue thread
27 * on a single CPU.
28 */
29 unsigned int executed;
30};
31
32/* List of workqueue threads on one cpu */
33struct workqueue_global_stats {
34 struct list_head list;
35 spinlock_t lock;
36};
37
38/* No global lock needed because these are allocated before the workqueues, and
39 * never freed.
40 */
41static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
42#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
43
44/* Insertion of a work */
45static void
46probe_workqueue_insertion(struct task_struct *wq_thread,
47 struct work_struct *work)
48{
49 int cpu = cpumask_first(&wq_thread->cpus_allowed);
50 struct cpu_workqueue_stats *node, *next;
51 unsigned long flags;
52
53 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
54 list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
55 list) {
56 if (node->pid == wq_thread->pid) {
57 atomic_inc(&node->inserted);
58 goto found;
59 }
60 }
61 pr_debug("trace_workqueue: entry not found\n");
62found:
63 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
64}
65
66/* Execution of a work */
67static void
68probe_workqueue_execution(struct task_struct *wq_thread,
69 struct work_struct *work)
70{
71 int cpu = cpumask_first(&wq_thread->cpus_allowed);
72 struct cpu_workqueue_stats *node, *next;
73 unsigned long flags;
74
75 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76 list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
77 list) {
78 if (node->pid == wq_thread->pid) {
79 node->executed++;
80 goto found;
81 }
82 }
83 pr_debug("trace_workqueue: entry not found\n");
84found:
85 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
86}
87
88/* Creation of a cpu workqueue thread */
89static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
90{
91 struct cpu_workqueue_stats *cws;
92 unsigned long flags;
93
94 WARN_ON(cpu < 0);
95
96 /* Workqueues are sometimes created in atomic context */
97 cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
98 if (!cws) {
99 pr_warning("trace_workqueue: not enough memory\n");
100 return;
101 }
102 INIT_LIST_HEAD(&cws->list);
103 cws->cpu = cpu;
104
105 cws->pid = wq_thread->pid;
106
107 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
108 if (list_empty(&workqueue_cpu_stat(cpu)->list))
109 cws->first_entry = true;
110 list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
111 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
112}
113
114/* Destruction of a cpu workqueue thread */
115static void probe_workqueue_destruction(struct task_struct *wq_thread)
116{
117	/* A workqueue thread only executes on one cpu */
118 int cpu = cpumask_first(&wq_thread->cpus_allowed);
119 struct cpu_workqueue_stats *node, *next;
120 unsigned long flags;
121
122 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
123 list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
124 list) {
125 if (node->pid == wq_thread->pid) {
126 list_del(&node->list);
127 kfree(node);
128 goto found;
129 }
130 }
131
132 pr_debug("trace_workqueue: don't find workqueue to destroy\n");
133found:
134 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
135
136}
137
138static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
139{
140 unsigned long flags;
141 struct cpu_workqueue_stats *ret = NULL;
142
143
144 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
145
146 if (!list_empty(&workqueue_cpu_stat(cpu)->list))
147 ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
148 struct cpu_workqueue_stats, list);
149
150 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
151
152 return ret;
153}
154
155static void *workqueue_stat_start(void)
156{
157 int cpu;
158 void *ret = NULL;
159
160 for_each_possible_cpu(cpu) {
161 ret = workqueue_stat_start_cpu(cpu);
162 if (ret)
163 return ret;
164 }
165 return NULL;
166}
167
168static void *workqueue_stat_next(void *prev, int idx)
169{
170 struct cpu_workqueue_stats *prev_cws = prev;
171 int cpu = prev_cws->cpu;
172 unsigned long flags;
173 void *ret = NULL;
174
175 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
176 if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
177 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
178 do {
179 cpu = cpumask_next(cpu, cpu_possible_mask);
180 if (cpu >= nr_cpu_ids)
181 return NULL;
182 } while (!(ret = workqueue_stat_start_cpu(cpu)));
183 return ret;
184 }
185 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
186
187 return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
188 list);
189}
190
191static int workqueue_stat_show(struct seq_file *s, void *p)
192{
193 struct cpu_workqueue_stats *cws = p;
194 unsigned long flags;
195 int cpu = cws->cpu;
196 struct pid *pid;
197 struct task_struct *tsk;
198
199 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
200 if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
201 seq_printf(s, "\n");
202 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
203
204 pid = find_get_pid(cws->pid);
205 if (pid) {
206 tsk = get_pid_task(pid, PIDTYPE_PID);
207 if (tsk) {
208 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
209 atomic_read(&cws->inserted), cws->executed,
210 tsk->comm);
211 put_task_struct(tsk);
212 }
213 put_pid(pid);
214 }
215
216 return 0;
217}
218
219static int workqueue_stat_headers(struct seq_file *s)
220{
221 seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
222 seq_printf(s, "# | | | |\n");
223 return 0;
224}
225
226struct tracer_stat workqueue_stats __read_mostly = {
227 .name = "workqueues",
228 .stat_start = workqueue_stat_start,
229 .stat_next = workqueue_stat_next,
230 .stat_show = workqueue_stat_show,
231 .stat_headers = workqueue_stat_headers
232};
233
234
235int __init stat_workqueue_init(void)
236{
237 if (register_stat_tracer(&workqueue_stats)) {
238 pr_warning("Unable to register workqueue stat tracer\n");
239 return 1;
240 }
241
242 return 0;
243}
244fs_initcall(stat_workqueue_init);
245
246/*
247 * Workqueues are created very early, just after pre-smp initcalls.
248 * So we must register our tracepoints at this stage.
249 */
250int __init trace_workqueue_early_init(void)
251{
252 int ret, cpu;
253
254 ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
255 if (ret)
256 goto out;
257
258 ret = register_trace_workqueue_execution(probe_workqueue_execution);
259 if (ret)
260 goto no_insertion;
261
262 ret = register_trace_workqueue_creation(probe_workqueue_creation);
263 if (ret)
264 goto no_execution;
265
266 ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
267 if (ret)
268 goto no_creation;
269
270 for_each_possible_cpu(cpu) {
271 spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
272 INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
273 }
274
275 return 0;
276
277no_creation:
278 unregister_trace_workqueue_creation(probe_workqueue_creation);
279no_execution:
280 unregister_trace_workqueue_execution(probe_workqueue_execution);
281no_insertion:
282 unregister_trace_workqueue_insertion(probe_workqueue_insertion);
283out:
284 pr_warning("trace_workqueue: unable to trace workqueues\n");
285
286 return 1;
287}
288early_initcall(trace_workqueue_early_init);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 79602740bbb5..1ef5d3a601c7 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -272,12 +272,15 @@ static void disable_tracepoint(struct tracepoint *elem)
272 * 272 *
273 * Updates the probe callback corresponding to a range of tracepoints. 273 * Updates the probe callback corresponding to a range of tracepoints.
274 */ 274 */
275void tracepoint_update_probe_range(struct tracepoint *begin, 275void
276 struct tracepoint *end) 276tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
277{ 277{
278 struct tracepoint *iter; 278 struct tracepoint *iter;
279 struct tracepoint_entry *mark_entry; 279 struct tracepoint_entry *mark_entry;
280 280
281 if (!begin)
282 return;
283
281 mutex_lock(&tracepoints_mutex); 284 mutex_lock(&tracepoints_mutex);
282 for (iter = begin; iter < end; iter++) { 285 for (iter = begin; iter < end; iter++) {
283 mark_entry = get_tracepoint(iter->name); 286 mark_entry = get_tracepoint(iter->name);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9aedd9fd825b..3003ecad08f4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,6 +33,7 @@
33#include <linux/kallsyms.h> 33#include <linux/kallsyms.h>
34#include <linux/debug_locks.h> 34#include <linux/debug_locks.h>
35#include <linux/lockdep.h> 35#include <linux/lockdep.h>
36#include <trace/workqueue.h>
36 37
37/* 38/*
38 * The per-CPU workqueue (if single thread, we always use the first 39 * The per-CPU workqueue (if single thread, we always use the first
@@ -125,9 +126,13 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
125 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); 126 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
126} 127}
127 128
129DEFINE_TRACE(workqueue_insertion);
130
128static void insert_work(struct cpu_workqueue_struct *cwq, 131static void insert_work(struct cpu_workqueue_struct *cwq,
129 struct work_struct *work, struct list_head *head) 132 struct work_struct *work, struct list_head *head)
130{ 133{
134 trace_workqueue_insertion(cwq->thread, work);
135
131 set_wq_data(work, cwq); 136 set_wq_data(work, cwq);
132 /* 137 /*
133 * Ensure that we get the right work->data if we see the 138 * Ensure that we get the right work->data if we see the
@@ -259,6 +264,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
259} 264}
260EXPORT_SYMBOL_GPL(queue_delayed_work_on); 265EXPORT_SYMBOL_GPL(queue_delayed_work_on);
261 266
267DEFINE_TRACE(workqueue_execution);
268
262static void run_workqueue(struct cpu_workqueue_struct *cwq) 269static void run_workqueue(struct cpu_workqueue_struct *cwq)
263{ 270{
264 spin_lock_irq(&cwq->lock); 271 spin_lock_irq(&cwq->lock);
@@ -284,7 +291,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
284 */ 291 */
285 struct lockdep_map lockdep_map = work->lockdep_map; 292 struct lockdep_map lockdep_map = work->lockdep_map;
286#endif 293#endif
287 294 trace_workqueue_execution(cwq->thread, work);
288 cwq->current_work = work; 295 cwq->current_work = work;
289 list_del_init(cwq->worklist.next); 296 list_del_init(cwq->worklist.next);
290 spin_unlock_irq(&cwq->lock); 297 spin_unlock_irq(&cwq->lock);
@@ -765,6 +772,8 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
765 return cwq; 772 return cwq;
766} 773}
767 774
775DEFINE_TRACE(workqueue_creation);
776
768static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) 777static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
769{ 778{
770 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 779 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -787,6 +796,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
787 sched_setscheduler_nocheck(p, SCHED_FIFO, &param); 796 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
788 cwq->thread = p; 797 cwq->thread = p;
789 798
799 trace_workqueue_creation(cwq->thread, cpu);
800
790 return 0; 801 return 0;
791} 802}
792 803
@@ -868,6 +879,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
868} 879}
869EXPORT_SYMBOL_GPL(__create_workqueue_key); 880EXPORT_SYMBOL_GPL(__create_workqueue_key);
870 881
882DEFINE_TRACE(workqueue_destruction);
883
871static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) 884static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
872{ 885{
873 /* 886 /*
@@ -891,6 +904,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
891 * checks list_empty(), and a "normal" queue_work() can't use 904 * checks list_empty(), and a "normal" queue_work() can't use
892 * a dead CPU. 905 * a dead CPU.
893 */ 906 */
907 trace_workqueue_destruction(cwq->thread);
894 kthread_stop(cwq->thread); 908 kthread_stop(cwq->thread);
895 cwq->thread = NULL; 909 cwq->thread = NULL;
896} 910}
diff --git a/lib/Kconfig b/lib/Kconfig
index 2a9c69f34482..8ade0a7a91e0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -2,6 +2,9 @@
2# Library configuration 2# Library configuration
3# 3#
4 4
5config BINARY_PRINTF
6 def_bool n
7
5menu "Library routines" 8menu "Library routines"
6 9
7config BITREVERSE 10config BITREVERSE
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 280332c1827c..619313ed6c46 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -157,11 +157,11 @@ static void init_shared_classes(void)
157#define SOFTIRQ_ENTER() \ 157#define SOFTIRQ_ENTER() \
158 local_bh_disable(); \ 158 local_bh_disable(); \
159 local_irq_disable(); \ 159 local_irq_disable(); \
160 trace_softirq_enter(); \ 160 lockdep_softirq_enter(); \
161 WARN_ON(!in_softirq()); 161 WARN_ON(!in_softirq());
162 162
163#define SOFTIRQ_EXIT() \ 163#define SOFTIRQ_EXIT() \
164 trace_softirq_exit(); \ 164 lockdep_softirq_exit(); \
165 local_irq_enable(); \ 165 local_irq_enable(); \
166 local_bh_enable(); 166 local_bh_enable();
167 167
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0fbd0121d91d..dc1674377009 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -396,7 +396,38 @@ static noinline char* put_dec(char *buf, unsigned long long num)
396#define SMALL 32 /* Must be 32 == 0x20 */ 396#define SMALL 32 /* Must be 32 == 0x20 */
397#define SPECIAL 64 /* 0x */ 397#define SPECIAL 64 /* 0x */
398 398
399static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) 399enum format_type {
400 FORMAT_TYPE_NONE, /* Just a string part */
401 FORMAT_TYPE_WITDH,
402 FORMAT_TYPE_PRECISION,
403 FORMAT_TYPE_CHAR,
404 FORMAT_TYPE_STR,
405 FORMAT_TYPE_PTR,
406 FORMAT_TYPE_PERCENT_CHAR,
407 FORMAT_TYPE_INVALID,
408 FORMAT_TYPE_LONG_LONG,
409 FORMAT_TYPE_ULONG,
410 FORMAT_TYPE_LONG,
411 FORMAT_TYPE_USHORT,
412 FORMAT_TYPE_SHORT,
413 FORMAT_TYPE_UINT,
414 FORMAT_TYPE_INT,
415 FORMAT_TYPE_NRCHARS,
416 FORMAT_TYPE_SIZE_T,
417 FORMAT_TYPE_PTRDIFF
418};
419
420struct printf_spec {
421 enum format_type type;
422 int flags; /* flags to number() */
423 int field_width; /* width of output field */
424 int base;
425 int precision; /* # of digits/chars */
426 int qualifier;
427};
428
429static char *number(char *buf, char *end, unsigned long long num,
430 struct printf_spec spec)
400{ 431{
401 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 432 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
402 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 433 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
@@ -404,32 +435,32 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
404 char tmp[66]; 435 char tmp[66];
405 char sign; 436 char sign;
406 char locase; 437 char locase;
407 int need_pfx = ((type & SPECIAL) && base != 10); 438 int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
408 int i; 439 int i;
409 440
410 /* locase = 0 or 0x20. ORing digits or letters with 'locase' 441 /* locase = 0 or 0x20. ORing digits or letters with 'locase'
411 * produces same digits or (maybe lowercased) letters */ 442 * produces same digits or (maybe lowercased) letters */
412 locase = (type & SMALL); 443 locase = (spec.flags & SMALL);
413 if (type & LEFT) 444 if (spec.flags & LEFT)
414 type &= ~ZEROPAD; 445 spec.flags &= ~ZEROPAD;
415 sign = 0; 446 sign = 0;
416 if (type & SIGN) { 447 if (spec.flags & SIGN) {
417 if ((signed long long) num < 0) { 448 if ((signed long long) num < 0) {
418 sign = '-'; 449 sign = '-';
419 num = - (signed long long) num; 450 num = - (signed long long) num;
420 size--; 451 spec.field_width--;
421 } else if (type & PLUS) { 452 } else if (spec.flags & PLUS) {
422 sign = '+'; 453 sign = '+';
423 size--; 454 spec.field_width--;
424 } else if (type & SPACE) { 455 } else if (spec.flags & SPACE) {
425 sign = ' '; 456 sign = ' ';
426 size--; 457 spec.field_width--;
427 } 458 }
428 } 459 }
429 if (need_pfx) { 460 if (need_pfx) {
430 size--; 461 spec.field_width--;
431 if (base == 16) 462 if (spec.base == 16)
432 size--; 463 spec.field_width--;
433 } 464 }
434 465
435 /* generate full string in tmp[], in reverse order */ 466 /* generate full string in tmp[], in reverse order */
@@ -441,10 +472,10 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
441 tmp[i++] = (digits[do_div(num,base)] | locase); 472 tmp[i++] = (digits[do_div(num,base)] | locase);
442 } while (num != 0); 473 } while (num != 0);
443 */ 474 */
444 else if (base != 10) { /* 8 or 16 */ 475 else if (spec.base != 10) { /* 8 or 16 */
445 int mask = base - 1; 476 int mask = spec.base - 1;
446 int shift = 3; 477 int shift = 3;
447 if (base == 16) shift = 4; 478 if (spec.base == 16) shift = 4;
448 do { 479 do {
449 tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 480 tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
450 num >>= shift; 481 num >>= shift;
@@ -454,12 +485,12 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
454 } 485 }
455 486
456 /* printing 100 using %2d gives "100", not "00" */ 487 /* printing 100 using %2d gives "100", not "00" */
457 if (i > precision) 488 if (i > spec.precision)
458 precision = i; 489 spec.precision = i;
459 /* leading space padding */ 490 /* leading space padding */
460 size -= precision; 491 spec.field_width -= spec.precision;
461 if (!(type & (ZEROPAD+LEFT))) { 492 if (!(spec.flags & (ZEROPAD+LEFT))) {
462 while(--size >= 0) { 493 while(--spec.field_width >= 0) {
463 if (buf < end) 494 if (buf < end)
464 *buf = ' '; 495 *buf = ' ';
465 ++buf; 496 ++buf;
@@ -476,23 +507,23 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
476 if (buf < end) 507 if (buf < end)
477 *buf = '0'; 508 *buf = '0';
478 ++buf; 509 ++buf;
479 if (base == 16) { 510 if (spec.base == 16) {
480 if (buf < end) 511 if (buf < end)
481 *buf = ('X' | locase); 512 *buf = ('X' | locase);
482 ++buf; 513 ++buf;
483 } 514 }
484 } 515 }
485 /* zero or space padding */ 516 /* zero or space padding */
486 if (!(type & LEFT)) { 517 if (!(spec.flags & LEFT)) {
487 char c = (type & ZEROPAD) ? '0' : ' '; 518 char c = (spec.flags & ZEROPAD) ? '0' : ' ';
488 while (--size >= 0) { 519 while (--spec.field_width >= 0) {
489 if (buf < end) 520 if (buf < end)
490 *buf = c; 521 *buf = c;
491 ++buf; 522 ++buf;
492 } 523 }
493 } 524 }
494 /* hmm even more zero padding? */ 525 /* hmm even more zero padding? */
495 while (i <= --precision) { 526 while (i <= --spec.precision) {
496 if (buf < end) 527 if (buf < end)
497 *buf = '0'; 528 *buf = '0';
498 ++buf; 529 ++buf;
@@ -504,7 +535,7 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
504 ++buf; 535 ++buf;
505 } 536 }
506 /* trailing space padding */ 537 /* trailing space padding */
507 while (--size >= 0) { 538 while (--spec.field_width >= 0) {
508 if (buf < end) 539 if (buf < end)
509 *buf = ' '; 540 *buf = ' ';
510 ++buf; 541 ++buf;
@@ -512,17 +543,17 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
512 return buf; 543 return buf;
513} 544}
514 545
515static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) 546static char *string(char *buf, char *end, char *s, struct printf_spec spec)
516{ 547{
517 int len, i; 548 int len, i;
518 549
519 if ((unsigned long)s < PAGE_SIZE) 550 if ((unsigned long)s < PAGE_SIZE)
520 s = "<NULL>"; 551 s = "<NULL>";
521 552
522 len = strnlen(s, precision); 553 len = strnlen(s, spec.precision);
523 554
524 if (!(flags & LEFT)) { 555 if (!(spec.flags & LEFT)) {
525 while (len < field_width--) { 556 while (len < spec.field_width--) {
526 if (buf < end) 557 if (buf < end)
527 *buf = ' '; 558 *buf = ' ';
528 ++buf; 559 ++buf;
@@ -533,7 +564,7 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
533 *buf = *s; 564 *buf = *s;
534 ++buf; ++s; 565 ++buf; ++s;
535 } 566 }
536 while (len < field_width--) { 567 while (len < spec.field_width--) {
537 if (buf < end) 568 if (buf < end)
538 *buf = ' '; 569 *buf = ' ';
539 ++buf; 570 ++buf;
@@ -541,21 +572,24 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
541 return buf; 572 return buf;
542} 573}
543 574
544static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) 575static char *symbol_string(char *buf, char *end, void *ptr,
576 struct printf_spec spec)
545{ 577{
546 unsigned long value = (unsigned long) ptr; 578 unsigned long value = (unsigned long) ptr;
547#ifdef CONFIG_KALLSYMS 579#ifdef CONFIG_KALLSYMS
548 char sym[KSYM_SYMBOL_LEN]; 580 char sym[KSYM_SYMBOL_LEN];
549 sprint_symbol(sym, value); 581 sprint_symbol(sym, value);
550 return string(buf, end, sym, field_width, precision, flags); 582 return string(buf, end, sym, spec);
551#else 583#else
552 field_width = 2*sizeof(void *); 584 spec.field_width = 2*sizeof(void *);
553 flags |= SPECIAL | SMALL | ZEROPAD; 585 spec.flags |= SPECIAL | SMALL | ZEROPAD;
554 return number(buf, end, value, 16, field_width, precision, flags); 586 spec.base = 16;
587 return number(buf, end, value, spec);
555#endif 588#endif
556} 589}
557 590
558static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags) 591static char *resource_string(char *buf, char *end, struct resource *res,
592 struct printf_spec spec)
559{ 593{
560#ifndef IO_RSRC_PRINTK_SIZE 594#ifndef IO_RSRC_PRINTK_SIZE
561#define IO_RSRC_PRINTK_SIZE 4 595#define IO_RSRC_PRINTK_SIZE 4
@@ -564,7 +598,11 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
564#ifndef MEM_RSRC_PRINTK_SIZE 598#ifndef MEM_RSRC_PRINTK_SIZE
565#define MEM_RSRC_PRINTK_SIZE 8 599#define MEM_RSRC_PRINTK_SIZE 8
566#endif 600#endif
567 601 struct printf_spec num_spec = {
602 .base = 16,
603 .precision = -1,
604 .flags = SPECIAL | SMALL | ZEROPAD,
605 };
568 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ 606 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
569 char sym[4*sizeof(resource_size_t) + 8]; 607 char sym[4*sizeof(resource_size_t) + 8];
570 char *p = sym, *pend = sym + sizeof(sym); 608 char *p = sym, *pend = sym + sizeof(sym);
@@ -576,17 +614,18 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
576 size = MEM_RSRC_PRINTK_SIZE; 614 size = MEM_RSRC_PRINTK_SIZE;
577 615
578 *p++ = '['; 616 *p++ = '[';
579 p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD); 617 num_spec.field_width = size;
618 p = number(p, pend, res->start, num_spec);
580 *p++ = '-'; 619 *p++ = '-';
581 p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD); 620 p = number(p, pend, res->end, num_spec);
582 *p++ = ']'; 621 *p++ = ']';
583 *p = 0; 622 *p = 0;
584 623
585 return string(buf, end, sym, field_width, precision, flags); 624 return string(buf, end, sym, spec);
586} 625}
587 626
588static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width, 627static char *mac_address_string(char *buf, char *end, u8 *addr,
589 int precision, int flags) 628 struct printf_spec spec)
590{ 629{
591 char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */ 630 char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
592 char *p = mac_addr; 631 char *p = mac_addr;
@@ -594,16 +633,17 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width,
594 633
595 for (i = 0; i < 6; i++) { 634 for (i = 0; i < 6; i++) {
596 p = pack_hex_byte(p, addr[i]); 635 p = pack_hex_byte(p, addr[i]);
597 if (!(flags & SPECIAL) && i != 5) 636 if (!(spec.flags & SPECIAL) && i != 5)
598 *p++ = ':'; 637 *p++ = ':';
599 } 638 }
600 *p = '\0'; 639 *p = '\0';
640 spec.flags &= ~SPECIAL;
601 641
602 return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL); 642 return string(buf, end, mac_addr, spec);
603} 643}
604 644
605static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width, 645static char *ip6_addr_string(char *buf, char *end, u8 *addr,
606 int precision, int flags) 646 struct printf_spec spec)
607{ 647{
608 char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */ 648 char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
609 char *p = ip6_addr; 649 char *p = ip6_addr;
@@ -612,16 +652,17 @@ static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width,
612 for (i = 0; i < 8; i++) { 652 for (i = 0; i < 8; i++) {
613 p = pack_hex_byte(p, addr[2 * i]); 653 p = pack_hex_byte(p, addr[2 * i]);
614 p = pack_hex_byte(p, addr[2 * i + 1]); 654 p = pack_hex_byte(p, addr[2 * i + 1]);
615 if (!(flags & SPECIAL) && i != 7) 655 if (!(spec.flags & SPECIAL) && i != 7)
616 *p++ = ':'; 656 *p++ = ':';
617 } 657 }
618 *p = '\0'; 658 *p = '\0';
659 spec.flags &= ~SPECIAL;
619 660
620 return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL); 661 return string(buf, end, ip6_addr, spec);
621} 662}
622 663
623static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, 664static char *ip4_addr_string(char *buf, char *end, u8 *addr,
624 int precision, int flags) 665 struct printf_spec spec)
625{ 666{
626 char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */ 667 char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
627 char temp[3]; /* hold each IP quad in reverse order */ 668 char temp[3]; /* hold each IP quad in reverse order */
@@ -637,8 +678,9 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
637 *p++ = '.'; 678 *p++ = '.';
638 } 679 }
639 *p = '\0'; 680 *p = '\0';
681 spec.flags &= ~SPECIAL;
640 682
641 return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL); 683 return string(buf, end, ip4_addr, spec);
642} 684}
643 685
644/* 686/*
@@ -663,41 +705,233 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
663 * function pointers are really function descriptors, which contain a 705 * function pointers are really function descriptors, which contain a
664 * pointer to the real address. 706 * pointer to the real address.
665 */ 707 */
666static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) 708static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
709 struct printf_spec spec)
667{ 710{
668 if (!ptr) 711 if (!ptr)
669 return string(buf, end, "(null)", field_width, precision, flags); 712 return string(buf, end, "(null)", spec);
670 713
671 switch (*fmt) { 714 switch (*fmt) {
672 case 'F': 715 case 'F':
673 ptr = dereference_function_descriptor(ptr); 716 ptr = dereference_function_descriptor(ptr);
674 /* Fallthrough */ 717 /* Fallthrough */
675 case 'S': 718 case 'S':
676 return symbol_string(buf, end, ptr, field_width, precision, flags); 719 return symbol_string(buf, end, ptr, spec);
677 case 'R': 720 case 'R':
678 return resource_string(buf, end, ptr, field_width, precision, flags); 721 return resource_string(buf, end, ptr, spec);
679 case 'm': 722 case 'm':
680 flags |= SPECIAL; 723 spec.flags |= SPECIAL;
681 /* Fallthrough */ 724 /* Fallthrough */
682 case 'M': 725 case 'M':
683 return mac_address_string(buf, end, ptr, field_width, precision, flags); 726 return mac_address_string(buf, end, ptr, spec);
684 case 'i': 727 case 'i':
685 flags |= SPECIAL; 728 spec.flags |= SPECIAL;
686 /* Fallthrough */ 729 /* Fallthrough */
687 case 'I': 730 case 'I':
688 if (fmt[1] == '6') 731 if (fmt[1] == '6')
689 return ip6_addr_string(buf, end, ptr, field_width, precision, flags); 732 return ip6_addr_string(buf, end, ptr, spec);
690 if (fmt[1] == '4') 733 if (fmt[1] == '4')
691 return ip4_addr_string(buf, end, ptr, field_width, precision, flags); 734 return ip4_addr_string(buf, end, ptr, spec);
692 flags &= ~SPECIAL; 735 spec.flags &= ~SPECIAL;
736 break;
737 }
738 spec.flags |= SMALL;
739 if (spec.field_width == -1) {
740 spec.field_width = 2*sizeof(void *);
741 spec.flags |= ZEROPAD;
742 }
743 spec.base = 16;
744
745 return number(buf, end, (unsigned long) ptr, spec);
746}
747
748/*
749 * Helper function to decode printf style format.
750 * Each call decodes a token from the format and returns the
751 * number of characters read, i.e. how far the caller should
752 * advance the format string before the next call.
753 * The decoded token is returned through the parameters
754 *
755 * 'h', 'l', or 'L' for integer fields
756 * 'z' support added 23/7/1999 S.H.
757 * 'z' changed to 'Z' --davidm 1/25/99
758 * 't' added for ptrdiff_t
759 *
760 * @fmt: the format string
761 * @type: the type of the token returned
762 * @flags: various flags such as +, -, # tokens..
763 * @field_width: overwritten width
764 * @base: base of the number (octal, hex, ...)
765 * @precision: precision of a number
766 * @qualifier: qualifier of a number (long, size_t, ...)
767 */
768static int format_decode(const char *fmt, struct printf_spec *spec)
769{
770 const char *start = fmt;
771
772 /* we finished early by reading the field width */
773 if (spec->type == FORMAT_TYPE_WITDH) {
774 if (spec->field_width < 0) {
775 spec->field_width = -spec->field_width;
776 spec->flags |= LEFT;
777 }
778 spec->type = FORMAT_TYPE_NONE;
779 goto precision;
780 }
781
782 /* we finished early by reading the precision */
783 if (spec->type == FORMAT_TYPE_PRECISION) {
784 if (spec->precision < 0)
785 spec->precision = 0;
786
787 spec->type = FORMAT_TYPE_NONE;
788 goto qualifier;
789 }
790
791 /* By default */
792 spec->type = FORMAT_TYPE_NONE;
793
794 for (; *fmt ; ++fmt) {
795 if (*fmt == '%')
796 break;
797 }
798
799 /* Return the current non-format string */
800 if (fmt != start || !*fmt)
801 return fmt - start;
802
803 /* Process flags */
804 spec->flags = 0;
805
806 while (1) { /* this also skips first '%' */
807 bool found = true;
808
809 ++fmt;
810
811 switch (*fmt) {
812 case '-': spec->flags |= LEFT; break;
813 case '+': spec->flags |= PLUS; break;
814 case ' ': spec->flags |= SPACE; break;
815 case '#': spec->flags |= SPECIAL; break;
816 case '0': spec->flags |= ZEROPAD; break;
817 default: found = false;
818 }
819
820 if (!found)
821 break;
822 }
823
824 /* get field width */
825 spec->field_width = -1;
826
827 if (isdigit(*fmt))
828 spec->field_width = skip_atoi(&fmt);
829 else if (*fmt == '*') {
830 /* it's the next argument */
831 spec->type = FORMAT_TYPE_WITDH;
832 return ++fmt - start;
833 }
834
835precision:
836 /* get the precision */
837 spec->precision = -1;
838 if (*fmt == '.') {
839 ++fmt;
840 if (isdigit(*fmt)) {
841 spec->precision = skip_atoi(&fmt);
842 if (spec->precision < 0)
843 spec->precision = 0;
844 } else if (*fmt == '*') {
845 /* it's the next argument */
846 spec->type = FORMAT_TYPE_WITDH;
847 return ++fmt - start;
848 }
849 }
850
851qualifier:
852 /* get the conversion qualifier */
853 spec->qualifier = -1;
854 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
855 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
856 spec->qualifier = *fmt;
857 ++fmt;
858 if (spec->qualifier == 'l' && *fmt == 'l') {
859 spec->qualifier = 'L';
860 ++fmt;
861 }
862 }
863
864 /* default base */
865 spec->base = 10;
866 switch (*fmt) {
867 case 'c':
868 spec->type = FORMAT_TYPE_CHAR;
869 return ++fmt - start;
870
871 case 's':
872 spec->type = FORMAT_TYPE_STR;
873 return ++fmt - start;
874
875 case 'p':
876 spec->type = FORMAT_TYPE_PTR;
877 return fmt - start;
878 /* skip alnum */
879
880 case 'n':
881 spec->type = FORMAT_TYPE_NRCHARS;
882 return ++fmt - start;
883
884 case '%':
885 spec->type = FORMAT_TYPE_PERCENT_CHAR;
886 return ++fmt - start;
887
888 /* integer number formats - set up the flags and "break" */
889 case 'o':
890 spec->base = 8;
693 break; 891 break;
892
893 case 'x':
894 spec->flags |= SMALL;
895
896 case 'X':
897 spec->base = 16;
898 break;
899
900 case 'd':
901 case 'i':
902 spec->flags |= SIGN;
903 case 'u':
904 break;
905
906 default:
907 spec->type = FORMAT_TYPE_INVALID;
908 return fmt - start;
694 } 909 }
695 flags |= SMALL; 910
696 if (field_width == -1) { 911 if (spec->qualifier == 'L')
697 field_width = 2*sizeof(void *); 912 spec->type = FORMAT_TYPE_LONG_LONG;
698 flags |= ZEROPAD; 913 else if (spec->qualifier == 'l') {
914 if (spec->flags & SIGN)
915 spec->type = FORMAT_TYPE_LONG;
916 else
917 spec->type = FORMAT_TYPE_ULONG;
918 } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') {
919 spec->type = FORMAT_TYPE_SIZE_T;
920 } else if (spec->qualifier == 't') {
921 spec->type = FORMAT_TYPE_PTRDIFF;
922 } else if (spec->qualifier == 'h') {
923 if (spec->flags & SIGN)
924 spec->type = FORMAT_TYPE_SHORT;
925 else
926 spec->type = FORMAT_TYPE_USHORT;
927 } else {
928 if (spec->flags & SIGN)
929 spec->type = FORMAT_TYPE_INT;
930 else
931 spec->type = FORMAT_TYPE_UINT;
699 } 932 }
700 return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags); 933
934 return ++fmt - start;
701} 935}
702 936
703/** 937/**
@@ -726,18 +960,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
726int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 960int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
727{ 961{
728 unsigned long long num; 962 unsigned long long num;
729 int base;
730 char *str, *end, c; 963 char *str, *end, c;
731 964 int read;
732 int flags; /* flags to number() */ 965 struct printf_spec spec = {0};
733
734 int field_width; /* width of output field */
735 int precision; /* min. # of digits for integers; max
736 number of chars for from string */
737 int qualifier; /* 'h', 'l', or 'L' for integer fields */
738 /* 'z' support added 23/7/1999 S.H. */
739 /* 'z' changed to 'Z' --davidm 1/25/99 */
740 /* 't' added for ptrdiff_t */
741 966
742 /* Reject out-of-range values early. Large positive sizes are 967 /* Reject out-of-range values early. Large positive sizes are
743 used for unknown buffer sizes. */ 968 used for unknown buffer sizes. */
@@ -758,184 +983,144 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
758 size = end - buf; 983 size = end - buf;
759 } 984 }
760 985
761 for (; *fmt ; ++fmt) { 986 while (*fmt) {
762 if (*fmt != '%') { 987 const char *old_fmt = fmt;
763 if (str < end)
764 *str = *fmt;
765 ++str;
766 continue;
767 }
768 988
769 /* process flags */ 989 read = format_decode(fmt, &spec);
770 flags = 0;
771 repeat:
772 ++fmt; /* this also skips first '%' */
773 switch (*fmt) {
774 case '-': flags |= LEFT; goto repeat;
775 case '+': flags |= PLUS; goto repeat;
776 case ' ': flags |= SPACE; goto repeat;
777 case '#': flags |= SPECIAL; goto repeat;
778 case '0': flags |= ZEROPAD; goto repeat;
779 }
780 990
781 /* get field width */ 991 fmt += read;
782 field_width = -1;
783 if (isdigit(*fmt))
784 field_width = skip_atoi(&fmt);
785 else if (*fmt == '*') {
786 ++fmt;
787 /* it's the next argument */
788 field_width = va_arg(args, int);
789 if (field_width < 0) {
790 field_width = -field_width;
791 flags |= LEFT;
792 }
793 }
794 992
795 /* get the precision */ 993 switch (spec.type) {
796 precision = -1; 994 case FORMAT_TYPE_NONE: {
797 if (*fmt == '.') { 995 int copy = read;
798 ++fmt; 996 if (str < end) {
799 if (isdigit(*fmt)) 997 if (copy > end - str)
800 precision = skip_atoi(&fmt); 998 copy = end - str;
801 else if (*fmt == '*') { 999 memcpy(str, old_fmt, copy);
802 ++fmt;
803 /* it's the next argument */
804 precision = va_arg(args, int);
805 } 1000 }
806 if (precision < 0) 1001 str += read;
807 precision = 0; 1002 break;
808 } 1003 }
809 1004
810 /* get the conversion qualifier */ 1005 case FORMAT_TYPE_WITDH:
811 qualifier = -1; 1006 spec.field_width = va_arg(args, int);
812 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1007 break;
813 *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
814 qualifier = *fmt;
815 ++fmt;
816 if (qualifier == 'l' && *fmt == 'l') {
817 qualifier = 'L';
818 ++fmt;
819 }
820 }
821 1008
822 /* default base */ 1009 case FORMAT_TYPE_PRECISION:
823 base = 10; 1010 spec.precision = va_arg(args, int);
1011 break;
824 1012
825 switch (*fmt) { 1013 case FORMAT_TYPE_CHAR:
826 case 'c': 1014 if (!(spec.flags & LEFT)) {
827 if (!(flags & LEFT)) { 1015 while (--spec.field_width > 0) {
828 while (--field_width > 0) {
829 if (str < end)
830 *str = ' ';
831 ++str;
832 }
833 }
834 c = (unsigned char) va_arg(args, int);
835 if (str < end)
836 *str = c;
837 ++str;
838 while (--field_width > 0) {
839 if (str < end) 1016 if (str < end)
840 *str = ' '; 1017 *str = ' ';
841 ++str; 1018 ++str;
842 }
843 continue;
844
845 case 's':
846 str = string(str, end, va_arg(args, char *), field_width, precision, flags);
847 continue;
848
849 case 'p':
850 str = pointer(fmt+1, str, end,
851 va_arg(args, void *),
852 field_width, precision, flags);
853 /* Skip all alphanumeric pointer suffixes */
854 while (isalnum(fmt[1]))
855 fmt++;
856 continue;
857
858 case 'n':
859 /* FIXME:
860 * What does C99 say about the overflow case here? */
861 if (qualifier == 'l') {
862 long * ip = va_arg(args, long *);
863 *ip = (str - buf);
864 } else if (qualifier == 'Z' || qualifier == 'z') {
865 size_t * ip = va_arg(args, size_t *);
866 *ip = (str - buf);
867 } else {
868 int * ip = va_arg(args, int *);
869 *ip = (str - buf);
870 }
871 continue;
872 1019
873 case '%': 1020 }
1021 }
1022 c = (unsigned char) va_arg(args, int);
1023 if (str < end)
1024 *str = c;
1025 ++str;
1026 while (--spec.field_width > 0) {
874 if (str < end) 1027 if (str < end)
875 *str = '%'; 1028 *str = ' ';
876 ++str; 1029 ++str;
877 continue; 1030 }
1031 break;
878 1032
879 /* integer number formats - set up the flags and "break" */ 1033 case FORMAT_TYPE_STR:
880 case 'o': 1034 str = string(str, end, va_arg(args, char *), spec);
881 base = 8; 1035 break;
882 break;
883 1036
884 case 'x': 1037 case FORMAT_TYPE_PTR:
885 flags |= SMALL; 1038 str = pointer(fmt+1, str, end, va_arg(args, void *),
886 case 'X': 1039 spec);
887 base = 16; 1040 while (isalnum(*fmt))
888 break; 1041 fmt++;
1042 break;
889 1043
890 case 'd': 1044 case FORMAT_TYPE_PERCENT_CHAR:
891 case 'i': 1045 if (str < end)
892 flags |= SIGN; 1046 *str = '%';
893 case 'u': 1047 ++str;
894 break; 1048 break;
895 1049
896 default: 1050 case FORMAT_TYPE_INVALID:
1051 if (str < end)
1052 *str = '%';
1053 ++str;
1054 if (*fmt) {
897 if (str < end) 1055 if (str < end)
898 *str = '%'; 1056 *str = *fmt;
899 ++str; 1057 ++str;
900 if (*fmt) { 1058 } else {
901 if (str < end) 1059 --fmt;
902 *str = *fmt; 1060 }
903 ++str; 1061 break;
904 } else { 1062
905 --fmt; 1063 case FORMAT_TYPE_NRCHARS: {
906 } 1064 int qualifier = spec.qualifier;
907 continue; 1065
1066 if (qualifier == 'l') {
1067 long *ip = va_arg(args, long *);
1068 *ip = (str - buf);
1069 } else if (qualifier == 'Z' ||
1070 qualifier == 'z') {
1071 size_t *ip = va_arg(args, size_t *);
1072 *ip = (str - buf);
1073 } else {
1074 int *ip = va_arg(args, int *);
1075 *ip = (str - buf);
1076 }
1077 break;
908 } 1078 }
909 if (qualifier == 'L') 1079
910 num = va_arg(args, long long); 1080 default:
911 else if (qualifier == 'l') { 1081 switch (spec.type) {
912 num = va_arg(args, unsigned long); 1082 case FORMAT_TYPE_LONG_LONG:
913 if (flags & SIGN) 1083 num = va_arg(args, long long);
914 num = (signed long) num; 1084 break;
915 } else if (qualifier == 'Z' || qualifier == 'z') { 1085 case FORMAT_TYPE_ULONG:
916 num = va_arg(args, size_t); 1086 num = va_arg(args, unsigned long);
917 } else if (qualifier == 't') { 1087 break;
918 num = va_arg(args, ptrdiff_t); 1088 case FORMAT_TYPE_LONG:
919 } else if (qualifier == 'h') { 1089 num = va_arg(args, long);
920 num = (unsigned short) va_arg(args, int); 1090 break;
921 if (flags & SIGN) 1091 case FORMAT_TYPE_SIZE_T:
922 num = (signed short) num; 1092 num = va_arg(args, size_t);
923 } else { 1093 break;
924 num = va_arg(args, unsigned int); 1094 case FORMAT_TYPE_PTRDIFF:
925 if (flags & SIGN) 1095 num = va_arg(args, ptrdiff_t);
926 num = (signed int) num; 1096 break;
1097 case FORMAT_TYPE_USHORT:
1098 num = (unsigned short) va_arg(args, int);
1099 break;
1100 case FORMAT_TYPE_SHORT:
1101 num = (short) va_arg(args, int);
1102 break;
1103 case FORMAT_TYPE_INT:
1104 num = (int) va_arg(args, int);
1105 break;
1106 default:
1107 num = va_arg(args, unsigned int);
1108 }
1109
1110 str = number(str, end, num, spec);
927 } 1111 }
928 str = number(str, end, num, base,
929 field_width, precision, flags);
930 } 1112 }
1113
931 if (size > 0) { 1114 if (size > 0) {
932 if (str < end) 1115 if (str < end)
933 *str = '\0'; 1116 *str = '\0';
934 else 1117 else
935 end[-1] = '\0'; 1118 end[-1] = '\0';
936 } 1119 }
1120
937 /* the trailing null byte doesn't count towards the total */ 1121 /* the trailing null byte doesn't count towards the total */
938 return str-buf; 1122 return str-buf;
1123
939} 1124}
940EXPORT_SYMBOL(vsnprintf); 1125EXPORT_SYMBOL(vsnprintf);
941 1126
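The rewrite above moves all per-conversion parsing out of vsnprintf(): format_decode() fills a struct printf_spec and the main loop only dispatches on spec.type, which is what lets vbin_printf() and bstr_printf() further down reuse exactly the same walk over the format string. A minimal user-space sketch of that split, handling only %d, %s and %% (the struct layout and helper names here are illustrative, not the kernel's definitions):

#include <stdarg.h>
#include <stdio.h>

enum fmt_type { T_NONE, T_INT, T_STR, T_PERCENT, T_INVALID };

struct spec {                          /* simplified stand-in for printf_spec */
	enum fmt_type type;
};

/* Parse one token of the format string; return how many chars were consumed. */
static int decode(const char *fmt, struct spec *s)
{
	const char *start = fmt;

	if (*fmt != '%') {             /* literal run up to the next '%' */
		s->type = T_NONE;
		while (*fmt && *fmt != '%')
			fmt++;
		return fmt - start;
	}
	fmt++;                         /* skip the '%' itself */
	switch (*fmt) {
	case 'd': s->type = T_INT;     return 2;
	case 's': s->type = T_STR;     return 2;
	case '%': s->type = T_PERCENT; return 2;
	default:  s->type = T_INVALID; return 1;
	}
}

static void render(const char *fmt, ...)
{
	struct spec s;
	va_list ap;
	int read;

	va_start(ap, fmt);
	while (*fmt) {
		read = decode(fmt, &s);
		switch (s.type) {
		case T_NONE:    printf("%.*s", read, fmt);        break;
		case T_INT:     printf("%d", va_arg(ap, int));    break;
		case T_STR:     printf("%s", va_arg(ap, char *)); break;
		default:        putchar('%');                     break;
		}
		fmt += read;
	}
	va_end(ap);
}

int main(void)
{
	render("%d%% of %s\n", 42, "tests");   /* prints "42% of tests" */
	return 0;
}

In the real code the width, precision and length qualifiers are likewise captured in printf_spec during decoding, so the output half never has to re-parse them.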
@@ -1058,6 +1243,372 @@ int sprintf(char * buf, const char *fmt, ...)
1058} 1243}
1059EXPORT_SYMBOL(sprintf); 1244EXPORT_SYMBOL(sprintf);
1060 1245
1246#ifdef CONFIG_BINARY_PRINTF
1247/*
1248 * bprintf service:
1249 * vbin_printf() - VA arguments to binary data
1250 * bstr_printf() - Binary data to text string
1251 */
1252
1253/**
1254 * vbin_printf - Parse a format string and place args' binary value in a buffer
1255 * @bin_buf: The buffer to place args' binary value
 1256 * @size: The size of the buffer (in 32-bit words, not characters)
1257 * @fmt: The format string to use
1258 * @args: Arguments for the format string
1259 *
 1260 * The format follows C99 vsnprintf, except that %n is ignored and its argument
 1261 * is skipped.
1262 *
 1263 * The return value is the number of 32-bit words that would be generated for
1264 * the given input.
1265 *
1266 * NOTE:
1267 * If the return value is greater than @size, the resulting bin_buf is NOT
1268 * valid for bstr_printf().
1269 */
1270int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
1271{
1272 struct printf_spec spec = {0};
1273 char *str, *end;
1274 int read;
1275
1276 str = (char *)bin_buf;
1277 end = (char *)(bin_buf + size);
1278
1279#define save_arg(type) \
1280do { \
1281 if (sizeof(type) == 8) { \
1282 unsigned long long value; \
1283 str = PTR_ALIGN(str, sizeof(u32)); \
1284 value = va_arg(args, unsigned long long); \
1285 if (str + sizeof(type) <= end) { \
1286 *(u32 *)str = *(u32 *)&value; \
1287 *(u32 *)(str + 4) = *((u32 *)&value + 1); \
1288 } \
1289 } else { \
1290 unsigned long value; \
1291 str = PTR_ALIGN(str, sizeof(type)); \
1292 value = va_arg(args, int); \
1293 if (str + sizeof(type) <= end) \
1294 *(typeof(type) *)str = (type)value; \
1295 } \
1296 str += sizeof(type); \
1297} while (0)
1298
1299
1300 while (*fmt) {
1301 read = format_decode(fmt, &spec);
1302
1303 fmt += read;
1304
1305 switch (spec.type) {
1306 case FORMAT_TYPE_NONE:
1307 break;
1308
1309 case FORMAT_TYPE_WITDH:
1310 case FORMAT_TYPE_PRECISION:
1311 save_arg(int);
1312 break;
1313
1314 case FORMAT_TYPE_CHAR:
1315 save_arg(char);
1316 break;
1317
1318 case FORMAT_TYPE_STR: {
1319 const char *save_str = va_arg(args, char *);
1320 size_t len;
1321 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
1322 || (unsigned long)save_str < PAGE_SIZE)
1323 save_str = "<NULL>";
1324 len = strlen(save_str);
1325 if (str + len + 1 < end)
1326 memcpy(str, save_str, len + 1);
1327 str += len + 1;
1328 break;
1329 }
1330
1331 case FORMAT_TYPE_PTR:
1332 save_arg(void *);
1333 /* skip all alphanumeric pointer suffixes */
1334 while (isalnum(*fmt))
1335 fmt++;
1336 break;
1337
1338 case FORMAT_TYPE_PERCENT_CHAR:
1339 break;
1340
1341 case FORMAT_TYPE_INVALID:
1342 if (!*fmt)
1343 --fmt;
1344 break;
1345
1346 case FORMAT_TYPE_NRCHARS: {
 1347 /* skip %n's argument */
1348 int qualifier = spec.qualifier;
1349 void *skip_arg;
1350 if (qualifier == 'l')
1351 skip_arg = va_arg(args, long *);
1352 else if (qualifier == 'Z' || qualifier == 'z')
1353 skip_arg = va_arg(args, size_t *);
1354 else
1355 skip_arg = va_arg(args, int *);
1356 break;
1357 }
1358
1359 default:
1360 switch (spec.type) {
1361
1362 case FORMAT_TYPE_LONG_LONG:
1363 save_arg(long long);
1364 break;
1365 case FORMAT_TYPE_ULONG:
1366 case FORMAT_TYPE_LONG:
1367 save_arg(unsigned long);
1368 break;
1369 case FORMAT_TYPE_SIZE_T:
1370 save_arg(size_t);
1371 break;
1372 case FORMAT_TYPE_PTRDIFF:
1373 save_arg(ptrdiff_t);
1374 break;
1375 case FORMAT_TYPE_USHORT:
1376 case FORMAT_TYPE_SHORT:
1377 save_arg(short);
1378 break;
1379 default:
1380 save_arg(int);
1381 }
1382 }
1383 }
1384 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1385
1386#undef save_arg
1387}
1388EXPORT_SYMBOL_GPL(vbin_printf);
1389
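save_arg() above does two things worth calling out: it rounds the write position up to the argument's natural alignment before storing it, and it stores 64-bit values as two consecutive u32 words aligned to only 4 bytes, so a 32-bit and a 64-bit kernel lay the buffer out identically. A stand-alone illustration of that packing (PACK_ALIGN here stands in for the kernel's PTR_ALIGN; the stored values and buffer size are arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Round p up to the next multiple of a (a must be a power of two). */
#define PACK_ALIGN(p, a) \
	((char *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

int main(void)
{
	uint32_t buf[8];
	char *str = (char *)buf;
	char *end = (char *)(buf + 8);

	/* Store a 16-bit value at 2-byte alignment, like save_arg(short). */
	short s = 7;
	str = PACK_ALIGN(str, sizeof(s));
	if (str + sizeof(s) <= end)
		memcpy(str, &s, sizeof(s));
	str += sizeof(s);

	/* Store a 64-bit value; like save_arg() it is aligned to only 4 bytes
	 * and written as two u32 halves, so 32- and 64-bit hosts agree. */
	unsigned long long v = 0x1122334455667788ULL;
	str = PACK_ALIGN(str, sizeof(uint32_t));
	if (str + sizeof(v) <= end)
		memcpy(str, &v, sizeof(v));
	str += sizeof(v);

	/* Number of u32 words consumed, rounded up. */
	printf("%zu words used\n",
	       (size_t)((uint32_t *)PACK_ALIGN(str, sizeof(uint32_t)) - buf));
	return 0;
}

The word count printed at the end corresponds to vbin_printf()'s return value: the consumed length rounded up to whole u32 words.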
1390/**
1391 * bstr_printf - Format a string from binary arguments and place it in a buffer
1392 * @buf: The buffer to place the result into
1393 * @size: The size of the buffer, including the trailing null space
1394 * @fmt: The format string to use
1395 * @bin_buf: Binary arguments for the format string
1396 *
 1397 * This function is like C99 vsnprintf, but the difference is that vsnprintf gets
 1398 * its arguments from the stack, while bstr_printf gets its arguments from @bin_buf,
 1399 * a binary buffer generated by vbin_printf.
1400 *
1401 * The format follows C99 vsnprintf, but has some extensions:
1402 * %pS output the name of a text symbol
1403 * %pF output the name of a function pointer
1404 * %pR output the address range in a struct resource
1405 * %n is ignored
1406 *
1407 * The return value is the number of characters which would
1408 * be generated for the given input, excluding the trailing
1409 * '\0', as per ISO C99. If you want to have the exact
1410 * number of characters written into @buf as return value
1411 * (not including the trailing '\0'), use vscnprintf(). If the
1412 * return is greater than or equal to @size, the resulting
1413 * string is truncated.
1414 */
1415int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1416{
1417 unsigned long long num;
1418 char *str, *end, c;
1419 const char *args = (const char *)bin_buf;
1420
1421 struct printf_spec spec = {0};
1422
1423 if (unlikely((int) size < 0)) {
1424 /* There can be only one.. */
1425 static char warn = 1;
1426 WARN_ON(warn);
1427 warn = 0;
1428 return 0;
1429 }
1430
1431 str = buf;
1432 end = buf + size;
1433
1434#define get_arg(type) \
1435({ \
1436 typeof(type) value; \
1437 if (sizeof(type) == 8) { \
1438 args = PTR_ALIGN(args, sizeof(u32)); \
1439 *(u32 *)&value = *(u32 *)args; \
1440 *((u32 *)&value + 1) = *(u32 *)(args + 4); \
1441 } else { \
1442 args = PTR_ALIGN(args, sizeof(type)); \
1443 value = *(typeof(type) *)args; \
1444 } \
1445 args += sizeof(type); \
1446 value; \
1447})
1448
1449 /* Make sure end is always >= buf */
1450 if (end < buf) {
1451 end = ((void *)-1);
1452 size = end - buf;
1453 }
1454
1455 while (*fmt) {
1456 int read;
1457 const char *old_fmt = fmt;
1458
1459 read = format_decode(fmt, &spec);
1460
1461 fmt += read;
1462
1463 switch (spec.type) {
1464 case FORMAT_TYPE_NONE: {
1465 int copy = read;
1466 if (str < end) {
1467 if (copy > end - str)
1468 copy = end - str;
1469 memcpy(str, old_fmt, copy);
1470 }
1471 str += read;
1472 break;
1473 }
1474
1475 case FORMAT_TYPE_WITDH:
1476 spec.field_width = get_arg(int);
1477 break;
1478
1479 case FORMAT_TYPE_PRECISION:
1480 spec.precision = get_arg(int);
1481 break;
1482
1483 case FORMAT_TYPE_CHAR:
1484 if (!(spec.flags & LEFT)) {
1485 while (--spec.field_width > 0) {
1486 if (str < end)
1487 *str = ' ';
1488 ++str;
1489 }
1490 }
1491 c = (unsigned char) get_arg(char);
1492 if (str < end)
1493 *str = c;
1494 ++str;
1495 while (--spec.field_width > 0) {
1496 if (str < end)
1497 *str = ' ';
1498 ++str;
1499 }
1500 break;
1501
1502 case FORMAT_TYPE_STR: {
1503 const char *str_arg = args;
1504 size_t len = strlen(str_arg);
1505 args += len + 1;
1506 str = string(str, end, (char *)str_arg, spec);
1507 break;
1508 }
1509
1510 case FORMAT_TYPE_PTR:
1511 str = pointer(fmt+1, str, end, get_arg(void *), spec);
1512 while (isalnum(*fmt))
1513 fmt++;
1514 break;
1515
1516 case FORMAT_TYPE_PERCENT_CHAR:
1517 if (str < end)
1518 *str = '%';
1519 ++str;
1520 break;
1521
1522 case FORMAT_TYPE_INVALID:
1523 if (str < end)
1524 *str = '%';
1525 ++str;
1526 if (*fmt) {
1527 if (str < end)
1528 *str = *fmt;
1529 ++str;
1530 } else {
1531 --fmt;
1532 }
1533 break;
1534
1535 case FORMAT_TYPE_NRCHARS:
1536 /* skip */
1537 break;
1538
1539 default:
1540 switch (spec.type) {
1541
1542 case FORMAT_TYPE_LONG_LONG:
1543 num = get_arg(long long);
1544 break;
1545 case FORMAT_TYPE_ULONG:
1546 num = get_arg(unsigned long);
1547 break;
1548 case FORMAT_TYPE_LONG:
1549 num = get_arg(unsigned long);
1550 break;
1551 case FORMAT_TYPE_SIZE_T:
1552 num = get_arg(size_t);
1553 break;
1554 case FORMAT_TYPE_PTRDIFF:
1555 num = get_arg(ptrdiff_t);
1556 break;
1557 case FORMAT_TYPE_USHORT:
1558 num = get_arg(unsigned short);
1559 break;
1560 case FORMAT_TYPE_SHORT:
1561 num = get_arg(short);
1562 break;
1563 case FORMAT_TYPE_UINT:
1564 num = get_arg(unsigned int);
1565 break;
1566 default:
1567 num = get_arg(int);
1568 }
1569
1570 str = number(str, end, num, spec);
1571 }
1572 }
1573
1574 if (size > 0) {
1575 if (str < end)
1576 *str = '\0';
1577 else
1578 end[-1] = '\0';
1579 }
1580
1581#undef get_arg
1582
1583 /* the trailing null byte doesn't count towards the total */
1584 return str - buf;
1585}
1586EXPORT_SYMBOL_GPL(bstr_printf);
1587
1588/**
1589 * bprintf - Parse a format string and place args' binary value in a buffer
1590 * @bin_buf: The buffer to place args' binary value
 1591 * @size: The size of the buffer (in 32-bit words, not characters)
1592 * @fmt: The format string to use
1593 * @...: Arguments for the format string
1594 *
 1595 * The function returns the number of words (u32) written
1596 * into @bin_buf.
1597 */
1598int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
1599{
1600 va_list args;
1601 int ret;
1602
1603 va_start(args, fmt);
1604 ret = vbin_printf(bin_buf, size, fmt, args);
1605 va_end(args);
1606 return ret;
1607}
1608EXPORT_SYMBOL_GPL(bprintf);
1609
1610#endif /* CONFIG_BINARY_PRINTF */
1611
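Taken together, the pair splits formatting into a cheap capture step and a later, more expensive rendering step: bprintf() only decodes the format and copies the raw argument values, while bstr_printf() expands them into text when the result is actually needed, using the same format string. A hedged sketch of that round trip (record_event()/report_event() and the buffer sizes are hypothetical; only the bprintf()/bstr_printf() calls and their argument order come from the code above):

#include <linux/kernel.h>	/* bprintf(), bstr_printf() under CONFIG_BINARY_PRINTF */

#define REC_WORDS	32	/* illustrative per-record buffer size, in u32 words */

static u32  rec_args[REC_WORDS];
static char rec_text[256];

static void record_event(int cpu, unsigned long long ts)
{
	/* Fast path: decode the format and copy the raw argument values;
	 * no number-to-string conversion happens here. */
	bprintf(rec_args, REC_WORDS, "cpu %d stamp %llu\n", cpu, ts);
}

static void report_event(void)
{
	/* Slow path: expand the saved arguments into text using the *same*
	 * format string that was used to pack them. */
	bstr_printf(rec_text, sizeof(rec_text), "cpu %d stamp %llu\n", rec_args);
	printk(KERN_INFO "%s", rec_text);
}

The caveat in the vbin_printf() comment applies here too: if bprintf() returns more than REC_WORDS, the packed buffer is incomplete and must not be handed to bstr_printf().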
1061/** 1612/**
1062 * vsscanf - Unformat a buffer into a list of arguments 1613 * vsscanf - Unformat a buffer into a list of arguments
1063 * @buf: input buffer 1614 * @buf: input buffer
diff --git a/mm/slab.c b/mm/slab.c
index 825c606f691d..9ec66c3e6ee0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
102#include <linux/cpu.h> 102#include <linux/cpu.h>
103#include <linux/sysctl.h> 103#include <linux/sysctl.h>
104#include <linux/module.h> 104#include <linux/module.h>
105#include <trace/kmemtrace.h>
105#include <linux/rcupdate.h> 106#include <linux/rcupdate.h>
106#include <linux/string.h> 107#include <linux/string.h>
107#include <linux/uaccess.h> 108#include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
568 569
569#endif 570#endif
570 571
572#ifdef CONFIG_KMEMTRACE
573size_t slab_buffer_size(struct kmem_cache *cachep)
574{
575 return cachep->buffer_size;
576}
577EXPORT_SYMBOL(slab_buffer_size);
578#endif
579
571/* 580/*
572 * Do not go above this order unless 0 objects fit into the slab. 581 * Do not go above this order unless 0 objects fit into the slab.
573 */ 582 */
@@ -3554,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3554 */ 3563 */
3555void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3564void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3556{ 3565{
3557 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3566 void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3567
3568 kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
3569 obj_size(cachep), cachep->buffer_size, flags);
3570
3571 return ret;
3558} 3572}
3559EXPORT_SYMBOL(kmem_cache_alloc); 3573EXPORT_SYMBOL(kmem_cache_alloc);
3560 3574
3575#ifdef CONFIG_KMEMTRACE
3576void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
3577{
3578 return __cache_alloc(cachep, flags, __builtin_return_address(0));
3579}
3580EXPORT_SYMBOL(kmem_cache_alloc_notrace);
3581#endif
3582
3561/** 3583/**
3562 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry. 3584 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
3563 * @cachep: the cache we're checking against 3585 * @cachep: the cache we're checking against
@@ -3602,23 +3624,47 @@ out:
3602#ifdef CONFIG_NUMA 3624#ifdef CONFIG_NUMA
3603void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3625void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3604{ 3626{
3605 return __cache_alloc_node(cachep, flags, nodeid, 3627 void *ret = __cache_alloc_node(cachep, flags, nodeid,
3606 __builtin_return_address(0)); 3628 __builtin_return_address(0));
3629
3630 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
3631 obj_size(cachep), cachep->buffer_size,
3632 flags, nodeid);
3633
3634 return ret;
3607} 3635}
3608EXPORT_SYMBOL(kmem_cache_alloc_node); 3636EXPORT_SYMBOL(kmem_cache_alloc_node);
3609 3637
3638#ifdef CONFIG_KMEMTRACE
3639void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
3640 gfp_t flags,
3641 int nodeid)
3642{
3643 return __cache_alloc_node(cachep, flags, nodeid,
3644 __builtin_return_address(0));
3645}
3646EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
3647#endif
3648
3610static __always_inline void * 3649static __always_inline void *
3611__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3650__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3612{ 3651{
3613 struct kmem_cache *cachep; 3652 struct kmem_cache *cachep;
3653 void *ret;
3614 3654
3615 cachep = kmem_find_general_cachep(size, flags); 3655 cachep = kmem_find_general_cachep(size, flags);
3616 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3656 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3617 return cachep; 3657 return cachep;
3618 return kmem_cache_alloc_node(cachep, flags, node); 3658 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
3659
3660 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
3661 (unsigned long) caller, ret,
3662 size, cachep->buffer_size, flags, node);
3663
3664 return ret;
3619} 3665}
3620 3666
3621#ifdef CONFIG_DEBUG_SLAB 3667#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
3622void *__kmalloc_node(size_t size, gfp_t flags, int node) 3668void *__kmalloc_node(size_t size, gfp_t flags, int node)
3623{ 3669{
3624 return __do_kmalloc_node(size, flags, node, 3670 return __do_kmalloc_node(size, flags, node,
@@ -3651,6 +3697,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3651 void *caller) 3697 void *caller)
3652{ 3698{
3653 struct kmem_cache *cachep; 3699 struct kmem_cache *cachep;
3700 void *ret;
3654 3701
3655 /* If you want to save a few bytes .text space: replace 3702 /* If you want to save a few bytes .text space: replace
3656 * __ with kmem_. 3703 * __ with kmem_.
@@ -3660,11 +3707,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3660 cachep = __find_general_cachep(size, flags); 3707 cachep = __find_general_cachep(size, flags);
3661 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3708 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3662 return cachep; 3709 return cachep;
3663 return __cache_alloc(cachep, flags, caller); 3710 ret = __cache_alloc(cachep, flags, caller);
3711
3712 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
3713 (unsigned long) caller, ret,
3714 size, cachep->buffer_size, flags);
3715
3716 return ret;
3664} 3717}
3665 3718
3666 3719
3667#ifdef CONFIG_DEBUG_SLAB 3720#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
3668void *__kmalloc(size_t size, gfp_t flags) 3721void *__kmalloc(size_t size, gfp_t flags)
3669{ 3722{
3670 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3723 return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3703,6 +3756,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3703 debug_check_no_obj_freed(objp, obj_size(cachep)); 3756 debug_check_no_obj_freed(objp, obj_size(cachep));
3704 __cache_free(cachep, objp); 3757 __cache_free(cachep, objp);
3705 local_irq_restore(flags); 3758 local_irq_restore(flags);
3759
3760 kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
3706} 3761}
3707EXPORT_SYMBOL(kmem_cache_free); 3762EXPORT_SYMBOL(kmem_cache_free);
3708 3763
@@ -3729,6 +3784,8 @@ void kfree(const void *objp)
3729 debug_check_no_obj_freed(objp, obj_size(c)); 3784 debug_check_no_obj_freed(objp, obj_size(c));
3730 __cache_free(c, (void *)objp); 3785 __cache_free(c, (void *)objp);
3731 local_irq_restore(flags); 3786 local_irq_restore(flags);
3787
3788 kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
3732} 3789}
3733EXPORT_SYMBOL(kfree); 3790EXPORT_SYMBOL(kfree);
3734 3791
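Every slab hunk above follows the same pattern: perform the real allocation, then emit one kmemtrace event carrying the call site, the returned object, the bytes requested and the bytes actually set aside; the new *_notrace entry points exist so wrappers such as __do_kmalloc_node() can allocate without generating a second, misattributed event. A sketch of instrumenting an additional wrapper the same way (my_kmalloc_from() is hypothetical, and it assumes the kmem_cache_alloc_notrace() and slab_buffer_size() helpers added above are visible through the slab headers):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <trace/kmemtrace.h>

/*
 * Allocate from a specific cache on behalf of our caller.  The inner
 * allocation stays silent (_notrace), and exactly one event is emitted,
 * attributed to this wrapper's caller via _RET_IP_.  bytes_req is what
 * the caller asked for; bytes_alloc is what the cache really set aside.
 */
static void *my_kmalloc_from(struct kmem_cache *cachep, size_t size, gfp_t flags)
{
	void *ret = kmem_cache_alloc_notrace(cachep, flags);

	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
			     size, slab_buffer_size(cachep), flags);
	return ret;
}

When CONFIG_KMEMTRACE is not set, the kmemtrace_mark_*() calls are expected to compile away to no-ops, so a wrapper like this should cost nothing in that configuration.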
diff --git a/mm/slob.c b/mm/slob.c
index 7a3411524dac..4dd6516447f2 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
65#include <linux/module.h> 65#include <linux/module.h>
66#include <linux/rcupdate.h> 66#include <linux/rcupdate.h>
67#include <linux/list.h> 67#include <linux/list.h>
68#include <trace/kmemtrace.h>
68#include <asm/atomic.h> 69#include <asm/atomic.h>
69 70
70/* 71/*
@@ -474,6 +475,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
474{ 475{
475 unsigned int *m; 476 unsigned int *m;
476 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); 477 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
478 void *ret;
477 479
478 lockdep_trace_alloc(gfp); 480 lockdep_trace_alloc(gfp);
479 481
@@ -482,12 +484,17 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
482 return ZERO_SIZE_PTR; 484 return ZERO_SIZE_PTR;
483 485
484 m = slob_alloc(size + align, gfp, align, node); 486 m = slob_alloc(size + align, gfp, align, node);
487
485 if (!m) 488 if (!m)
486 return NULL; 489 return NULL;
487 *m = size; 490 *m = size;
488 return (void *)m + align; 491 ret = (void *)m + align;
492
493 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
494 _RET_IP_, ret,
495 size, size + align, gfp, node);
489 } else { 496 } else {
490 void *ret; 497 unsigned int order = get_order(size);
491 498
492 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node); 499 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
493 if (ret) { 500 if (ret) {
@@ -495,8 +502,13 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
495 page = virt_to_page(ret); 502 page = virt_to_page(ret);
496 page->private = size; 503 page->private = size;
497 } 504 }
498 return ret; 505
506 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
507 _RET_IP_, ret,
508 size, PAGE_SIZE << order, gfp, node);
499 } 509 }
510
511 return ret;
500} 512}
501EXPORT_SYMBOL(__kmalloc_node); 513EXPORT_SYMBOL(__kmalloc_node);
502 514
@@ -514,6 +526,8 @@ void kfree(const void *block)
514 slob_free(m, *m + align); 526 slob_free(m, *m + align);
515 } else 527 } else
516 put_page(&sp->page); 528 put_page(&sp->page);
529
530 kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
517} 531}
518EXPORT_SYMBOL(kfree); 532EXPORT_SYMBOL(kfree);
519 533
@@ -583,10 +597,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
583{ 597{
584 void *b; 598 void *b;
585 599
586 if (c->size < PAGE_SIZE) 600 if (c->size < PAGE_SIZE) {
587 b = slob_alloc(c->size, flags, c->align, node); 601 b = slob_alloc(c->size, flags, c->align, node);
588 else 602 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
603 _RET_IP_, b, c->size,
604 SLOB_UNITS(c->size) * SLOB_UNIT,
605 flags, node);
606 } else {
589 b = slob_new_pages(flags, get_order(c->size), node); 607 b = slob_new_pages(flags, get_order(c->size), node);
608 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
609 _RET_IP_, b, c->size,
610 PAGE_SIZE << get_order(c->size),
611 flags, node);
612 }
590 613
591 if (c->ctor) 614 if (c->ctor)
592 c->ctor(b); 615 c->ctor(b);
@@ -622,6 +645,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
622 } else { 645 } else {
623 __kmem_cache_free(b, c->size); 646 __kmem_cache_free(b, c->size);
624 } 647 }
648
649 kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
625} 650}
626EXPORT_SYMBOL(kmem_cache_free); 651EXPORT_SYMBOL(kmem_cache_free);
627 652
diff --git a/mm/slub.c b/mm/slub.c
index c4ea9158c9fb..7aaa121d0ea9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <trace/kmemtrace.h>
19#include <linux/cpu.h> 20#include <linux/cpu.h>
20#include <linux/cpuset.h> 21#include <linux/cpuset.h>
21#include <linux/mempolicy.h> 22#include <linux/mempolicy.h>
@@ -1618,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
1618 1619
1619void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 1620void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1620{ 1621{
1621 return slab_alloc(s, gfpflags, -1, _RET_IP_); 1622 void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
1623
1624 kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
1625 s->objsize, s->size, gfpflags);
1626
1627 return ret;
1622} 1628}
1623EXPORT_SYMBOL(kmem_cache_alloc); 1629EXPORT_SYMBOL(kmem_cache_alloc);
1624 1630
1631#ifdef CONFIG_KMEMTRACE
1632void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
1633{
1634 return slab_alloc(s, gfpflags, -1, _RET_IP_);
1635}
1636EXPORT_SYMBOL(kmem_cache_alloc_notrace);
1637#endif
1638
1625#ifdef CONFIG_NUMA 1639#ifdef CONFIG_NUMA
1626void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 1640void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1627{ 1641{
1628 return slab_alloc(s, gfpflags, node, _RET_IP_); 1642 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
1643
1644 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
1645 s->objsize, s->size, gfpflags, node);
1646
1647 return ret;
1629} 1648}
1630EXPORT_SYMBOL(kmem_cache_alloc_node); 1649EXPORT_SYMBOL(kmem_cache_alloc_node);
1631#endif 1650#endif
1632 1651
1652#ifdef CONFIG_KMEMTRACE
1653void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
1654 gfp_t gfpflags,
1655 int node)
1656{
1657 return slab_alloc(s, gfpflags, node, _RET_IP_);
1658}
1659EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
1660#endif
1661
1633/* 1662/*
1634 * Slow patch handling. This may still be called frequently since objects 1663 * Slow patch handling. This may still be called frequently since objects
1635 * have a longer lifetime than the cpu slabs in most processing loads. 1664 * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1737,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
1737 page = virt_to_head_page(x); 1766 page = virt_to_head_page(x);
1738 1767
1739 slab_free(s, page, x, _RET_IP_); 1768 slab_free(s, page, x, _RET_IP_);
1769
1770 kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
1740} 1771}
1741EXPORT_SYMBOL(kmem_cache_free); 1772EXPORT_SYMBOL(kmem_cache_free);
1742 1773
@@ -2659,6 +2690,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2659void *__kmalloc(size_t size, gfp_t flags) 2690void *__kmalloc(size_t size, gfp_t flags)
2660{ 2691{
2661 struct kmem_cache *s; 2692 struct kmem_cache *s;
2693 void *ret;
2662 2694
2663 if (unlikely(size > SLUB_MAX_SIZE)) 2695 if (unlikely(size > SLUB_MAX_SIZE))
2664 return kmalloc_large(size, flags); 2696 return kmalloc_large(size, flags);
@@ -2668,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
2668 if (unlikely(ZERO_OR_NULL_PTR(s))) 2700 if (unlikely(ZERO_OR_NULL_PTR(s)))
2669 return s; 2701 return s;
2670 2702
2671 return slab_alloc(s, flags, -1, _RET_IP_); 2703 ret = slab_alloc(s, flags, -1, _RET_IP_);
2704
2705 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
2706 size, s->size, flags);
2707
2708 return ret;
2672} 2709}
2673EXPORT_SYMBOL(__kmalloc); 2710EXPORT_SYMBOL(__kmalloc);
2674 2711
@@ -2687,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2687void *__kmalloc_node(size_t size, gfp_t flags, int node) 2724void *__kmalloc_node(size_t size, gfp_t flags, int node)
2688{ 2725{
2689 struct kmem_cache *s; 2726 struct kmem_cache *s;
2727 void *ret;
2690 2728
2691 if (unlikely(size > SLUB_MAX_SIZE)) 2729 if (unlikely(size > SLUB_MAX_SIZE)) {
2692 return kmalloc_large_node(size, flags, node); 2730 ret = kmalloc_large_node(size, flags, node);
2731
2732 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
2733 _RET_IP_, ret,
2734 size, PAGE_SIZE << get_order(size),
2735 flags, node);
2736
2737 return ret;
2738 }
2693 2739
2694 s = get_slab(size, flags); 2740 s = get_slab(size, flags);
2695 2741
2696 if (unlikely(ZERO_OR_NULL_PTR(s))) 2742 if (unlikely(ZERO_OR_NULL_PTR(s)))
2697 return s; 2743 return s;
2698 2744
2699 return slab_alloc(s, flags, node, _RET_IP_); 2745 ret = slab_alloc(s, flags, node, _RET_IP_);
2746
2747 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
2748 size, s->size, flags, node);
2749
2750 return ret;
2700} 2751}
2701EXPORT_SYMBOL(__kmalloc_node); 2752EXPORT_SYMBOL(__kmalloc_node);
2702#endif 2753#endif
@@ -2755,6 +2806,8 @@ void kfree(const void *x)
2755 return; 2806 return;
2756 } 2807 }
2757 slab_free(page->slab, page, object, _RET_IP_); 2808 slab_free(page->slab, page, object, _RET_IP_);
2809
2810 kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
2758} 2811}
2759EXPORT_SYMBOL(kfree); 2812EXPORT_SYMBOL(kfree);
2760 2813
@@ -3224,6 +3277,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
3224void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 3277void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3225{ 3278{
3226 struct kmem_cache *s; 3279 struct kmem_cache *s;
3280 void *ret;
3227 3281
3228 if (unlikely(size > SLUB_MAX_SIZE)) 3282 if (unlikely(size > SLUB_MAX_SIZE))
3229 return kmalloc_large(size, gfpflags); 3283 return kmalloc_large(size, gfpflags);
@@ -3233,13 +3287,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3233 if (unlikely(ZERO_OR_NULL_PTR(s))) 3287 if (unlikely(ZERO_OR_NULL_PTR(s)))
3234 return s; 3288 return s;
3235 3289
3236 return slab_alloc(s, gfpflags, -1, caller); 3290 ret = slab_alloc(s, gfpflags, -1, caller);
3291
 3292 /* Honor the call site pointer we received. */
3293 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
3294 s->size, gfpflags);
3295
3296 return ret;
3237} 3297}
3238 3298
3239void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3299void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3240 int node, unsigned long caller) 3300 int node, unsigned long caller)
3241{ 3301{
3242 struct kmem_cache *s; 3302 struct kmem_cache *s;
3303 void *ret;
3243 3304
3244 if (unlikely(size > SLUB_MAX_SIZE)) 3305 if (unlikely(size > SLUB_MAX_SIZE))
3245 return kmalloc_large_node(size, gfpflags, node); 3306 return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3249 if (unlikely(ZERO_OR_NULL_PTR(s))) 3310 if (unlikely(ZERO_OR_NULL_PTR(s)))
3250 return s; 3311 return s;
3251 3312
3252 return slab_alloc(s, gfpflags, node, caller); 3313 ret = slab_alloc(s, gfpflags, node, caller);
3314
 3315 /* Honor the call site pointer we received. */
3316 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
3317 size, s->size, gfpflags, node);
3318
3319 return ret;
3253} 3320}
3254 3321
3255#ifdef CONFIG_SLUB_DEBUG 3322#ifdef CONFIG_SLUB_DEBUG
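The *_track_caller hunks at the end of the slub diff show why the call-site value is threaded through as an explicit parameter: helpers that allocate on behalf of their caller (kstrdup() and friends) pass their own return address down, so both the kmemtrace event and the slab debug bookkeeping point at the real call site rather than at the helper. A hedged sketch of a helper written against that convention (my_memdup() is hypothetical; kmalloc_track_caller() is assumed to be the usual wrapper that expands to __kmalloc_track_caller(size, flags, _RET_IP_)):

#include <linux/slab.h>
#include <linux/string.h>

/*
 * Duplicate an arbitrary buffer.  Because _RET_IP_ is evaluated inside
 * this function, the allocation is attributed to my_memdup()'s caller,
 * which is the call-site value kmemtrace_mark_alloc() records above.
 */
static void *my_memdup(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}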
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h
index 01724e04c556..dffdc49878af 100644
--- a/samples/tracepoints/tp-samples-trace.h
+++ b/samples/tracepoints/tp-samples-trace.h
@@ -5,9 +5,9 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(subsys_event, 7DECLARE_TRACE(subsys_event,
8 TPPROTO(struct inode *inode, struct file *file), 8 TP_PROTO(struct inode *inode, struct file *file),
9 TPARGS(inode, file)); 9 TP_ARGS(inode, file));
10DECLARE_TRACE(subsys_eventb, 10DECLARE_TRACE(subsys_eventb,
11 TPPROTO(void), 11 TP_PROTO(void),
12 TPARGS()); 12 TP_ARGS());
13#endif 13#endif
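The header change above is purely a rename of the declaration helpers (TPPROTO/TPARGS become TP_PROTO/TP_ARGS); declaring, defining and firing a tracepoint works as before. A minimal sketch in the new spelling (subsys_foo, do_work() and the probe are made up for illustration; the register_trace_* helper is assumed to be generated by DECLARE_TRACE, as the sample module below relies on):

#include <linux/kernel.h>
#include <linux/tracepoint.h>

/* In a shared header: declare the tracepoint and its argument list. */
DECLARE_TRACE(subsys_foo,
	TP_PROTO(int cpu, unsigned long count),
	TP_ARGS(cpu, count));

/* In exactly one .c file: give the tracepoint its backing storage. */
DEFINE_TRACE(subsys_foo);

/* At the instrumentation site; nearly free while no probe is attached. */
static void do_work(int cpu, unsigned long count)
{
	trace_subsys_foo(cpu, count);
}

/* A consumer attaches a probe with a matching prototype. */
static void probe_subsys_foo(int cpu, unsigned long count)
{
	printk(KERN_INFO "subsys_foo: cpu=%d count=%lu\n", cpu, count);
}

static int attach_probe(void)
{
	return register_trace_subsys_foo(probe_subsys_foo);
}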
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c
index 68d5dc0310e4..9cf80a11e8b6 100644
--- a/samples/tracepoints/tracepoint-sample.c
+++ b/samples/tracepoints/tracepoint-sample.c
@@ -1,6 +1,6 @@
1/* tracepoint-sample.c 1/* tracepoint-sample.c
2 * 2 *
3 * Executes a tracepoint when /proc/tracepoint-example is opened. 3 * Executes a tracepoint when /proc/tracepoint-sample is opened.
4 * 4 *
5 * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> 5 * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
6 * 6 *
@@ -16,7 +16,7 @@
16DEFINE_TRACE(subsys_event); 16DEFINE_TRACE(subsys_event);
17DEFINE_TRACE(subsys_eventb); 17DEFINE_TRACE(subsys_eventb);
18 18
19struct proc_dir_entry *pentry_example; 19struct proc_dir_entry *pentry_sample;
20 20
21static int my_open(struct inode *inode, struct file *file) 21static int my_open(struct inode *inode, struct file *file)
22{ 22{
@@ -32,25 +32,25 @@ static struct file_operations mark_ops = {
32 .open = my_open, 32 .open = my_open,
33}; 33};
34 34
35static int __init example_init(void) 35static int __init sample_init(void)
36{ 36{
37 printk(KERN_ALERT "example init\n"); 37 printk(KERN_ALERT "sample init\n");
38 pentry_example = proc_create("tracepoint-example", 0444, NULL, 38 pentry_sample = proc_create("tracepoint-sample", 0444, NULL,
39 &mark_ops); 39 &mark_ops);
40 if (!pentry_example) 40 if (!pentry_sample)
41 return -EPERM; 41 return -EPERM;
42 return 0; 42 return 0;
43} 43}
44 44
45static void __exit example_exit(void) 45static void __exit sample_exit(void)
46{ 46{
47 printk(KERN_ALERT "example exit\n"); 47 printk(KERN_ALERT "sample exit\n");
48 remove_proc_entry("tracepoint-example", NULL); 48 remove_proc_entry("tracepoint-sample", NULL);
49} 49}
50 50
51module_init(example_init) 51module_init(sample_init)
52module_exit(example_exit) 52module_exit(sample_exit)
53 53
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55MODULE_AUTHOR("Mathieu Desnoyers"); 55MODULE_AUTHOR("Mathieu Desnoyers");
56MODULE_DESCRIPTION("Tracepoint example"); 56MODULE_DESCRIPTION("Tracepoint sample");
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index c7de8b39fcf1..39a9642927d3 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -112,13 +112,13 @@ endif
112# --------------------------------------------------------------------------- 112# ---------------------------------------------------------------------------
113 113
114# Default is built-in, unless we know otherwise 114# Default is built-in, unless we know otherwise
115modkern_cflags := $(CFLAGS_KERNEL) 115modkern_cflags = $(if $(part-of-module), $(CFLAGS_MODULE), $(CFLAGS_KERNEL))
116quiet_modtag := $(empty) $(empty) 116quiet_modtag := $(empty) $(empty)
117 117
118$(real-objs-m) : modkern_cflags := $(CFLAGS_MODULE) 118$(real-objs-m) : part-of-module := y
119$(real-objs-m:.o=.i) : modkern_cflags := $(CFLAGS_MODULE) 119$(real-objs-m:.o=.i) : part-of-module := y
120$(real-objs-m:.o=.s) : modkern_cflags := $(CFLAGS_MODULE) 120$(real-objs-m:.o=.s) : part-of-module := y
121$(real-objs-m:.o=.lst): modkern_cflags := $(CFLAGS_MODULE) 121$(real-objs-m:.o=.lst): part-of-module := y
122 122
123$(real-objs-m) : quiet_modtag := [M] 123$(real-objs-m) : quiet_modtag := [M]
124$(real-objs-m:.o=.i) : quiet_modtag := [M] 124$(real-objs-m:.o=.i) : quiet_modtag := [M]
@@ -205,7 +205,8 @@ endif
205ifdef CONFIG_FTRACE_MCOUNT_RECORD 205ifdef CONFIG_FTRACE_MCOUNT_RECORD
206cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ 206cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
207 "$(if $(CONFIG_64BIT),64,32)" \ 207 "$(if $(CONFIG_64BIT),64,32)" \
208 "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" "$(@)"; 208 "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
209 "$(if $(part-of-module),1,0)" "$(@)";
209endif 210endif
210 211
211define rule_cc_o_c 212define rule_cc_o_c
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index ad2434b26970..6654cbed965b 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -500,6 +500,51 @@ static void optimize_token_table(void)
500 optimize_result(); 500 optimize_result();
501} 501}
502 502
503/* guess for "linker script provide" symbol */
504static int may_be_linker_script_provide_symbol(const struct sym_entry *se)
505{
506 const char *symbol = (char *)se->sym + 1;
507 int len = se->len - 1;
508
509 if (len < 8)
510 return 0;
511
512 if (symbol[0] != '_' || symbol[1] != '_')
513 return 0;
514
515 /* __start_XXXXX */
516 if (!memcmp(symbol + 2, "start_", 6))
517 return 1;
518
519 /* __stop_XXXXX */
520 if (!memcmp(symbol + 2, "stop_", 5))
521 return 1;
522
523 /* __end_XXXXX */
524 if (!memcmp(symbol + 2, "end_", 4))
525 return 1;
526
527 /* __XXXXX_start */
528 if (!memcmp(symbol + len - 6, "_start", 6))
529 return 1;
530
531 /* __XXXXX_end */
532 if (!memcmp(symbol + len - 4, "_end", 4))
533 return 1;
534
535 return 0;
536}
537
538static int prefix_underscores_count(const char *str)
539{
540 const char *tail = str;
541
542 while (*tail != '_')
543 tail++;
544
545 return tail - str;
546}
547
503static int compare_symbols(const void *a, const void *b) 548static int compare_symbols(const void *a, const void *b)
504{ 549{
505 const struct sym_entry *sa; 550 const struct sym_entry *sa;
@@ -521,6 +566,18 @@ static int compare_symbols(const void *a, const void *b)
521 if (wa != wb) 566 if (wa != wb)
522 return wa - wb; 567 return wa - wb;
523 568
569 /* sort by "linker script provide" type */
570 wa = may_be_linker_script_provide_symbol(sa);
571 wb = may_be_linker_script_provide_symbol(sb);
572 if (wa != wb)
573 return wa - wb;
574
575 /* sort by the number of prefix underscores */
576 wa = prefix_underscores_count((const char *)sa->sym + 1);
577 wb = prefix_underscores_count((const char *)sb->sym + 1);
578 if (wa != wb)
579 return wa - wb;
580
524 /* sort by initial order, so that other symbols are left undisturbed */ 581 /* sort by initial order, so that other symbols are left undisturbed */
525 return sa->start_pos - sb->start_pos; 582 return sa->start_pos - sb->start_pos;
526} 583}
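The two new tie-breakers make kallsyms prefer the "real" name when several symbols share an address: linker-script-provided markers (__start_*, __stop_*, *_start, *_end) sort after ordinary symbols, and among the rest the name with fewer leading underscores wins. A user-space illustration of the resulting order on plain strings (the helpers below are simplified restatements of the checks above, operating on names with the type character already stripped):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int looks_linker_provided(const char *s)
{
	size_t len = strlen(s);

	if (len < 8 || s[0] != '_' || s[1] != '_')
		return 0;
	return !strncmp(s + 2, "start_", 6) || !strncmp(s + 2, "stop_", 5) ||
	       !strncmp(s + 2, "end_", 4) ||
	       !strcmp(s + len - 6, "_start") || !strcmp(s + len - 4, "_end");
}

static int underscores(const char *s)
{
	int n = 0;

	while (s[n] == '_')
		n++;
	return n;
}

static int cmp(const void *a, const void *b)
{
	const char *sa = *(const char * const *)a;
	const char *sb = *(const char * const *)b;
	int wa, wb;

	/* linker-script-provided markers sort last... */
	wa = looks_linker_provided(sa);
	wb = looks_linker_provided(sb);
	if (wa != wb)
		return wa - wb;

	/* ...then fewer prefix underscores sorts first. */
	return underscores(sa) - underscores(sb);
}

int main(void)
{
	/* Three aliases that could share one address in vmlinux. */
	const char *syms[] = { "__sched_text_start", "__schedule", "schedule" };
	size_t i;

	qsort(syms, 3, sizeof(syms[0]), cmp);
	for (i = 0; i < 3; i++)
		printf("%s\n", syms[i]);  /* schedule, __schedule, __sched_text_start */
	return 0;
}

Since the address lookup settles on the first of a run of aliased symbols, putting the plain name first is what keeps reported symbol names from degenerating into linker-script markers.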
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index fe831412bea9..409596eca124 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -100,14 +100,19 @@ $P =~ s@.*/@@g;
100 100
101my $V = '0.1'; 101my $V = '0.1';
102 102
103if ($#ARGV < 6) { 103if ($#ARGV < 7) {
104 print "usage: $P arch objdump objcopy cc ld nm rm mv inputfile\n"; 104 print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
105 print "version: $V\n"; 105 print "version: $V\n";
106 exit(1); 106 exit(1);
107} 107}
108 108
109my ($arch, $bits, $objdump, $objcopy, $cc, 109my ($arch, $bits, $objdump, $objcopy, $cc,
110 $ld, $nm, $rm, $mv, $inputfile) = @ARGV; 110 $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
111
112# This file refers to mcount and shouldn't be ftraced, so let's ignore it
113if ($inputfile eq "kernel/trace/ftrace.o") {
114 exit(0);
115}
111 116
112# Acceptable sections to record. 117# Acceptable sections to record.
113my %text_sections = ( 118my %text_sections = (
@@ -201,6 +206,13 @@ if ($arch eq "x86_64") {
201 $alignment = 2; 206 $alignment = 2;
202 $section_type = '%progbits'; 207 $section_type = '%progbits';
203 208
209} elsif ($arch eq "ia64") {
210 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
211 $type = "data8";
212
213 if ($is_module eq "0") {
214 $cc .= " -mconstant-gp";
215 }
204} else { 216} else {
205 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; 217 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
206} 218}
@@ -263,7 +275,6 @@ if (!$found_version) {
263 "\tDisabling local function references.\n"; 275 "\tDisabling local function references.\n";
264} 276}
265 277
266
267# 278#
268# Step 1: find all the local (static functions) and weak symbols. 279# Step 1: find all the local (static functions) and weak symbols.
269# 't' is local, 'w/W' is weak (we never use a weak function) 280# 't' is local, 'w/W' is weak (we never use a weak function)
@@ -331,13 +342,16 @@ sub update_funcs
331# 342#
332# Step 2: find the sections and mcount call sites 343# Step 2: find the sections and mcount call sites
333# 344#
334open(IN, "$objdump -dr $inputfile|") || die "error running $objdump"; 345open(IN, "$objdump -hdr $inputfile|") || die "error running $objdump";
335 346
336my $text; 347my $text;
337 348
349my $read_headers = 1;
350
338while (<IN>) { 351while (<IN>) {
339 # is it a section? 352 # is it a section?
340 if (/$section_regex/) { 353 if (/$section_regex/) {
354 $read_headers = 0;
341 355
342 # Only record text sections that we know are safe 356 # Only record text sections that we know are safe
343 if (defined($text_sections{$1})) { 357 if (defined($text_sections{$1})) {
@@ -371,6 +385,19 @@ while (<IN>) {
371 $ref_func = $text; 385 $ref_func = $text;
372 } 386 }
373 } 387 }
388 } elsif ($read_headers && /$mcount_section/) {
389 #
390 # Somehow the make process can execute this script on an
391 # object twice. If it does, we would duplicate the mcount
392 # section and it will cause the function tracer self test
393 # to fail. Check if the mcount section exists, and if it does,
394 # warn and exit.
395 #
396 print STDERR "ERROR: $mcount_section already in $inputfile\n" .
397 "\tThis may be an indication that your build is corrupted.\n" .
398 "\tDelete $inputfile and try again. If the same object file\n" .
399 "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n";
400 exit(-1);
374 } 401 }
375 402
376 # is this a call site to mcount? If so, record it to print later 403 # is this a call site to mcount? If so, record it to print later