author     Linus Torvalds <torvalds@linux-foundation.org>  2008-10-20 16:35:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-20 16:35:07 -0400
commit     92b29b86fe2e183d44eb467e5e74a5f718ef2e43 (patch)
tree       1bac8a1aa11d47322b66d10ec3a370016d843d06
parent     b9d7ccf56be1ac77b71a284a1c0e6337f9a7aff0 (diff)
parent     98d9c66ab07471006fd7910cb16453581c41a3e7 (diff)
Merge branch 'tracing-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (131 commits)
  tracing/fastboot: improve help text
  tracing/stacktrace: improve help text
  tracing/fastboot: fix initcalls disposition in bootgraph.pl
  tracing/fastboot: fix bootgraph.pl initcall name regexp
  tracing/fastboot: fix issues and improve output of bootgraph.pl
  tracepoints: synchronize unregister static inline
  tracepoints: tracepoint_synchronize_unregister()
  ftrace: make ftrace_test_p6nop disassembler-friendly
  markers: fix synchronize marker unregister static inline
  tracing/fastboot: add better resolution to initcall debug/tracing
  trace: add build-time check to avoid overrunning hex buffer
  ftrace: fix hex output mode of ftrace
  tracing/fastboot: fix initcalls disposition in bootgraph.pl
  tracing/fastboot: fix printk format typo in boot tracer
  ftrace: return an error when setting a nonexistent tracer
  ftrace: make some tracers reentrant
  ring-buffer: make reentrant
  ring-buffer: move page indexes into page headers
  tracing/fastboot: only trace non-module initcalls
  ftrace: move pc counter in irqtrace
  ...

Manually fix conflicts:
 - init/main.c: initcall tracing
 - kernel/module.c: verbose level vs tracepoints
 - scripts/bootgraph.pl: fallout from cherry-picking commits.
-rw-r--r--  Documentation/markers.txt                        |   10
-rw-r--r--  Documentation/tracepoints.txt                    |  101
-rw-r--r--  Documentation/tracers/mmiotrace.txt              |    5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c     |    1
-rw-r--r--  arch/x86/Kconfig                                 |    1
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c           |   11
-rw-r--r--  arch/x86/kernel/entry_32.S                       |   14
-rw-r--r--  arch/x86/kernel/entry_64.S                       |   26
-rw-r--r--  arch/x86/kernel/ftrace.c                         |  124
-rw-r--r--  arch/x86/mm/mmio-mod.c                           |   87
-rw-r--r--  arch/x86/mm/pf_in.c                              |  121
-rw-r--r--  arch/x86/mm/testmmiotrace.c                      |    4
-rw-r--r--  include/asm-generic/vmlinux.lds.h                |   14
-rw-r--r--  include/asm-x86/ftrace.h                         |   10
-rw-r--r--  include/linux/compiler.h                         |    2
-rw-r--r--  include/linux/ftrace.h                           |   84
-rw-r--r--  include/linux/init.h                             |    2
-rw-r--r--  include/linux/kernel.h                           |    5
-rw-r--r--  include/linux/kprobes.h                          |    5
-rw-r--r--  include/linux/linkage.h                          |    2
-rw-r--r--  include/linux/marker.h                           |    7
-rw-r--r--  include/linux/mmiotrace.h                        |   20
-rw-r--r--  include/linux/module.h                           |   17
-rw-r--r--  include/linux/ring_buffer.h                      |  127
-rw-r--r--  include/linux/tracepoint.h                       |  137
-rw-r--r--  include/trace/sched.h                            |   56
-rw-r--r--  init/Kconfig                                     |    7
-rw-r--r--  init/main.c                                      |   34
-rw-r--r--  kernel/Makefile                                  |    1
-rw-r--r--  kernel/exit.c                                    |   10
-rw-r--r--  kernel/fork.c                                    |    3
-rw-r--r--  kernel/kthread.c                                 |    5
-rw-r--r--  kernel/marker.c                                  |   36
-rw-r--r--  kernel/module.c                                  |   81
-rw-r--r--  kernel/notifier.c                                |    2
-rw-r--r--  kernel/sched.c                                   |   17
-rw-r--r--  kernel/signal.c                                  |    3
-rw-r--r--  kernel/trace/Kconfig                             |   64
-rw-r--r--  kernel/trace/Makefile                            |    4
-rw-r--r--  kernel/trace/ftrace.c                            |  275
-rw-r--r--  kernel/trace/ring_buffer.c                       | 2014
-rw-r--r--  kernel/trace/trace.c                             | 1845
-rw-r--r--  kernel/trace/trace.h                             |  211
-rw-r--r--  kernel/trace/trace_boot.c                        |  126
-rw-r--r--  kernel/trace/trace_functions.c                   |    2
-rw-r--r--  kernel/trace/trace_irqsoff.c                     |   19
-rw-r--r--  kernel/trace/trace_mmiotrace.c                   |  116
-rw-r--r--  kernel/trace/trace_nop.c                         |   64
-rw-r--r--  kernel/trace/trace_sched_switch.c                |  137
-rw-r--r--  kernel/trace/trace_sched_wakeup.c                |  148
-rw-r--r--  kernel/trace/trace_selftest.c                    |   83
-rw-r--r--  kernel/trace/trace_stack.c                       |  310
-rw-r--r--  kernel/trace/trace_sysprof.c                     |    2
-rw-r--r--  kernel/tracepoint.c                              |  477
-rw-r--r--  samples/Kconfig                                  |    6
-rw-r--r--  samples/Makefile                                 |    2
-rw-r--r--  samples/markers/probe-example.c                  |    1
-rw-r--r--  samples/tracepoints/Makefile                     |    6
-rw-r--r--  samples/tracepoints/tp-samples-trace.h           |   13
-rw-r--r--  samples/tracepoints/tracepoint-probe-sample.c    |   55
-rw-r--r--  samples/tracepoints/tracepoint-probe-sample2.c   |   42
-rw-r--r--  samples/tracepoints/tracepoint-sample.c          |   53
-rw-r--r--  scripts/Makefile.build                           |    7
-rw-r--r--  scripts/bootgraph.pl                             |   24
-rwxr-xr-x  scripts/recordmcount.pl                          |  395
65 files changed, 6108 insertions, 1585 deletions
diff --git a/Documentation/markers.txt b/Documentation/markers.txt
index d9f50a19fa0c..089f6138fcd9 100644
--- a/Documentation/markers.txt
+++ b/Documentation/markers.txt
@@ -50,10 +50,12 @@ Connecting a function (probe) to a marker is done by providing a probe (function
50to call) for the specific marker through marker_probe_register() and can be 50to call) for the specific marker through marker_probe_register() and can be
51activated by calling marker_arm(). Marker deactivation can be done by calling 51activated by calling marker_arm(). Marker deactivation can be done by calling
52marker_disarm() as many times as marker_arm() has been called. Removing a probe 52marker_disarm() as many times as marker_arm() has been called. Removing a probe
53is done through marker_probe_unregister(); it will disarm the probe and make 53is done through marker_probe_unregister(); it will disarm the probe.
54sure there is no caller left using the probe when it returns. Probe removal is 54marker_synchronize_unregister() must be called before the end of the module exit
55preempt-safe because preemption is disabled around the probe call. See the 55function to make sure there is no caller left using the probe. This, and the
56"Probe example" section below for a sample probe module. 56fact that preemption is disabled around the probe call, make sure that probe
57removal and module unload are safe. See the "Probe example" section below for a
58sample probe module.
57 59
58The marker mechanism supports inserting multiple instances of the same marker. 60The marker mechanism supports inserting multiple instances of the same marker.
59Markers can be put in inline functions, inlined static functions, and 61Markers can be put in inline functions, inlined static functions, and
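
To make the new teardown ordering concrete, here is a minimal sketch of a probe module's exit path (illustrative only, not part of this commit; the marker name and probe function are made up, and the marker_probe_unregister(name, probe, private) form is assumed from this kernel series):

static void __exit probe_module_exit(void)
{
	/* probe_subsys_eventname() is a hypothetical probe registered earlier. */
	marker_probe_unregister("subsys_eventname",
				probe_subsys_eventname, NULL);
	/*
	 * Wait until no CPU is still executing the probe before the
	 * module text and data can safely go away.
	 */
	marker_synchronize_unregister();
}
module_exit(probe_module_exit);
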
diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt
new file mode 100644
index 000000000000..5d354e167494
--- /dev/null
+++ b/Documentation/tracepoints.txt
@@ -0,0 +1,101 @@
1 Using the Linux Kernel Tracepoints
2
3 Mathieu Desnoyers
4
5
6This document introduces Linux Kernel Tracepoints and their use. It provides
7examples of how to insert tracepoints in the kernel and connect probe functions
8to them and provides some examples of probe functions.
9
10
11* Purpose of tracepoints
12
13A tracepoint placed in code provides a hook to call a function (probe) that you
14can provide at runtime. A tracepoint can be "on" (a probe is connected to it) or
15"off" (no probe is attached). When a tracepoint is "off" it has no effect,
16except for adding a tiny time penalty (checking a condition for a branch) and
17space penalty (adding a few bytes for the function call at the end of the
18instrumented function and adds a data structure in a separate section). When a
19tracepoint is "on", the function you provide is called each time the tracepoint
20is executed, in the execution context of the caller. When the function provided
21ends its execution, it returns to the caller (continuing from the tracepoint
22site).
23
24You can put tracepoints at important locations in the code. They are
25lightweight hooks that can pass an arbitrary number of parameters,
26which prototypes are described in a tracepoint declaration placed in a header
27file.
28
29They can be used for tracing and performance accounting.
30
31
32* Usage
33
34Two elements are required for tracepoints :
35
36- A tracepoint definition, placed in a header file.
37- The tracepoint statement, in C code.
38
39In order to use tracepoints, you should include linux/tracepoint.h.
40
41In include/trace/subsys.h :
42
43#include <linux/tracepoint.h>
44
45DEFINE_TRACE(subsys_eventname,
46 TPPTOTO(int firstarg, struct task_struct *p),
47 TPARGS(firstarg, p));
48
49In subsys/file.c (where the tracing statement must be added) :
50
51#include <trace/subsys.h>
52
53void somefct(void)
54{
55 ...
56 trace_subsys_eventname(arg, task);
57 ...
58}
59
60Where :
61- subsys_eventname is an identifier unique to your event
62 - subsys is the name of your subsystem.
63 - eventname is the name of the event to trace.
64- TPPTOTO(int firstarg, struct task_struct *p) is the prototype of the function
65 called by this tracepoint.
66- TPARGS(firstarg, p) are the parameters names, same as found in the prototype.
67
68Connecting a function (probe) to a tracepoint is done by providing a probe
69(function to call) for the specific tracepoint through
70register_trace_subsys_eventname(). Removing a probe is done through
71unregister_trace_subsys_eventname(); it will remove the probe sure there is no
72caller left using the probe when it returns. Probe removal is preempt-safe
73because preemption is disabled around the probe call. See the "Probe example"
74section below for a sample probe module.
75
76The tracepoint mechanism supports inserting multiple instances of the same
77tracepoint, but a single definition must be made of a given tracepoint name over
78all the kernel to make sure no type conflict will occur. Name mangling of the
79tracepoints is done using the prototypes to make sure typing is correct.
80Verification of probe type correctness is done at the registration site by the
81compiler. Tracepoints can be put in inline functions, inlined static functions,
82and unrolled loops as well as regular functions.
83
84The naming scheme "subsys_event" is suggested here as a convention intended
85to limit collisions. Tracepoint names are global to the kernel: they are
86considered as being the same whether they are in the core kernel image or in
87modules.
88
89
90* Probe / tracepoint example
91
92See the example provided in samples/tracepoints/src
93
94Compile them with your kernel.
95
96Run, as root :
97modprobe tracepoint-example (insmod order is not important)
98modprobe tracepoint-probe-example
99cat /proc/tracepoint-example (returns an expected error)
100rmmod tracepoint-example tracepoint-probe-example
101dmesg
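
To tie the pieces above together, here is a minimal probe-module sketch built from the API this document describes (illustrative only, not part of this commit; subsys_eventname and its arguments follow the example prototype above, error handling is kept to the bare minimum, and tracepoint_synchronize_unregister() is the helper introduced elsewhere in this series):

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/subsys.h>

/* Matches TPPTOTO(int firstarg, struct task_struct *p) from the header. */
static void probe_subsys_eventname(int firstarg, struct task_struct *p)
{
	/* Runs in the caller's context every time the tracepoint fires. */
	printk(KERN_INFO "subsys event %d from %s\n", firstarg, p->comm);
}

static int __init tp_probe_init(void)
{
	return register_trace_subsys_eventname(probe_subsys_eventname);
}

static void __exit tp_probe_exit(void)
{
	unregister_trace_subsys_eventname(probe_subsys_eventname);
	/* Wait for in-flight probe calls before the module goes away. */
	tracepoint_synchronize_unregister();
}

module_init(tp_probe_init);
module_exit(tp_probe_exit);
MODULE_LICENSE("GPL");
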
diff --git a/Documentation/tracers/mmiotrace.txt b/Documentation/tracers/mmiotrace.txt
index a4afb560a45b..5bbbe2096223 100644
--- a/Documentation/tracers/mmiotrace.txt
+++ b/Documentation/tracers/mmiotrace.txt
@@ -36,7 +36,7 @@ $ mount -t debugfs debugfs /debug
36$ echo mmiotrace > /debug/tracing/current_tracer 36$ echo mmiotrace > /debug/tracing/current_tracer
37$ cat /debug/tracing/trace_pipe > mydump.txt & 37$ cat /debug/tracing/trace_pipe > mydump.txt &
38Start X or whatever. 38Start X or whatever.
39$ echo "X is up" > /debug/tracing/marker 39$ echo "X is up" > /debug/tracing/trace_marker
40$ echo none > /debug/tracing/current_tracer 40$ echo none > /debug/tracing/current_tracer
41Check for lost events. 41Check for lost events.
42 42
@@ -59,9 +59,8 @@ The 'cat' process should stay running (sleeping) in the background.
59Load the driver you want to trace and use it. Mmiotrace will only catch MMIO 59Load the driver you want to trace and use it. Mmiotrace will only catch MMIO
60accesses to areas that are ioremapped while mmiotrace is active. 60accesses to areas that are ioremapped while mmiotrace is active.
61 61
62[Unimplemented feature:]
63During tracing you can place comments (markers) into the trace by 62During tracing you can place comments (markers) into the trace by
64$ echo "X is up" > /debug/tracing/marker 63$ echo "X is up" > /debug/tracing/trace_marker
65This makes it easier to see which part of the (huge) trace corresponds to 64This makes it easier to see which part of the (huge) trace corresponds to
66which action. It is recommended to place descriptive markers about what you 65which action. It is recommended to place descriptive markers about what you
67do. 66do.
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 92d20e993ede..2ece399f2862 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -232,6 +232,7 @@ static void __exit sputrace_exit(void)
232 232
233 remove_proc_entry("sputrace", NULL); 233 remove_proc_entry("sputrace", NULL);
234 kfree(sputrace_log); 234 kfree(sputrace_log);
235 marker_synchronize_unregister();
235} 236}
236 237
237module_init(sputrace_init); 238module_init(sputrace_init);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 739668968390..5b9b12321ad1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86
26 select HAVE_KPROBES 26 select HAVE_KPROBES
27 select ARCH_WANT_OPTIONAL_GPIOLIB 27 select ARCH_WANT_OPTIONAL_GPIOLIB
28 select HAVE_KRETPROBES 28 select HAVE_KRETPROBES
29 select HAVE_FTRACE_MCOUNT_RECORD
29 select HAVE_DYNAMIC_FTRACE 30 select HAVE_DYNAMIC_FTRACE
30 select HAVE_FTRACE 31 select HAVE_FTRACE
31 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 32 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 6bff382094f5..9abd48b22674 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -17,6 +17,8 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/nmi.h> 19#include <linux/nmi.h>
20#include <linux/kprobes.h>
21
20#include <asm/apic.h> 22#include <asm/apic.h>
21#include <asm/intel_arch_perfmon.h> 23#include <asm/intel_arch_perfmon.h>
22 24
@@ -336,7 +338,8 @@ static void single_msr_unreserve(void)
336 release_perfctr_nmi(wd_ops->perfctr); 338 release_perfctr_nmi(wd_ops->perfctr);
337} 339}
338 340
339static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 341static void __kprobes
342single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
340{ 343{
341 /* start the cycle over again */ 344 /* start the cycle over again */
342 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); 345 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
@@ -401,7 +404,7 @@ static int setup_p6_watchdog(unsigned nmi_hz)
401 return 1; 404 return 1;
402} 405}
403 406
404static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 407static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
405{ 408{
406 /* 409 /*
407 * P6 based Pentium M need to re-unmask 410 * P6 based Pentium M need to re-unmask
@@ -605,7 +608,7 @@ static void p4_unreserve(void)
605 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); 608 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
606} 609}
607 610
608static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 611static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
609{ 612{
610 unsigned dummy; 613 unsigned dummy;
611 /* 614 /*
@@ -784,7 +787,7 @@ unsigned lapic_adjust_nmi_hz(unsigned hz)
784 return hz; 787 return hz;
785} 788}
786 789
787int lapic_wd_event(unsigned nmi_hz) 790int __kprobes lapic_wd_event(unsigned nmi_hz)
788{ 791{
789 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 792 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
790 u64 ctr; 793 u64 ctr;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 4d82171d0f9c..c356423a6026 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1153,20 +1153,6 @@ ENDPROC(xen_failsafe_callback)
1153#ifdef CONFIG_DYNAMIC_FTRACE 1153#ifdef CONFIG_DYNAMIC_FTRACE
1154 1154
1155ENTRY(mcount) 1155ENTRY(mcount)
1156 pushl %eax
1157 pushl %ecx
1158 pushl %edx
1159 movl 0xc(%esp), %eax
1160 subl $MCOUNT_INSN_SIZE, %eax
1161
1162.globl mcount_call
1163mcount_call:
1164 call ftrace_stub
1165
1166 popl %edx
1167 popl %ecx
1168 popl %eax
1169
1170 ret 1156 ret
1171END(mcount) 1157END(mcount)
1172 1158
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1db6ce4314e1..09e7145484c5 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -64,32 +64,6 @@
64#ifdef CONFIG_FTRACE 64#ifdef CONFIG_FTRACE
65#ifdef CONFIG_DYNAMIC_FTRACE 65#ifdef CONFIG_DYNAMIC_FTRACE
66ENTRY(mcount) 66ENTRY(mcount)
67
68 subq $0x38, %rsp
69 movq %rax, (%rsp)
70 movq %rcx, 8(%rsp)
71 movq %rdx, 16(%rsp)
72 movq %rsi, 24(%rsp)
73 movq %rdi, 32(%rsp)
74 movq %r8, 40(%rsp)
75 movq %r9, 48(%rsp)
76
77 movq 0x38(%rsp), %rdi
78 subq $MCOUNT_INSN_SIZE, %rdi
79
80.globl mcount_call
81mcount_call:
82 call ftrace_stub
83
84 movq 48(%rsp), %r9
85 movq 40(%rsp), %r8
86 movq 32(%rsp), %rdi
87 movq 24(%rsp), %rsi
88 movq 16(%rsp), %rdx
89 movq 8(%rsp), %rcx
90 movq (%rsp), %rax
91 addq $0x38, %rsp
92
93 retq 67 retq
94END(mcount) 68END(mcount)
95 69
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index ab115cd15fdf..d073d981a730 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -11,17 +11,18 @@
11 11
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/hardirq.h> 13#include <linux/hardirq.h>
14#include <linux/uaccess.h>
14#include <linux/ftrace.h> 15#include <linux/ftrace.h>
15#include <linux/percpu.h> 16#include <linux/percpu.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/list.h> 18#include <linux/list.h>
18 19
19#include <asm/alternative.h>
20#include <asm/ftrace.h> 20#include <asm/ftrace.h>
21#include <asm/nops.h>
21 22
22 23
23/* Long is fine, even if it is only 4 bytes ;-) */ 24/* Long is fine, even if it is only 4 bytes ;-) */
24static long *ftrace_nop; 25static unsigned long *ftrace_nop;
25 26
26union ftrace_code_union { 27union ftrace_code_union {
27 char code[MCOUNT_INSN_SIZE]; 28 char code[MCOUNT_INSN_SIZE];
@@ -60,11 +61,7 @@ notrace int
60ftrace_modify_code(unsigned long ip, unsigned char *old_code, 61ftrace_modify_code(unsigned long ip, unsigned char *old_code,
61 unsigned char *new_code) 62 unsigned char *new_code)
62{ 63{
63 unsigned replaced; 64 unsigned char replaced[MCOUNT_INSN_SIZE];
64 unsigned old = *(unsigned *)old_code; /* 4 bytes */
65 unsigned new = *(unsigned *)new_code; /* 4 bytes */
66 unsigned char newch = new_code[4];
67 int faulted = 0;
68 65
69 /* 66 /*
70 * Note: Due to modules and __init, code can 67 * Note: Due to modules and __init, code can
@@ -72,29 +69,20 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
72 * as well as code changing. 69 * as well as code changing.
73 * 70 *
74 * No real locking needed, this code is run through 71 * No real locking needed, this code is run through
75 * kstop_machine. 72 * kstop_machine, or before SMP starts.
76 */ 73 */
77 asm volatile ( 74 if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
78 "1: lock\n" 75 return 1;
79 " cmpxchg %3, (%2)\n" 76
80 " jnz 2f\n" 77 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
81 " movb %b4, 4(%2)\n" 78 return 2;
82 "2:\n"
83 ".section .fixup, \"ax\"\n"
84 "3: movl $1, %0\n"
85 " jmp 2b\n"
86 ".previous\n"
87 _ASM_EXTABLE(1b, 3b)
88 : "=r"(faulted), "=a"(replaced)
89 : "r"(ip), "r"(new), "c"(newch),
90 "0"(faulted), "a"(old)
91 : "memory");
92 sync_core();
93 79
94 if (replaced != old && replaced != new) 80 WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
95 faulted = 2; 81 MCOUNT_INSN_SIZE));
96 82
97 return faulted; 83 sync_core();
84
85 return 0;
98} 86}
99 87
100notrace int ftrace_update_ftrace_func(ftrace_func_t func) 88notrace int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -112,30 +100,76 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
112 100
113notrace int ftrace_mcount_set(unsigned long *data) 101notrace int ftrace_mcount_set(unsigned long *data)
114{ 102{
115 unsigned long ip = (long)(&mcount_call); 103 /* mcount is initialized as a nop */
116 unsigned long *addr = data; 104 *data = 0;
117 unsigned char old[MCOUNT_INSN_SIZE], *new;
118
119 /*
120 * Replace the mcount stub with a pointer to the
121 * ip recorder function.
122 */
123 memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
124 new = ftrace_call_replace(ip, *addr);
125 *addr = ftrace_modify_code(ip, old, new);
126
127 return 0; 105 return 0;
128} 106}
129 107
130int __init ftrace_dyn_arch_init(void *data) 108int __init ftrace_dyn_arch_init(void *data)
131{ 109{
132 const unsigned char *const *noptable = find_nop_table(); 110 extern const unsigned char ftrace_test_p6nop[];
133 111 extern const unsigned char ftrace_test_nop5[];
134 /* This is running in kstop_machine */ 112 extern const unsigned char ftrace_test_jmp[];
135 113 int faulted = 0;
136 ftrace_mcount_set(data);
137 114
138 ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE]; 115 /*
116 * There is no good nop for all x86 archs.
117 * We will default to using the P6_NOP5, but first we
118 * will test to make sure that the nop will actually
119 * work on this CPU. If it faults, we will then
120 * go to a lesser efficient 5 byte nop. If that fails
121 * we then just use a jmp as our nop. This isn't the most
122 * efficient nop, but we can not use a multi part nop
123 * since we would then risk being preempted in the middle
124 * of that nop, and if we enabled tracing then, it might
125 * cause a system crash.
126 *
127 * TODO: check the cpuid to determine the best nop.
128 */
129 asm volatile (
130 "jmp ftrace_test_jmp\n"
131 /* This code needs to stay around */
132 ".section .text, \"ax\"\n"
133 "ftrace_test_jmp:"
134 "jmp ftrace_test_p6nop\n"
135 "nop\n"
136 "nop\n"
137 "nop\n" /* 2 byte jmp + 3 bytes */
138 "ftrace_test_p6nop:"
139 P6_NOP5
140 "jmp 1f\n"
141 "ftrace_test_nop5:"
142 ".byte 0x66,0x66,0x66,0x66,0x90\n"
143 "jmp 1f\n"
144 ".previous\n"
145 "1:"
146 ".section .fixup, \"ax\"\n"
147 "2: movl $1, %0\n"
148 " jmp ftrace_test_nop5\n"
149 "3: movl $2, %0\n"
150 " jmp 1b\n"
151 ".previous\n"
152 _ASM_EXTABLE(ftrace_test_p6nop, 2b)
153 _ASM_EXTABLE(ftrace_test_nop5, 3b)
154 : "=r"(faulted) : "0" (faulted));
155
156 switch (faulted) {
157 case 0:
158 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
159 ftrace_nop = (unsigned long *)ftrace_test_p6nop;
160 break;
161 case 1:
162 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
163 ftrace_nop = (unsigned long *)ftrace_test_nop5;
164 break;
165 case 2:
166 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
167 ftrace_nop = (unsigned long *)ftrace_test_jmp;
168 break;
169 }
170
171 /* The return code is retured via data */
172 *(unsigned long *)data = 0;
139 173
140 return 0; 174 return 0;
141} 175}
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 635b50e85581..2c4baa88f2cb 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -56,13 +56,6 @@ struct remap_trace {
56static DEFINE_PER_CPU(struct trap_reason, pf_reason); 56static DEFINE_PER_CPU(struct trap_reason, pf_reason);
57static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace); 57static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);
58 58
59#if 0 /* XXX: no way gather this info anymore */
60/* Access to this is not per-cpu. */
61static DEFINE_PER_CPU(atomic_t, dropped);
62#endif
63
64static struct dentry *marker_file;
65
66static DEFINE_MUTEX(mmiotrace_mutex); 59static DEFINE_MUTEX(mmiotrace_mutex);
67static DEFINE_SPINLOCK(trace_lock); 60static DEFINE_SPINLOCK(trace_lock);
68static atomic_t mmiotrace_enabled; 61static atomic_t mmiotrace_enabled;
@@ -75,7 +68,7 @@ static LIST_HEAD(trace_list); /* struct remap_trace */
75 * and trace_lock. 68 * and trace_lock.
76 * - Routines depending on is_enabled() must take trace_lock. 69 * - Routines depending on is_enabled() must take trace_lock.
77 * - trace_list users must hold trace_lock. 70 * - trace_list users must hold trace_lock.
78 * - is_enabled() guarantees that mmio_trace_record is allowed. 71 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
79 * - pre/post callbacks assume the effect of is_enabled() being true. 72 * - pre/post callbacks assume the effect of is_enabled() being true.
80 */ 73 */
81 74
@@ -97,44 +90,6 @@ static bool is_enabled(void)
97 return atomic_read(&mmiotrace_enabled); 90 return atomic_read(&mmiotrace_enabled);
98} 91}
99 92
100#if 0 /* XXX: needs rewrite */
101/*
102 * Write callback for the debugfs entry:
103 * Read a marker and write it to the mmio trace log
104 */
105static ssize_t write_marker(struct file *file, const char __user *buffer,
106 size_t count, loff_t *ppos)
107{
108 char *event = NULL;
109 struct mm_io_header *headp;
110 ssize_t len = (count > 65535) ? 65535 : count;
111
112 event = kzalloc(sizeof(*headp) + len, GFP_KERNEL);
113 if (!event)
114 return -ENOMEM;
115
116 headp = (struct mm_io_header *)event;
117 headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT);
118 headp->data_len = len;
119
120 if (copy_from_user(event + sizeof(*headp), buffer, len)) {
121 kfree(event);
122 return -EFAULT;
123 }
124
125 spin_lock_irq(&trace_lock);
126#if 0 /* XXX: convert this to use tracing */
127 if (is_enabled())
128 relay_write(chan, event, sizeof(*headp) + len);
129 else
130#endif
131 len = -EINVAL;
132 spin_unlock_irq(&trace_lock);
133 kfree(event);
134 return len;
135}
136#endif
137
138static void print_pte(unsigned long address) 93static void print_pte(unsigned long address)
139{ 94{
140 unsigned int level; 95 unsigned int level;
@@ -307,8 +262,10 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
307 map.map_id = trace->id; 262 map.map_id = trace->id;
308 263
309 spin_lock_irq(&trace_lock); 264 spin_lock_irq(&trace_lock);
310 if (!is_enabled()) 265 if (!is_enabled()) {
266 kfree(trace);
311 goto not_enabled; 267 goto not_enabled;
268 }
312 269
313 mmio_trace_mapping(&map); 270 mmio_trace_mapping(&map);
314 list_add_tail(&trace->list, &trace_list); 271 list_add_tail(&trace->list, &trace_list);
@@ -377,6 +334,23 @@ void mmiotrace_iounmap(volatile void __iomem *addr)
377 iounmap_trace_core(addr); 334 iounmap_trace_core(addr);
378} 335}
379 336
337int mmiotrace_printk(const char *fmt, ...)
338{
339 int ret = 0;
340 va_list args;
341 unsigned long flags;
342 va_start(args, fmt);
343
344 spin_lock_irqsave(&trace_lock, flags);
345 if (is_enabled())
346 ret = mmio_trace_printk(fmt, args);
347 spin_unlock_irqrestore(&trace_lock, flags);
348
349 va_end(args);
350 return ret;
351}
352EXPORT_SYMBOL(mmiotrace_printk);
353
380static void clear_trace_list(void) 354static void clear_trace_list(void)
381{ 355{
382 struct remap_trace *trace; 356 struct remap_trace *trace;
@@ -462,26 +436,12 @@ static void leave_uniprocessor(void)
462} 436}
463#endif 437#endif
464 438
465#if 0 /* XXX: out of order */
466static struct file_operations fops_marker = {
467 .owner = THIS_MODULE,
468 .write = write_marker
469};
470#endif
471
472void enable_mmiotrace(void) 439void enable_mmiotrace(void)
473{ 440{
474 mutex_lock(&mmiotrace_mutex); 441 mutex_lock(&mmiotrace_mutex);
475 if (is_enabled()) 442 if (is_enabled())
476 goto out; 443 goto out;
477 444
478#if 0 /* XXX: tracing does not support text entries */
479 marker_file = debugfs_create_file("marker", 0660, dir, NULL,
480 &fops_marker);
481 if (!marker_file)
482 pr_err(NAME "marker file creation failed.\n");
483#endif
484
485 if (nommiotrace) 445 if (nommiotrace)
486 pr_info(NAME "MMIO tracing disabled.\n"); 446 pr_info(NAME "MMIO tracing disabled.\n");
487 enter_uniprocessor(); 447 enter_uniprocessor();
@@ -506,11 +466,6 @@ void disable_mmiotrace(void)
506 466
507 clear_trace_list(); /* guarantees: no more kmmio callbacks */ 467 clear_trace_list(); /* guarantees: no more kmmio callbacks */
508 leave_uniprocessor(); 468 leave_uniprocessor();
509 if (marker_file) {
510 debugfs_remove(marker_file);
511 marker_file = NULL;
512 }
513
514 pr_info(NAME "disabled.\n"); 469 pr_info(NAME "disabled.\n");
515out: 470out:
516 mutex_unlock(&mmiotrace_mutex); 471 mutex_unlock(&mmiotrace_mutex);
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index efa1911e20ca..df3d5c861cda 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -79,25 +79,34 @@ static unsigned int mw32[] = { 0xC7 };
79static unsigned int mw64[] = { 0x89, 0x8B }; 79static unsigned int mw64[] = { 0x89, 0x8B };
80#endif /* not __i386__ */ 80#endif /* not __i386__ */
81 81
82static int skip_prefix(unsigned char *addr, int *shorted, int *enlarged, 82struct prefix_bits {
83 int *rexr) 83 unsigned shorted:1;
84 unsigned enlarged:1;
85 unsigned rexr:1;
86 unsigned rex:1;
87};
88
89static int skip_prefix(unsigned char *addr, struct prefix_bits *prf)
84{ 90{
85 int i; 91 int i;
86 unsigned char *p = addr; 92 unsigned char *p = addr;
87 *shorted = 0; 93 prf->shorted = 0;
88 *enlarged = 0; 94 prf->enlarged = 0;
89 *rexr = 0; 95 prf->rexr = 0;
96 prf->rex = 0;
90 97
91restart: 98restart:
92 for (i = 0; i < ARRAY_SIZE(prefix_codes); i++) { 99 for (i = 0; i < ARRAY_SIZE(prefix_codes); i++) {
93 if (*p == prefix_codes[i]) { 100 if (*p == prefix_codes[i]) {
94 if (*p == 0x66) 101 if (*p == 0x66)
95 *shorted = 1; 102 prf->shorted = 1;
96#ifdef __amd64__ 103#ifdef __amd64__
97 if ((*p & 0xf8) == 0x48) 104 if ((*p & 0xf8) == 0x48)
98 *enlarged = 1; 105 prf->enlarged = 1;
99 if ((*p & 0xf4) == 0x44) 106 if ((*p & 0xf4) == 0x44)
100 *rexr = 1; 107 prf->rexr = 1;
108 if ((*p & 0xf0) == 0x40)
109 prf->rex = 1;
101#endif 110#endif
102 p++; 111 p++;
103 goto restart; 112 goto restart;
@@ -135,12 +144,12 @@ enum reason_type get_ins_type(unsigned long ins_addr)
135{ 144{
136 unsigned int opcode; 145 unsigned int opcode;
137 unsigned char *p; 146 unsigned char *p;
138 int shorted, enlarged, rexr; 147 struct prefix_bits prf;
139 int i; 148 int i;
140 enum reason_type rv = OTHERS; 149 enum reason_type rv = OTHERS;
141 150
142 p = (unsigned char *)ins_addr; 151 p = (unsigned char *)ins_addr;
143 p += skip_prefix(p, &shorted, &enlarged, &rexr); 152 p += skip_prefix(p, &prf);
144 p += get_opcode(p, &opcode); 153 p += get_opcode(p, &opcode);
145 154
146 CHECK_OP_TYPE(opcode, reg_rop, REG_READ); 155 CHECK_OP_TYPE(opcode, reg_rop, REG_READ);
@@ -156,10 +165,11 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
156{ 165{
157 unsigned int opcode; 166 unsigned int opcode;
158 unsigned char *p; 167 unsigned char *p;
159 int i, shorted, enlarged, rexr; 168 struct prefix_bits prf;
169 int i;
160 170
161 p = (unsigned char *)ins_addr; 171 p = (unsigned char *)ins_addr;
162 p += skip_prefix(p, &shorted, &enlarged, &rexr); 172 p += skip_prefix(p, &prf);
163 p += get_opcode(p, &opcode); 173 p += get_opcode(p, &opcode);
164 174
165 for (i = 0; i < ARRAY_SIZE(rw8); i++) 175 for (i = 0; i < ARRAY_SIZE(rw8); i++)
@@ -168,7 +178,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
168 178
169 for (i = 0; i < ARRAY_SIZE(rw32); i++) 179 for (i = 0; i < ARRAY_SIZE(rw32); i++)
170 if (rw32[i] == opcode) 180 if (rw32[i] == opcode)
171 return (shorted ? 2 : (enlarged ? 8 : 4)); 181 return prf.shorted ? 2 : (prf.enlarged ? 8 : 4);
172 182
173 printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode); 183 printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode);
174 return 0; 184 return 0;
@@ -178,10 +188,11 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
178{ 188{
179 unsigned int opcode; 189 unsigned int opcode;
180 unsigned char *p; 190 unsigned char *p;
181 int i, shorted, enlarged, rexr; 191 struct prefix_bits prf;
192 int i;
182 193
183 p = (unsigned char *)ins_addr; 194 p = (unsigned char *)ins_addr;
184 p += skip_prefix(p, &shorted, &enlarged, &rexr); 195 p += skip_prefix(p, &prf);
185 p += get_opcode(p, &opcode); 196 p += get_opcode(p, &opcode);
186 197
187 for (i = 0; i < ARRAY_SIZE(mw8); i++) 198 for (i = 0; i < ARRAY_SIZE(mw8); i++)
@@ -194,11 +205,11 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
194 205
195 for (i = 0; i < ARRAY_SIZE(mw32); i++) 206 for (i = 0; i < ARRAY_SIZE(mw32); i++)
196 if (mw32[i] == opcode) 207 if (mw32[i] == opcode)
197 return shorted ? 2 : 4; 208 return prf.shorted ? 2 : 4;
198 209
199 for (i = 0; i < ARRAY_SIZE(mw64); i++) 210 for (i = 0; i < ARRAY_SIZE(mw64); i++)
200 if (mw64[i] == opcode) 211 if (mw64[i] == opcode)
201 return shorted ? 2 : (enlarged ? 8 : 4); 212 return prf.shorted ? 2 : (prf.enlarged ? 8 : 4);
202 213
203 printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode); 214 printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode);
204 return 0; 215 return 0;
@@ -238,7 +249,7 @@ enum {
238#endif 249#endif
239}; 250};
240 251
241static unsigned char *get_reg_w8(int no, struct pt_regs *regs) 252static unsigned char *get_reg_w8(int no, int rex, struct pt_regs *regs)
242{ 253{
243 unsigned char *rv = NULL; 254 unsigned char *rv = NULL;
244 255
@@ -255,18 +266,6 @@ static unsigned char *get_reg_w8(int no, struct pt_regs *regs)
255 case arg_DL: 266 case arg_DL:
256 rv = (unsigned char *)&regs->dx; 267 rv = (unsigned char *)&regs->dx;
257 break; 268 break;
258 case arg_AH:
259 rv = 1 + (unsigned char *)&regs->ax;
260 break;
261 case arg_BH:
262 rv = 1 + (unsigned char *)&regs->bx;
263 break;
264 case arg_CH:
265 rv = 1 + (unsigned char *)&regs->cx;
266 break;
267 case arg_DH:
268 rv = 1 + (unsigned char *)&regs->dx;
269 break;
270#ifdef __amd64__ 269#ifdef __amd64__
271 case arg_R8: 270 case arg_R8:
272 rv = (unsigned char *)&regs->r8; 271 rv = (unsigned char *)&regs->r8;
@@ -294,9 +293,55 @@ static unsigned char *get_reg_w8(int no, struct pt_regs *regs)
294 break; 293 break;
295#endif 294#endif
296 default: 295 default:
297 printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no);
298 break; 296 break;
299 } 297 }
298
299 if (rv)
300 return rv;
301
302 if (rex) {
303 /*
304 * If REX prefix exists, access low bytes of SI etc.
305 * instead of AH etc.
306 */
307 switch (no) {
308 case arg_SI:
309 rv = (unsigned char *)&regs->si;
310 break;
311 case arg_DI:
312 rv = (unsigned char *)&regs->di;
313 break;
314 case arg_BP:
315 rv = (unsigned char *)&regs->bp;
316 break;
317 case arg_SP:
318 rv = (unsigned char *)&regs->sp;
319 break;
320 default:
321 break;
322 }
323 } else {
324 switch (no) {
325 case arg_AH:
326 rv = 1 + (unsigned char *)&regs->ax;
327 break;
328 case arg_BH:
329 rv = 1 + (unsigned char *)&regs->bx;
330 break;
331 case arg_CH:
332 rv = 1 + (unsigned char *)&regs->cx;
333 break;
334 case arg_DH:
335 rv = 1 + (unsigned char *)&regs->dx;
336 break;
337 default:
338 break;
339 }
340 }
341
342 if (!rv)
343 printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no);
344
300 return rv; 345 return rv;
301} 346}
302 347
@@ -368,11 +413,12 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
368 unsigned char mod_rm; 413 unsigned char mod_rm;
369 int reg; 414 int reg;
370 unsigned char *p; 415 unsigned char *p;
371 int i, shorted, enlarged, rexr; 416 struct prefix_bits prf;
417 int i;
372 unsigned long rv; 418 unsigned long rv;
373 419
374 p = (unsigned char *)ins_addr; 420 p = (unsigned char *)ins_addr;
375 p += skip_prefix(p, &shorted, &enlarged, &rexr); 421 p += skip_prefix(p, &prf);
376 p += get_opcode(p, &opcode); 422 p += get_opcode(p, &opcode);
377 for (i = 0; i < ARRAY_SIZE(reg_rop); i++) 423 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
378 if (reg_rop[i] == opcode) { 424 if (reg_rop[i] == opcode) {
@@ -392,10 +438,10 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
392 438
393do_work: 439do_work:
394 mod_rm = *p; 440 mod_rm = *p;
395 reg = ((mod_rm >> 3) & 0x7) | (rexr << 3); 441 reg = ((mod_rm >> 3) & 0x7) | (prf.rexr << 3);
396 switch (get_ins_reg_width(ins_addr)) { 442 switch (get_ins_reg_width(ins_addr)) {
397 case 1: 443 case 1:
398 return *get_reg_w8(reg, regs); 444 return *get_reg_w8(reg, prf.rex, regs);
399 445
400 case 2: 446 case 2:
401 return *(unsigned short *)get_reg_w32(reg, regs); 447 return *(unsigned short *)get_reg_w32(reg, regs);
@@ -422,11 +468,12 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
422 unsigned char mod_rm; 468 unsigned char mod_rm;
423 unsigned char mod; 469 unsigned char mod;
424 unsigned char *p; 470 unsigned char *p;
425 int i, shorted, enlarged, rexr; 471 struct prefix_bits prf;
472 int i;
426 unsigned long rv; 473 unsigned long rv;
427 474
428 p = (unsigned char *)ins_addr; 475 p = (unsigned char *)ins_addr;
429 p += skip_prefix(p, &shorted, &enlarged, &rexr); 476 p += skip_prefix(p, &prf);
430 p += get_opcode(p, &opcode); 477 p += get_opcode(p, &opcode);
431 for (i = 0; i < ARRAY_SIZE(imm_wop); i++) 478 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
432 if (imm_wop[i] == opcode) { 479 if (imm_wop[i] == opcode) {
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index d877c5b423ef..ab50a8d7402c 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -3,6 +3,7 @@
3 */ 3 */
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/io.h> 5#include <linux/io.h>
6#include <linux/mmiotrace.h>
6 7
7#define MODULE_NAME "testmmiotrace" 8#define MODULE_NAME "testmmiotrace"
8 9
@@ -13,6 +14,7 @@ MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
13static void do_write_test(void __iomem *p) 14static void do_write_test(void __iomem *p)
14{ 15{
15 unsigned int i; 16 unsigned int i;
17 mmiotrace_printk("Write test.\n");
16 for (i = 0; i < 256; i++) 18 for (i = 0; i < 256; i++)
17 iowrite8(i, p + i); 19 iowrite8(i, p + i);
18 for (i = 1024; i < (5 * 1024); i += 2) 20 for (i = 1024; i < (5 * 1024); i += 2)
@@ -24,6 +26,7 @@ static void do_write_test(void __iomem *p)
24static void do_read_test(void __iomem *p) 26static void do_read_test(void __iomem *p)
25{ 27{
26 unsigned int i; 28 unsigned int i;
29 mmiotrace_printk("Read test.\n");
27 for (i = 0; i < 256; i++) 30 for (i = 0; i < 256; i++)
28 ioread8(p + i); 31 ioread8(p + i);
29 for (i = 1024; i < (5 * 1024); i += 2) 32 for (i = 1024; i < (5 * 1024); i += 2)
@@ -39,6 +42,7 @@ static void do_test(void)
39 pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); 42 pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
40 return; 43 return;
41 } 44 }
45 mmiotrace_printk("ioremap returned %p.\n", p);
42 do_write_test(p); 46 do_write_test(p);
43 do_read_test(p); 47 do_read_test(p);
44 iounmap(p); 48 iounmap(p);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 74c5faf26c05..80744606bad1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -37,6 +37,13 @@
37#define MEM_DISCARD(sec) *(.mem##sec) 37#define MEM_DISCARD(sec) *(.mem##sec)
38#endif 38#endif
39 39
40#ifdef CONFIG_FTRACE_MCOUNT_RECORD
41#define MCOUNT_REC() VMLINUX_SYMBOL(__start_mcount_loc) = .; \
42 *(__mcount_loc) \
43 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
44#else
45#define MCOUNT_REC()
46#endif
40 47
41/* .data section */ 48/* .data section */
42#define DATA_DATA \ 49#define DATA_DATA \
@@ -52,7 +59,10 @@
52 . = ALIGN(8); \ 59 . = ALIGN(8); \
53 VMLINUX_SYMBOL(__start___markers) = .; \ 60 VMLINUX_SYMBOL(__start___markers) = .; \
54 *(__markers) \ 61 *(__markers) \
55 VMLINUX_SYMBOL(__stop___markers) = .; 62 VMLINUX_SYMBOL(__stop___markers) = .; \
63 VMLINUX_SYMBOL(__start___tracepoints) = .; \
64 *(__tracepoints) \
65 VMLINUX_SYMBOL(__stop___tracepoints) = .;
56 66
57#define RO_DATA(align) \ 67#define RO_DATA(align) \
58 . = ALIGN((align)); \ 68 . = ALIGN((align)); \
@@ -61,6 +71,7 @@
61 *(.rodata) *(.rodata.*) \ 71 *(.rodata) *(.rodata.*) \
62 *(__vermagic) /* Kernel version magic */ \ 72 *(__vermagic) /* Kernel version magic */ \
63 *(__markers_strings) /* Markers: strings */ \ 73 *(__markers_strings) /* Markers: strings */ \
74 *(__tracepoints_strings)/* Tracepoints: strings */ \
64 } \ 75 } \
65 \ 76 \
66 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ 77 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
@@ -188,6 +199,7 @@
188 /* __*init sections */ \ 199 /* __*init sections */ \
189 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ 200 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
190 *(.ref.rodata) \ 201 *(.ref.rodata) \
202 MCOUNT_REC() \
191 DEV_KEEP(init.rodata) \ 203 DEV_KEEP(init.rodata) \
192 DEV_KEEP(exit.rodata) \ 204 DEV_KEEP(exit.rodata) \
193 CPU_KEEP(init.rodata) \ 205 CPU_KEEP(init.rodata) \
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
index be0e004ad148..1bb6f9bbe1ab 100644
--- a/include/asm-x86/ftrace.h
+++ b/include/asm-x86/ftrace.h
@@ -7,6 +7,16 @@
7 7
8#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
9extern void mcount(void); 9extern void mcount(void);
10
11static inline unsigned long ftrace_call_adjust(unsigned long addr)
12{
13 /*
14 * call mcount is "e8 <4 byte offset>"
15 * The addr points to the 4 byte offset and the caller of this
16 * function wants the pointer to e8. Simply subtract one.
17 */
18 return addr - 1;
19}
10#endif 20#endif
11 21
12#endif /* CONFIG_FTRACE */ 22#endif /* CONFIG_FTRACE */
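
To spell out the arithmetic in the new ftrace_call_adjust() helper with a worked example: the recorded address points into the middle of the 5-byte call instruction, at its 4-byte offset field. If, say, the e8 opcode of "call mcount" sits at 0xc0123450, the recorded address is 0xc0123451, and subtracting one returns 0xc0123450, the instruction start that ftrace needs to patch.
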
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8322141ee480..98115d9d04da 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -44,6 +44,8 @@ extern void __chk_io_ptr(const volatile void __iomem *);
44# error Sorry, your compiler is too old/not recognized. 44# error Sorry, your compiler is too old/not recognized.
45#endif 45#endif
46 46
47#define notrace __attribute__((no_instrument_function))
48
47/* Intel compiler defines __GNUC__. So we will overwrite implementations 49/* Intel compiler defines __GNUC__. So we will overwrite implementations
48 * coming from above header files here 50 * coming from above header files here
49 */ 51 */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index bb384068272e..a3d46151be19 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,10 +1,14 @@
1#ifndef _LINUX_FTRACE_H 1#ifndef _LINUX_FTRACE_H
2#define _LINUX_FTRACE_H 2#define _LINUX_FTRACE_H
3 3
4#ifdef CONFIG_FTRACE
5
6#include <linux/linkage.h> 4#include <linux/linkage.h>
7#include <linux/fs.h> 5#include <linux/fs.h>
6#include <linux/ktime.h>
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/kallsyms.h>
10
11#ifdef CONFIG_FTRACE
8 12
9extern int ftrace_enabled; 13extern int ftrace_enabled;
10extern int 14extern int
@@ -36,6 +40,7 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
36# define register_ftrace_function(ops) do { } while (0) 40# define register_ftrace_function(ops) do { } while (0)
37# define unregister_ftrace_function(ops) do { } while (0) 41# define unregister_ftrace_function(ops) do { } while (0)
38# define clear_ftrace_function(ops) do { } while (0) 42# define clear_ftrace_function(ops) do { } while (0)
43static inline void ftrace_kill_atomic(void) { }
39#endif /* CONFIG_FTRACE */ 44#endif /* CONFIG_FTRACE */
40 45
41#ifdef CONFIG_DYNAMIC_FTRACE 46#ifdef CONFIG_DYNAMIC_FTRACE
@@ -76,8 +81,10 @@ extern void mcount_call(void);
76 81
77extern int skip_trace(unsigned long ip); 82extern int skip_trace(unsigned long ip);
78 83
79void ftrace_disable_daemon(void); 84extern void ftrace_release(void *start, unsigned long size);
80void ftrace_enable_daemon(void); 85
86extern void ftrace_disable_daemon(void);
87extern void ftrace_enable_daemon(void);
81 88
82#else 89#else
83# define skip_trace(ip) ({ 0; }) 90# define skip_trace(ip) ({ 0; })
@@ -85,6 +92,7 @@ void ftrace_enable_daemon(void);
85# define ftrace_set_filter(buf, len, reset) do { } while (0) 92# define ftrace_set_filter(buf, len, reset) do { } while (0)
86# define ftrace_disable_daemon() do { } while (0) 93# define ftrace_disable_daemon() do { } while (0)
87# define ftrace_enable_daemon() do { } while (0) 94# define ftrace_enable_daemon() do { } while (0)
95static inline void ftrace_release(void *start, unsigned long size) { }
88#endif /* CONFIG_DYNAMIC_FTRACE */ 96#endif /* CONFIG_DYNAMIC_FTRACE */
89 97
90/* totally disable ftrace - can not re-enable after this */ 98/* totally disable ftrace - can not re-enable after this */
@@ -98,9 +106,11 @@ static inline void tracer_disable(void)
98#endif 106#endif
99} 107}
100 108
101/* Ftrace disable/restore without lock. Some synchronization mechanism 109/*
110 * Ftrace disable/restore without lock. Some synchronization mechanism
102 * must be used to prevent ftrace_enabled to be changed between 111 * must be used to prevent ftrace_enabled to be changed between
103 * disable/restore. */ 112 * disable/restore.
113 */
104static inline int __ftrace_enabled_save(void) 114static inline int __ftrace_enabled_save(void)
105{ 115{
106#ifdef CONFIG_FTRACE 116#ifdef CONFIG_FTRACE
@@ -157,9 +167,71 @@ static inline void __ftrace_enabled_restore(int enabled)
157#ifdef CONFIG_TRACING 167#ifdef CONFIG_TRACING
158extern void 168extern void
159ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); 169ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
170
171/**
172 * ftrace_printk - printf formatting in the ftrace buffer
173 * @fmt: the printf format for printing
174 *
175 * Note: __ftrace_printk is an internal function for ftrace_printk and
176 * the @ip is passed in via the ftrace_printk macro.
177 *
178 * This function allows a kernel developer to debug fast path sections
179 * that printk is not appropriate for. By scattering in various
180 * printk like tracing in the code, a developer can quickly see
181 * where problems are occurring.
182 *
183 * This is intended as a debugging tool for the developer only.
184 * Please refrain from leaving ftrace_printks scattered around in
185 * your code.
186 */
187# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
188extern int
189__ftrace_printk(unsigned long ip, const char *fmt, ...)
190 __attribute__ ((format (printf, 2, 3)));
191extern void ftrace_dump(void);
160#else 192#else
161static inline void 193static inline void
162ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } 194ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
195static inline int
196ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
197
198static inline int
199ftrace_printk(const char *fmt, ...)
200{
201 return 0;
202}
203static inline void ftrace_dump(void) { }
163#endif 204#endif
164 205
206#ifdef CONFIG_FTRACE_MCOUNT_RECORD
207extern void ftrace_init(void);
208extern void ftrace_init_module(unsigned long *start, unsigned long *end);
209#else
210static inline void ftrace_init(void) { }
211static inline void
212ftrace_init_module(unsigned long *start, unsigned long *end) { }
213#endif
214
215
216struct boot_trace {
217 pid_t caller;
218 char func[KSYM_NAME_LEN];
219 int result;
220 unsigned long long duration; /* usecs */
221 ktime_t calltime;
222 ktime_t rettime;
223};
224
225#ifdef CONFIG_BOOT_TRACER
226extern void trace_boot(struct boot_trace *it, initcall_t fn);
227extern void start_boot_trace(void);
228extern void stop_boot_trace(void);
229#else
230static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
231static inline void start_boot_trace(void) { }
232static inline void stop_boot_trace(void) { }
233#endif
234
235
236
165#endif /* _LINUX_FTRACE_H */ 237#endif /* _LINUX_FTRACE_H */
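
A short usage sketch of the ftrace_printk()/ftrace_dump() pair documented above (the call site is hypothetical, not from this commit): the output goes into the ftrace ring buffer rather than the printk log, and ftrace_dump() spills that buffer to the console when something goes wrong.

static void my_hot_path(unsigned long val)
{
	/* Debug output into the trace buffer; meant to be removed later. */
	ftrace_printk("hot path reached, val=%lu\n", val);

	if (unlikely(!val)) {
		/* Dump the whole trace buffer to the console for post-mortem. */
		ftrace_dump();
	}
}
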
diff --git a/include/linux/init.h b/include/linux/init.h
index ad63824460e3..0c1264668be0 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,7 +40,7 @@
40 40
41/* These are for everybody (although not all archs will actually 41/* These are for everybody (although not all archs will actually
42 discard it in modules) */ 42 discard it in modules) */
43#define __init __section(.init.text) __cold 43#define __init __section(.init.text) __cold notrace
44#define __initdata __section(.init.data) 44#define __initdata __section(.init.data)
45#define __initconst __section(.init.rodata) 45#define __initconst __section(.init.rodata)
46#define __exitdata __section(.exit.data) 46#define __exitdata __section(.exit.data)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5a566b705ca9..94d17ff64c5a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -496,4 +496,9 @@ struct sysinfo {
496#define NUMA_BUILD 0 496#define NUMA_BUILD 0
497#endif 497#endif
498 498
499/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
500#ifdef CONFIG_FTRACE_MCOUNT_RECORD
501# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
502#endif
503
499#endif 504#endif
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0be7795655fa..497b1d1f7a05 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -29,6 +29,7 @@
29 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 29 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
30 * <prasanna@in.ibm.com> added function-return probes. 30 * <prasanna@in.ibm.com> added function-return probes.
31 */ 31 */
32#include <linux/linkage.h>
32#include <linux/list.h> 33#include <linux/list.h>
33#include <linux/notifier.h> 34#include <linux/notifier.h>
34#include <linux/smp.h> 35#include <linux/smp.h>
@@ -47,7 +48,7 @@
47#define KPROBE_HIT_SSDONE 0x00000008 48#define KPROBE_HIT_SSDONE 0x00000008
48 49
49/* Attach to insert probes on any functions which should be ignored*/ 50/* Attach to insert probes on any functions which should be ignored*/
50#define __kprobes __attribute__((__section__(".kprobes.text"))) 51#define __kprobes __attribute__((__section__(".kprobes.text"))) notrace
51 52
52struct kprobe; 53struct kprobe;
53struct pt_regs; 54struct pt_regs;
@@ -256,7 +257,7 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
256 257
257#else /* CONFIG_KPROBES */ 258#else /* CONFIG_KPROBES */
258 259
259#define __kprobes /**/ 260#define __kprobes notrace
260struct jprobe; 261struct jprobe;
261struct kretprobe; 262struct kretprobe;
262 263
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 56ba37394656..9fd1f859021b 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -4,8 +4,6 @@
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/linkage.h> 5#include <asm/linkage.h>
6 6
7#define notrace __attribute__((no_instrument_function))
8
9#ifdef __cplusplus 7#ifdef __cplusplus
10#define CPP_ASMLINKAGE extern "C" 8#define CPP_ASMLINKAGE extern "C"
11#else 9#else
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 1290653f9241..889196c7fbb1 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -160,4 +160,11 @@ extern int marker_probe_unregister_private_data(marker_probe_func *probe,
160extern void *marker_get_private_data(const char *name, marker_probe_func *probe, 160extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
161 int num); 161 int num);
162 162
163/*
164 * marker_synchronize_unregister must be called between the last marker probe
165 * unregistration and the end of module exit to make sure there is no caller
166 * executing a probe when it is freed.
167 */
168#define marker_synchronize_unregister() synchronize_sched()
169
163#endif 170#endif
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 61d19e1b7a0b..139d7c88d9c9 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -34,11 +34,15 @@ extern void unregister_kmmio_probe(struct kmmio_probe *p);
34/* Called from page fault handler. */ 34/* Called from page fault handler. */
35extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); 35extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
36 36
37/* Called from ioremap.c */
38#ifdef CONFIG_MMIOTRACE 37#ifdef CONFIG_MMIOTRACE
38/* Called from ioremap.c */
39extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, 39extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
40 void __iomem *addr); 40 void __iomem *addr);
41extern void mmiotrace_iounmap(volatile void __iomem *addr); 41extern void mmiotrace_iounmap(volatile void __iomem *addr);
42
43/* For anyone to insert markers. Remember trailing newline. */
44extern int mmiotrace_printk(const char *fmt, ...)
45 __attribute__ ((format (printf, 1, 2)));
42#else 46#else
43static inline void mmiotrace_ioremap(resource_size_t offset, 47static inline void mmiotrace_ioremap(resource_size_t offset,
44 unsigned long size, void __iomem *addr) 48 unsigned long size, void __iomem *addr)
@@ -48,15 +52,22 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
48static inline void mmiotrace_iounmap(volatile void __iomem *addr) 52static inline void mmiotrace_iounmap(volatile void __iomem *addr)
49{ 53{
50} 54}
51#endif /* CONFIG_MMIOTRACE_HOOKS */ 55
56static inline int mmiotrace_printk(const char *fmt, ...)
57 __attribute__ ((format (printf, 1, 0)));
58
59static inline int mmiotrace_printk(const char *fmt, ...)
60{
61 return 0;
62}
63#endif /* CONFIG_MMIOTRACE */
52 64
53enum mm_io_opcode { 65enum mm_io_opcode {
54 MMIO_READ = 0x1, /* struct mmiotrace_rw */ 66 MMIO_READ = 0x1, /* struct mmiotrace_rw */
55 MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ 67 MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
56 MMIO_PROBE = 0x3, /* struct mmiotrace_map */ 68 MMIO_PROBE = 0x3, /* struct mmiotrace_map */
57 MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ 69 MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
58 MMIO_MARKER = 0x5, /* raw char data */ 70 MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
59 MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */
60}; 71};
61 72
62struct mmiotrace_rw { 73struct mmiotrace_rw {
@@ -81,5 +92,6 @@ extern void enable_mmiotrace(void);
81extern void disable_mmiotrace(void); 92extern void disable_mmiotrace(void);
82extern void mmio_trace_rw(struct mmiotrace_rw *rw); 93extern void mmio_trace_rw(struct mmiotrace_rw *rw);
83extern void mmio_trace_mapping(struct mmiotrace_map *map); 94extern void mmio_trace_mapping(struct mmiotrace_map *map);
95extern int mmio_trace_printk(const char *fmt, va_list args);
84 96
85#endif /* MMIOTRACE_H */ 97#endif /* MMIOTRACE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index a41555cbe00a..5d2970cdce93 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -16,6 +16,7 @@
16#include <linux/kobject.h> 16#include <linux/kobject.h>
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/marker.h> 18#include <linux/marker.h>
19#include <linux/tracepoint.h>
19#include <asm/local.h> 20#include <asm/local.h>
20 21
21#include <asm/module.h> 22#include <asm/module.h>
@@ -331,6 +332,10 @@ struct module
331 struct marker *markers; 332 struct marker *markers;
332 unsigned int num_markers; 333 unsigned int num_markers;
333#endif 334#endif
335#ifdef CONFIG_TRACEPOINTS
336 struct tracepoint *tracepoints;
337 unsigned int num_tracepoints;
338#endif
334 339
335#ifdef CONFIG_MODULE_UNLOAD 340#ifdef CONFIG_MODULE_UNLOAD
336 /* What modules depend on me? */ 341 /* What modules depend on me? */
@@ -453,6 +458,9 @@ extern void print_modules(void);
453 458
454extern void module_update_markers(void); 459extern void module_update_markers(void);
455 460
461extern void module_update_tracepoints(void);
462extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
463
456#else /* !CONFIG_MODULES... */ 464#else /* !CONFIG_MODULES... */
457#define EXPORT_SYMBOL(sym) 465#define EXPORT_SYMBOL(sym)
458#define EXPORT_SYMBOL_GPL(sym) 466#define EXPORT_SYMBOL_GPL(sym)
@@ -557,6 +565,15 @@ static inline void module_update_markers(void)
557{ 565{
558} 566}
559 567
568static inline void module_update_tracepoints(void)
569{
570}
571
572static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
573{
574 return 0;
575}
576
560#endif /* CONFIG_MODULES */ 577#endif /* CONFIG_MODULES */
561 578
562struct device_driver; 579struct device_driver;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
new file mode 100644
index 000000000000..536b0ca46a03
--- /dev/null
+++ b/include/linux/ring_buffer.h
@@ -0,0 +1,127 @@
1#ifndef _LINUX_RING_BUFFER_H
2#define _LINUX_RING_BUFFER_H
3
4#include <linux/mm.h>
5#include <linux/seq_file.h>
6
7struct ring_buffer;
8struct ring_buffer_iter;
9
10/*
11 * Don't reference this struct directly, use functions below.
12 */
13struct ring_buffer_event {
14 u32 type:2, len:3, time_delta:27;
15 u32 array[];
16};
17
18/**
19 * enum ring_buffer_type - internal ring buffer types
20 *
21 * @RINGBUF_TYPE_PADDING: Left over page padding
22 * array is ignored
23 * size is variable depending on how much
24 * padding is needed
25 *
26 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
27 * array[0] = time delta (28 .. 59)
28 * size = 8 bytes
29 *
30 * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
31 * array[0] = tv_nsec
32 * array[1] = tv_sec
33 * size = 16 bytes
34 *
35 * @RINGBUF_TYPE_DATA: Data record
36 * If len is zero:
37 * array[0] holds the actual length
38 * array[1..(length+3)/4-1] holds data
39 * else
40 * length = len << 2
41 * array[0..(length+3)/4] holds data
42 */
43enum ring_buffer_type {
44 RINGBUF_TYPE_PADDING,
45 RINGBUF_TYPE_TIME_EXTEND,
46 /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
47 RINGBUF_TYPE_TIME_STAMP,
48 RINGBUF_TYPE_DATA,
49};
50
51unsigned ring_buffer_event_length(struct ring_buffer_event *event);
52void *ring_buffer_event_data(struct ring_buffer_event *event);
53
54/**
55 * ring_buffer_event_time_delta - return the delta timestamp of the event
56 * @event: the event to get the delta timestamp of
57 *
58 * The delta timestamp is the 27 bit timestamp since the last event.
59 */
60static inline unsigned
61ring_buffer_event_time_delta(struct ring_buffer_event *event)
62{
63 return event->time_delta;
64}
65
66/*
67 * size is in bytes for each per CPU buffer.
68 */
69struct ring_buffer *
70ring_buffer_alloc(unsigned long size, unsigned flags);
71void ring_buffer_free(struct ring_buffer *buffer);
72
73int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
74
75struct ring_buffer_event *
76ring_buffer_lock_reserve(struct ring_buffer *buffer,
77 unsigned long length,
78 unsigned long *flags);
79int ring_buffer_unlock_commit(struct ring_buffer *buffer,
80 struct ring_buffer_event *event,
81 unsigned long flags);
82int ring_buffer_write(struct ring_buffer *buffer,
83 unsigned long length, void *data);
84
85struct ring_buffer_event *
86ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
87struct ring_buffer_event *
88ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
89
90struct ring_buffer_iter *
91ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
92void ring_buffer_read_finish(struct ring_buffer_iter *iter);
93
94struct ring_buffer_event *
95ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
96struct ring_buffer_event *
97ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
98void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
99int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
100
101unsigned long ring_buffer_size(struct ring_buffer *buffer);
102
103void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
104void ring_buffer_reset(struct ring_buffer *buffer);
105
106int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
107 struct ring_buffer *buffer_b, int cpu);
108
109int ring_buffer_empty(struct ring_buffer *buffer);
110int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
111
112void ring_buffer_record_disable(struct ring_buffer *buffer);
113void ring_buffer_record_enable(struct ring_buffer *buffer);
114void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
115void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
116
117unsigned long ring_buffer_entries(struct ring_buffer *buffer);
118unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
119
120u64 ring_buffer_time_stamp(int cpu);
121void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
122
123enum ring_buffer_flags {
124 RB_FL_OVERWRITE = 1 << 0,
125};
126
127#endif /* _LINUX_RING_BUFFER_H */
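
To make the API above concrete, here is a minimal, illustrative producer/consumer sketch built only from the declarations in this header (buffer size, payload layout and error handling are assumptions, not part of the patch):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *example_buffer;

/* reserve space, fill it, then commit it so readers can see it */
static int example_write(u32 value)
{
        struct ring_buffer_event *event;
        unsigned long irq_flags;
        u32 *payload;

        event = ring_buffer_lock_reserve(example_buffer, sizeof(*payload),
                                         &irq_flags);
        if (!event)
                return -EBUSY;  /* e.g. buffer full with overwrite disabled */

        payload = ring_buffer_event_data(event);
        *payload = value;

        return ring_buffer_unlock_commit(example_buffer, event, irq_flags);
}

/* consuming read: each returned event is removed from the buffer */
static void example_drain(int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        while ((event = ring_buffer_consume(example_buffer, cpu, &ts)))
                pr_info("event: len=%u delta=%u ts=%llu\n",
                        ring_buffer_event_length(event),
                        ring_buffer_event_time_delta(event),
                        (unsigned long long)ts);
}

static int __init example_setup(void)
{
        /* 64 KB per CPU, oldest events overwritten when full */
        example_buffer = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
        return example_buffer ? 0 : -ENOMEM;
}
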
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
new file mode 100644
index 000000000000..c5bb39c7a770
--- /dev/null
+++ b/include/linux/tracepoint.h
@@ -0,0 +1,137 @@
1#ifndef _LINUX_TRACEPOINT_H
2#define _LINUX_TRACEPOINT_H
3
4/*
5 * Kernel Tracepoint API.
6 *
7 * See Documentation/tracepoint.txt.
8 *
9 * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
10 *
11 * Heavily inspired from the Linux Kernel Markers.
12 *
13 * This file is released under the GPLv2.
14 * See the file COPYING for more details.
15 */
16
17#include <linux/types.h>
18#include <linux/rcupdate.h>
19
20struct module;
21struct tracepoint;
22
23struct tracepoint {
24 const char *name; /* Tracepoint name */
25 int state; /* State. */
26 void **funcs;
27} __attribute__((aligned(8)));
28
29
30#define TPPROTO(args...) args
31#define TPARGS(args...) args
32
33#ifdef CONFIG_TRACEPOINTS
34
35/*
36 * it_func[0] is never NULL because there is at least one element in the array
37 * when the array itself is non NULL.
38 */
39#define __DO_TRACE(tp, proto, args) \
40 do { \
41 void **it_func; \
42 \
43 rcu_read_lock_sched(); \
44 it_func = rcu_dereference((tp)->funcs); \
45 if (it_func) { \
46 do { \
47 ((void(*)(proto))(*it_func))(args); \
48 } while (*(++it_func)); \
49 } \
50 rcu_read_unlock_sched(); \
51 } while (0)
52
53/*
54 * Make sure the alignment of the structure in the __tracepoints section will
55 * not add unwanted padding between the beginning of the section and the
56 * structure. Force alignment to the same alignment as the section start.
57 */
58#define DEFINE_TRACE(name, proto, args) \
59 static inline void trace_##name(proto) \
60 { \
61 static const char __tpstrtab_##name[] \
62 __attribute__((section("__tracepoints_strings"))) \
63 = #name ":" #proto; \
64 static struct tracepoint __tracepoint_##name \
65 __attribute__((section("__tracepoints"), aligned(8))) = \
66 { __tpstrtab_##name, 0, NULL }; \
67 if (unlikely(__tracepoint_##name.state)) \
68 __DO_TRACE(&__tracepoint_##name, \
69 TPPROTO(proto), TPARGS(args)); \
70 } \
71 static inline int register_trace_##name(void (*probe)(proto)) \
72 { \
73 return tracepoint_probe_register(#name ":" #proto, \
74 (void *)probe); \
75 } \
76 static inline void unregister_trace_##name(void (*probe)(proto))\
77 { \
78 tracepoint_probe_unregister(#name ":" #proto, \
79 (void *)probe); \
80 }
81
82extern void tracepoint_update_probe_range(struct tracepoint *begin,
83 struct tracepoint *end);
84
85#else /* !CONFIG_TRACEPOINTS */
86#define DEFINE_TRACE(name, proto, args) \
87 static inline void _do_trace_##name(struct tracepoint *tp, proto) \
88 { } \
89 static inline void trace_##name(proto) \
90 { } \
91 static inline int register_trace_##name(void (*probe)(proto)) \
92 { \
93 return -ENOSYS; \
94 } \
95 static inline void unregister_trace_##name(void (*probe)(proto))\
96 { }
97
98static inline void tracepoint_update_probe_range(struct tracepoint *begin,
99 struct tracepoint *end)
100{ }
101#endif /* CONFIG_TRACEPOINTS */
102
103/*
104 * Connect a probe to a tracepoint.
105 * Internal API, should not be used directly.
106 */
107extern int tracepoint_probe_register(const char *name, void *probe);
108
109/*
110 * Disconnect a probe from a tracepoint.
111 * Internal API, should not be used directly.
112 */
113extern int tracepoint_probe_unregister(const char *name, void *probe);
114
115struct tracepoint_iter {
116 struct module *module;
117 struct tracepoint *tracepoint;
118};
119
120extern void tracepoint_iter_start(struct tracepoint_iter *iter);
121extern void tracepoint_iter_next(struct tracepoint_iter *iter);
122extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
123extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
124extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
125 struct tracepoint *begin, struct tracepoint *end);
126
127/*
128 * tracepoint_synchronize_unregister must be called between the last tracepoint
129 * probe unregistration and the end of module exit to make sure there is no
130 * caller executing a probe when it is freed.
131 */
132static inline void tracepoint_synchronize_unregister(void)
133{
134 synchronize_sched();
135}
136
137#endif
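
A rough usage sketch of this API (the tracepoint name, prototype and probe below are invented for illustration): DEFINE_TRACE() in a shared header generates trace_<name>(), register_trace_<name>() and unregister_trace_<name>(); the instrumented code calls the trace function, and a tracer attaches a probe with a matching prototype:

#include <linux/tracepoint.h>

/* in a header shared by the instrumented code and the tracer */
DEFINE_TRACE(example_submit,
        TPPROTO(unsigned long sector, unsigned int nr_bytes),
        TPARGS(sector, nr_bytes));

/* instrumentation site: compiles to a near no-op until a probe is attached */
static void submit_request(unsigned long sector, unsigned int nr_bytes)
{
        trace_example_submit(sector, nr_bytes);
        /* ... actual work ... */
}

/* tracer side: the probe prototype must match TPPROTO exactly */
static void probe_example_submit(unsigned long sector, unsigned int nr_bytes)
{
        /* runs under rcu_read_lock_sched(), so keep it short and atomic */
}

static int tracer_attach(void)
{
        return register_trace_example_submit(probe_example_submit);
}

static void tracer_detach(void)
{
        unregister_trace_example_submit(probe_example_submit);
        /* no probe caller may still be running before tracer state is freed */
        tracepoint_synchronize_unregister();
}
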
diff --git a/include/trace/sched.h b/include/trace/sched.h
new file mode 100644
index 000000000000..ad47369d01b5
--- /dev/null
+++ b/include/trace/sched.h
@@ -0,0 +1,56 @@
1#ifndef _TRACE_SCHED_H
2#define _TRACE_SCHED_H
3
4#include <linux/sched.h>
5#include <linux/tracepoint.h>
6
7DEFINE_TRACE(sched_kthread_stop,
8 TPPROTO(struct task_struct *t),
9 TPARGS(t));
10
11DEFINE_TRACE(sched_kthread_stop_ret,
12 TPPROTO(int ret),
13 TPARGS(ret));
14
15DEFINE_TRACE(sched_wait_task,
16 TPPROTO(struct rq *rq, struct task_struct *p),
17 TPARGS(rq, p));
18
19DEFINE_TRACE(sched_wakeup,
20 TPPROTO(struct rq *rq, struct task_struct *p),
21 TPARGS(rq, p));
22
23DEFINE_TRACE(sched_wakeup_new,
24 TPPROTO(struct rq *rq, struct task_struct *p),
25 TPARGS(rq, p));
26
27DEFINE_TRACE(sched_switch,
28 TPPROTO(struct rq *rq, struct task_struct *prev,
29 struct task_struct *next),
30 TPARGS(rq, prev, next));
31
32DEFINE_TRACE(sched_migrate_task,
33 TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu),
34 TPARGS(rq, p, dest_cpu));
35
36DEFINE_TRACE(sched_process_free,
37 TPPROTO(struct task_struct *p),
38 TPARGS(p));
39
40DEFINE_TRACE(sched_process_exit,
41 TPPROTO(struct task_struct *p),
42 TPARGS(p));
43
44DEFINE_TRACE(sched_process_wait,
45 TPPROTO(struct pid *pid),
46 TPARGS(pid));
47
48DEFINE_TRACE(sched_process_fork,
49 TPPROTO(struct task_struct *parent, struct task_struct *child),
50 TPARGS(parent, child));
51
52DEFINE_TRACE(sched_signal_send,
53 TPPROTO(int sig, struct task_struct *p),
54 TPARGS(sig, p));
55
56#endif
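
For instance, the context-switch tracer attaches to sched_switch above with a probe whose signature mirrors the TPPROTO; a stripped-down sketch of such a probe (the body is illustrative):

#include <trace/sched.h>

struct rq;      /* opaque here; only passed through to the probe */

static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
                               struct task_struct *next)
{
        /* e.g. record prev->pid -> next->pid into a trace buffer */
}

static int __init switch_probe_init(void)
{
        return register_trace_sched_switch(probe_sched_switch);
}

static void switch_probe_exit(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        tracepoint_synchronize_unregister();
}
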
diff --git a/init/Kconfig b/init/Kconfig
index 8828ed0b2051..c6b70313bf0b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -786,6 +786,13 @@ config PROFILING
786 Say Y here to enable the extended profiling support mechanisms used 786 Say Y here to enable the extended profiling support mechanisms used
787 by profilers such as OProfile. 787 by profilers such as OProfile.
788 788
789#
790# Place an empty function call at each tracepoint site. Can be
791# dynamically changed for a probe function.
792#
793config TRACEPOINTS
794 bool
795
789config MARKERS 796config MARKERS
790 bool "Activate markers" 797 bool "Activate markers"
791 help 798 help
diff --git a/init/main.c b/init/main.c
index 4371d11721f6..3e17a3bafe60 100644
--- a/init/main.c
+++ b/init/main.c
@@ -61,6 +61,7 @@
61#include <linux/sched.h> 61#include <linux/sched.h>
62#include <linux/signal.h> 62#include <linux/signal.h>
63#include <linux/idr.h> 63#include <linux/idr.h>
64#include <linux/ftrace.h>
64 65
65#include <asm/io.h> 66#include <asm/io.h>
66#include <asm/bugs.h> 67#include <asm/bugs.h>
@@ -689,6 +690,8 @@ asmlinkage void __init start_kernel(void)
689 690
690 acpi_early_init(); /* before LAPIC and SMP init */ 691 acpi_early_init(); /* before LAPIC and SMP init */
691 692
693 ftrace_init();
694
692 /* Do the rest non-__init'ed, we're now alive */ 695 /* Do the rest non-__init'ed, we're now alive */
693 rest_init(); 696 rest_init();
694} 697}
@@ -705,30 +708,31 @@ __setup("initcall_debug", initcall_debug_setup);
705int do_one_initcall(initcall_t fn) 708int do_one_initcall(initcall_t fn)
706{ 709{
707 int count = preempt_count(); 710 int count = preempt_count();
708 ktime_t t0, t1, delta; 711 ktime_t delta;
709 char msgbuf[64]; 712 char msgbuf[64];
710 int result; 713 struct boot_trace it;
711 714
712 if (initcall_debug) { 715 if (initcall_debug) {
713 printk("calling %pF @ %i\n", fn, task_pid_nr(current)); 716 it.caller = task_pid_nr(current);
714 t0 = ktime_get(); 717 printk("calling %pF @ %i\n", fn, it.caller);
718 it.calltime = ktime_get();
715 } 719 }
716 720
717 result = fn(); 721 it.result = fn();
718 722
719 if (initcall_debug) { 723 if (initcall_debug) {
720 t1 = ktime_get(); 724 it.rettime = ktime_get();
721 delta = ktime_sub(t1, t0); 725 delta = ktime_sub(it.rettime, it.calltime);
722 726 it.duration = (unsigned long long) delta.tv64 >> 10;
723 printk("initcall %pF returned %d after %Ld msecs\n", 727 printk("initcall %pF returned %d after %Ld usecs\n", fn,
724 fn, result, 728 it.result, it.duration);
725 (unsigned long long) delta.tv64 >> 20); 729 trace_boot(&it, fn);
726 } 730 }
727 731
728 msgbuf[0] = 0; 732 msgbuf[0] = 0;
729 733
730 if (result && result != -ENODEV && initcall_debug) 734 if (it.result && it.result != -ENODEV && initcall_debug)
731 sprintf(msgbuf, "error code %d ", result); 735 sprintf(msgbuf, "error code %d ", it.result);
732 736
733 if (preempt_count() != count) { 737 if (preempt_count() != count) {
734 strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf)); 738 strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -742,7 +746,7 @@ int do_one_initcall(initcall_t fn)
742 printk("initcall %pF returned with %s\n", fn, msgbuf); 746 printk("initcall %pF returned with %s\n", fn, msgbuf);
743 } 747 }
744 748
745 return result; 749 return it.result;
746} 750}
747 751
748 752
@@ -857,6 +861,7 @@ static int __init kernel_init(void * unused)
857 smp_prepare_cpus(setup_max_cpus); 861 smp_prepare_cpus(setup_max_cpus);
858 862
859 do_pre_smp_initcalls(); 863 do_pre_smp_initcalls();
864 start_boot_trace();
860 865
861 smp_init(); 866 smp_init();
862 sched_init_smp(); 867 sched_init_smp();
@@ -883,6 +888,7 @@ static int __init kernel_init(void * unused)
883 * we're essentially up and running. Get rid of the 888 * we're essentially up and running. Get rid of the
884 * initmem segments and start the user-mode stuff.. 889 * initmem segments and start the user-mode stuff..
885 */ 890 */
891 stop_boot_trace();
886 init_post(); 892 init_post();
887 return 0; 893 return 0;
888} 894}
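
struct boot_trace and trace_boot()/start_boot_trace()/stop_boot_trace() are provided by the new boot tracer (kernel/trace/trace_boot.c, not shown in this hunk). From the fields used above, the record presumably looks roughly like this -- an inferred sketch, not code from the patch:

/* assumed layout, reconstructed from the do_one_initcall() hunk above */
struct boot_trace {
        pid_t                   caller;         /* pid that ran the initcall */
        ktime_t                 calltime;
        ktime_t                 rettime;
        int                     result;         /* initcall return value */
        unsigned long long      duration;       /* (rettime - calltime) >> 10, roughly usecs */
};
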
diff --git a/kernel/Makefile b/kernel/Makefile
index 066550aa61c5..305f11dbef21 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
85obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o 85obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
86obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o 86obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
87obj-$(CONFIG_MARKERS) += marker.o 87obj-$(CONFIG_MARKERS) += marker.o
88obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
88obj-$(CONFIG_LATENCYTOP) += latencytop.o 89obj-$(CONFIG_LATENCYTOP) += latencytop.o
89obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 90obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
90obj-$(CONFIG_FTRACE) += trace/ 91obj-$(CONFIG_FTRACE) += trace/
diff --git a/kernel/exit.c b/kernel/exit.c
index 059b38cae384..80137a5d9467 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,6 +47,7 @@
47#include <linux/blkdev.h> 47#include <linux/blkdev.h>
48#include <linux/task_io_accounting_ops.h> 48#include <linux/task_io_accounting_ops.h>
49#include <linux/tracehook.h> 49#include <linux/tracehook.h>
50#include <trace/sched.h>
50 51
51#include <asm/uaccess.h> 52#include <asm/uaccess.h>
52#include <asm/unistd.h> 53#include <asm/unistd.h>
@@ -146,7 +147,10 @@ static void __exit_signal(struct task_struct *tsk)
146 147
147static void delayed_put_task_struct(struct rcu_head *rhp) 148static void delayed_put_task_struct(struct rcu_head *rhp)
148{ 149{
149 put_task_struct(container_of(rhp, struct task_struct, rcu)); 150 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
151
152 trace_sched_process_free(tsk);
153 put_task_struct(tsk);
150} 154}
151 155
152 156
@@ -1070,6 +1074,8 @@ NORET_TYPE void do_exit(long code)
1070 1074
1071 if (group_dead) 1075 if (group_dead)
1072 acct_process(); 1076 acct_process();
1077 trace_sched_process_exit(tsk);
1078
1073 exit_sem(tsk); 1079 exit_sem(tsk);
1074 exit_files(tsk); 1080 exit_files(tsk);
1075 exit_fs(tsk); 1081 exit_fs(tsk);
@@ -1675,6 +1681,8 @@ static long do_wait(enum pid_type type, struct pid *pid, int options,
1675 struct task_struct *tsk; 1681 struct task_struct *tsk;
1676 int retval; 1682 int retval;
1677 1683
1684 trace_sched_process_wait(pid);
1685
1678 add_wait_queue(&current->signal->wait_chldexit,&wait); 1686 add_wait_queue(&current->signal->wait_chldexit,&wait);
1679repeat: 1687repeat:
1680 /* 1688 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 44e64d7ba29b..4d093552dd6e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -58,6 +58,7 @@
58#include <linux/tty.h> 58#include <linux/tty.h>
59#include <linux/proc_fs.h> 59#include <linux/proc_fs.h>
60#include <linux/blkdev.h> 60#include <linux/blkdev.h>
61#include <trace/sched.h>
61 62
62#include <asm/pgtable.h> 63#include <asm/pgtable.h>
63#include <asm/pgalloc.h> 64#include <asm/pgalloc.h>
@@ -1372,6 +1373,8 @@ long do_fork(unsigned long clone_flags,
1372 if (!IS_ERR(p)) { 1373 if (!IS_ERR(p)) {
1373 struct completion vfork; 1374 struct completion vfork;
1374 1375
1376 trace_sched_process_fork(current, p);
1377
1375 nr = task_pid_vnr(p); 1378 nr = task_pid_vnr(p);
1376 1379
1377 if (clone_flags & CLONE_PARENT_SETTID) 1380 if (clone_flags & CLONE_PARENT_SETTID)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 14ec64fe175a..8e7a7ce3ed0a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -13,6 +13,7 @@
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <trace/sched.h>
16 17
17#define KTHREAD_NICE_LEVEL (-5) 18#define KTHREAD_NICE_LEVEL (-5)
18 19
@@ -205,6 +206,8 @@ int kthread_stop(struct task_struct *k)
205 /* It could exit after stop_info.k set, but before wake_up_process. */ 206 /* It could exit after stop_info.k set, but before wake_up_process. */
206 get_task_struct(k); 207 get_task_struct(k);
207 208
209 trace_sched_kthread_stop(k);
210
208 /* Must init completion *before* thread sees kthread_stop_info.k */ 211 /* Must init completion *before* thread sees kthread_stop_info.k */
209 init_completion(&kthread_stop_info.done); 212 init_completion(&kthread_stop_info.done);
210 smp_wmb(); 213 smp_wmb();
@@ -220,6 +223,8 @@ int kthread_stop(struct task_struct *k)
220 ret = kthread_stop_info.err; 223 ret = kthread_stop_info.err;
221 mutex_unlock(&kthread_stop_lock); 224 mutex_unlock(&kthread_stop_lock);
222 225
226 trace_sched_kthread_stop_ret(ret);
227
223 return ret; 228 return ret;
224} 229}
225EXPORT_SYMBOL(kthread_stop); 230EXPORT_SYMBOL(kthread_stop);
diff --git a/kernel/marker.c b/kernel/marker.c
index 7d1faecd7a51..e9c6b2bc9400 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -62,7 +62,7 @@ struct marker_entry {
62 int refcount; /* Number of times armed. 0 if disarmed. */ 62 int refcount; /* Number of times armed. 0 if disarmed. */
63 struct rcu_head rcu; 63 struct rcu_head rcu;
64 void *oldptr; 64 void *oldptr;
65 unsigned char rcu_pending:1; 65 int rcu_pending;
66 unsigned char ptype:1; 66 unsigned char ptype:1;
67 char name[0]; /* Contains name'\0'format'\0' */ 67 char name[0]; /* Contains name'\0'format'\0' */
68}; 68};
@@ -103,11 +103,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
103 char ptype; 103 char ptype;
104 104
105 /* 105 /*
106 * preempt_disable does two things : disabling preemption to make sure 106 * rcu_read_lock_sched does two things : disabling preemption to make
107 * the teardown of the callbacks can be done correctly when they are in 107 * sure the teardown of the callbacks can be done correctly when they
108 * modules and they insure RCU read coherency. 108 * are in modules and they insure RCU read coherency.
109 */ 109 */
110 preempt_disable(); 110 rcu_read_lock_sched();
111 ptype = mdata->ptype; 111 ptype = mdata->ptype;
112 if (likely(!ptype)) { 112 if (likely(!ptype)) {
113 marker_probe_func *func; 113 marker_probe_func *func;
@@ -145,7 +145,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
145 va_end(args); 145 va_end(args);
146 } 146 }
147 } 147 }
148 preempt_enable(); 148 rcu_read_unlock_sched();
149} 149}
150EXPORT_SYMBOL_GPL(marker_probe_cb); 150EXPORT_SYMBOL_GPL(marker_probe_cb);
151 151
@@ -162,7 +162,7 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
162 va_list args; /* not initialized */ 162 va_list args; /* not initialized */
163 char ptype; 163 char ptype;
164 164
165 preempt_disable(); 165 rcu_read_lock_sched();
166 ptype = mdata->ptype; 166 ptype = mdata->ptype;
167 if (likely(!ptype)) { 167 if (likely(!ptype)) {
168 marker_probe_func *func; 168 marker_probe_func *func;
@@ -195,7 +195,7 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
195 multi[i].func(multi[i].probe_private, call_private, 195 multi[i].func(multi[i].probe_private, call_private,
196 mdata->format, &args); 196 mdata->format, &args);
197 } 197 }
198 preempt_enable(); 198 rcu_read_unlock_sched();
199} 199}
200EXPORT_SYMBOL_GPL(marker_probe_cb_noarg); 200EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
201 201
@@ -560,7 +560,7 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
560 * Disable a marker and its probe callback. 560 * Disable a marker and its probe callback.
561 * Note: only waiting an RCU period after setting elem->call to the empty 561 * Note: only waiting an RCU period after setting elem->call to the empty
562 * function insures that the original callback is not used anymore. This insured 562 * function insures that the original callback is not used anymore. This insured
563 * by preempt_disable around the call site. 563 * by rcu_read_lock_sched around the call site.
564 */ 564 */
565static void disable_marker(struct marker *elem) 565static void disable_marker(struct marker *elem)
566{ 566{
@@ -653,11 +653,17 @@ int marker_probe_register(const char *name, const char *format,
653 entry = get_marker(name); 653 entry = get_marker(name);
654 if (!entry) { 654 if (!entry) {
655 entry = add_marker(name, format); 655 entry = add_marker(name, format);
656 if (IS_ERR(entry)) { 656 if (IS_ERR(entry))
657 ret = PTR_ERR(entry); 657 ret = PTR_ERR(entry);
658 goto end; 658 } else if (format) {
659 } 659 if (!entry->format)
660 ret = marker_set_format(&entry, format);
661 else if (strcmp(entry->format, format))
662 ret = -EPERM;
660 } 663 }
664 if (ret)
665 goto end;
666
661 /* 667 /*
662 * If we detect that a call_rcu is pending for this marker, 668 * If we detect that a call_rcu is pending for this marker,
663 * make sure it's executed now. 669 * make sure it's executed now.
@@ -674,6 +680,8 @@ int marker_probe_register(const char *name, const char *format,
674 mutex_lock(&markers_mutex); 680 mutex_lock(&markers_mutex);
675 entry = get_marker(name); 681 entry = get_marker(name);
676 WARN_ON(!entry); 682 WARN_ON(!entry);
683 if (entry->rcu_pending)
684 rcu_barrier_sched();
677 entry->oldptr = old; 685 entry->oldptr = old;
678 entry->rcu_pending = 1; 686 entry->rcu_pending = 1;
679 /* write rcu_pending before calling the RCU callback */ 687 /* write rcu_pending before calling the RCU callback */
@@ -717,6 +725,8 @@ int marker_probe_unregister(const char *name,
717 entry = get_marker(name); 725 entry = get_marker(name);
718 if (!entry) 726 if (!entry)
719 goto end; 727 goto end;
728 if (entry->rcu_pending)
729 rcu_barrier_sched();
720 entry->oldptr = old; 730 entry->oldptr = old;
721 entry->rcu_pending = 1; 731 entry->rcu_pending = 1;
722 /* write rcu_pending before calling the RCU callback */ 732 /* write rcu_pending before calling the RCU callback */
@@ -795,6 +805,8 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
795 mutex_lock(&markers_mutex); 805 mutex_lock(&markers_mutex);
796 entry = get_marker_from_private_data(probe, probe_private); 806 entry = get_marker_from_private_data(probe, probe_private);
797 WARN_ON(!entry); 807 WARN_ON(!entry);
808 if (entry->rcu_pending)
809 rcu_barrier_sched();
798 entry->oldptr = old; 810 entry->oldptr = old;
799 entry->rcu_pending = 1; 811 entry->rcu_pending = 1;
800 /* write rcu_pending before calling the RCU callback */ 812 /* write rcu_pending before calling the RCU callback */
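
The marker changes above swap preempt_disable()/preempt_enable() for rcu_read_lock_sched()/rcu_read_unlock_sched() on the probe-call fast path, and insert rcu_barrier_sched() before entry->oldptr is reused, so an RCU callback queued by an earlier update cannot still reference it. A condensed sketch of that update-side pattern (the structure and free callback are simplified stand-ins for the marker internals):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/system.h>         /* smp_wmb() */

struct probe_entry {                    /* stand-in for struct marker_entry */
        struct rcu_head rcu;
        void *oldptr;
        int rcu_pending;
};

static void free_old_closure(struct rcu_head *head)
{
        struct probe_entry *e = container_of(head, struct probe_entry, rcu);

        kfree(e->oldptr);
        e->rcu_pending = 0;
}

static void publish_new_probe_array(struct probe_entry *e, void *old)
{
        /* a callback queued by an earlier update may still be in flight */
        if (e->rcu_pending)
                rcu_barrier_sched();
        e->oldptr = old;
        e->rcu_pending = 1;
        smp_wmb();                      /* write rcu_pending before call_rcu */
        call_rcu_sched(&e->rcu, free_old_closure);
}
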
diff --git a/kernel/module.c b/kernel/module.c
index 25bc9ac9e226..0d8d21ee792c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -46,6 +46,8 @@
46#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
47#include <linux/license.h> 47#include <linux/license.h>
48#include <asm/sections.h> 48#include <asm/sections.h>
49#include <linux/tracepoint.h>
50#include <linux/ftrace.h>
49 51
50#if 0 52#if 0
51#define DEBUGP printk 53#define DEBUGP printk
@@ -1430,6 +1432,9 @@ static void free_module(struct module *mod)
1430 /* Module unload stuff */ 1432 /* Module unload stuff */
1431 module_unload_free(mod); 1433 module_unload_free(mod);
1432 1434
1435 /* release any pointers to mcount in this module */
1436 ftrace_release(mod->module_core, mod->core_size);
1437
1433 /* This may be NULL, but that's OK */ 1438 /* This may be NULL, but that's OK */
1434 module_free(mod, mod->module_init); 1439 module_free(mod, mod->module_init);
1435 kfree(mod->args); 1440 kfree(mod->args);
@@ -1861,9 +1866,13 @@ static noinline struct module *load_module(void __user *umod,
1861 unsigned int markersindex; 1866 unsigned int markersindex;
1862 unsigned int markersstringsindex; 1867 unsigned int markersstringsindex;
1863 unsigned int verboseindex; 1868 unsigned int verboseindex;
1869 unsigned int tracepointsindex;
1870 unsigned int tracepointsstringsindex;
1871 unsigned int mcountindex;
1864 struct module *mod; 1872 struct module *mod;
1865 long err = 0; 1873 long err = 0;
1866 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 1874 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
1875 void *mseg;
1867 struct exception_table_entry *extable; 1876 struct exception_table_entry *extable;
1868 mm_segment_t old_fs; 1877 mm_segment_t old_fs;
1869 1878
@@ -2156,6 +2165,12 @@ static noinline struct module *load_module(void __user *umod,
2156 markersstringsindex = find_sec(hdr, sechdrs, secstrings, 2165 markersstringsindex = find_sec(hdr, sechdrs, secstrings,
2157 "__markers_strings"); 2166 "__markers_strings");
2158 verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose"); 2167 verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose");
2168 tracepointsindex = find_sec(hdr, sechdrs, secstrings, "__tracepoints");
2169 tracepointsstringsindex = find_sec(hdr, sechdrs, secstrings,
2170 "__tracepoints_strings");
2171
2172 mcountindex = find_sec(hdr, sechdrs, secstrings,
2173 "__mcount_loc");
2159 2174
2160 /* Now do relocations. */ 2175 /* Now do relocations. */
2161 for (i = 1; i < hdr->e_shnum; i++) { 2176 for (i = 1; i < hdr->e_shnum; i++) {
@@ -2183,6 +2198,12 @@ static noinline struct module *load_module(void __user *umod,
2183 mod->num_markers = 2198 mod->num_markers =
2184 sechdrs[markersindex].sh_size / sizeof(*mod->markers); 2199 sechdrs[markersindex].sh_size / sizeof(*mod->markers);
2185#endif 2200#endif
2201#ifdef CONFIG_TRACEPOINTS
2202 mod->tracepoints = (void *)sechdrs[tracepointsindex].sh_addr;
2203 mod->num_tracepoints =
2204 sechdrs[tracepointsindex].sh_size / sizeof(*mod->tracepoints);
2205#endif
2206
2186 2207
2187 /* Find duplicate symbols */ 2208 /* Find duplicate symbols */
2188 err = verify_export_symbols(mod); 2209 err = verify_export_symbols(mod);
@@ -2201,12 +2222,22 @@ static noinline struct module *load_module(void __user *umod,
2201 2222
2202 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); 2223 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
2203 2224
2225 if (!mod->taints) {
2204#ifdef CONFIG_MARKERS 2226#ifdef CONFIG_MARKERS
2205 if (!mod->taints)
2206 marker_update_probe_range(mod->markers, 2227 marker_update_probe_range(mod->markers,
2207 mod->markers + mod->num_markers); 2228 mod->markers + mod->num_markers);
2208#endif 2229#endif
2209 dynamic_printk_setup(sechdrs, verboseindex); 2230 dynamic_printk_setup(sechdrs, verboseindex);
2231#ifdef CONFIG_TRACEPOINTS
2232 tracepoint_update_probe_range(mod->tracepoints,
2233 mod->tracepoints + mod->num_tracepoints);
2234#endif
2235 }
2236
2237 /* sechdrs[0].sh_size is always zero */
2238 mseg = (void *)sechdrs[mcountindex].sh_addr;
2239 ftrace_init_module(mseg, mseg + sechdrs[mcountindex].sh_size);
2240
2210 err = module_finalize(hdr, sechdrs, mod); 2241 err = module_finalize(hdr, sechdrs, mod);
2211 if (err < 0) 2242 if (err < 0)
2212 goto cleanup; 2243 goto cleanup;
@@ -2276,6 +2307,7 @@ static noinline struct module *load_module(void __user *umod,
2276 cleanup: 2307 cleanup:
2277 kobject_del(&mod->mkobj.kobj); 2308 kobject_del(&mod->mkobj.kobj);
2278 kobject_put(&mod->mkobj.kobj); 2309 kobject_put(&mod->mkobj.kobj);
2310 ftrace_release(mod->module_core, mod->core_size);
2279 free_unload: 2311 free_unload:
2280 module_unload_free(mod); 2312 module_unload_free(mod);
2281 module_free(mod, mod->module_init); 2313 module_free(mod, mod->module_init);
@@ -2759,3 +2791,50 @@ void module_update_markers(void)
2759 mutex_unlock(&module_mutex); 2791 mutex_unlock(&module_mutex);
2760} 2792}
2761#endif 2793#endif
2794
2795#ifdef CONFIG_TRACEPOINTS
2796void module_update_tracepoints(void)
2797{
2798 struct module *mod;
2799
2800 mutex_lock(&module_mutex);
2801 list_for_each_entry(mod, &modules, list)
2802 if (!mod->taints)
2803 tracepoint_update_probe_range(mod->tracepoints,
2804 mod->tracepoints + mod->num_tracepoints);
2805 mutex_unlock(&module_mutex);
2806}
2807
2808/*
2809 * Returns 0 if current not found.
2810 * Returns 1 if current found.
2811 */
2812int module_get_iter_tracepoints(struct tracepoint_iter *iter)
2813{
2814 struct module *iter_mod;
2815 int found = 0;
2816
2817 mutex_lock(&module_mutex);
2818 list_for_each_entry(iter_mod, &modules, list) {
2819 if (!iter_mod->taints) {
2820 /*
2821 * Sorted module list
2822 */
2823 if (iter_mod < iter->module)
2824 continue;
2825 else if (iter_mod > iter->module)
2826 iter->tracepoint = NULL;
2827 found = tracepoint_get_iter_range(&iter->tracepoint,
2828 iter_mod->tracepoints,
2829 iter_mod->tracepoints
2830 + iter_mod->num_tracepoints);
2831 if (found) {
2832 iter->module = iter_mod;
2833 break;
2834 }
2835 }
2836 }
2837 mutex_unlock(&module_mutex);
2838 return found;
2839}
2840#endif
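
module_get_iter_tracepoints() is the module-list backend for the tracepoint iterator declared in linux/tracepoint.h; a consumer (for example a debugfs listing) would walk every core-kernel and module tracepoint roughly as follows (sketch; the loop shape is assumed from the iterator API, not shown in this patch):

#include <linux/kernel.h>
#include <linux/tracepoint.h>

static void list_all_tracepoints(void)
{
        struct tracepoint_iter iter;

        tracepoint_iter_reset(&iter);
        for (tracepoint_iter_start(&iter); iter.tracepoint;
             tracepoint_iter_next(&iter))
                pr_info("%s (%s)\n", iter.tracepoint->name,
                        iter.tracepoint->state ? "enabled" : "disabled");
        tracepoint_iter_stop(&iter);
}
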
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 823be11584ef..4282c0a40a57 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -550,7 +550,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
550 550
551static ATOMIC_NOTIFIER_HEAD(die_chain); 551static ATOMIC_NOTIFIER_HEAD(die_chain);
552 552
553int notify_die(enum die_val val, const char *str, 553int notrace notify_die(enum die_val val, const char *str,
554 struct pt_regs *regs, long err, int trap, int sig) 554 struct pt_regs *regs, long err, int trap, int sig)
555{ 555{
556 struct die_args args = { 556 struct die_args args = {
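
notify_die() gains the notrace annotation so the function tracer cannot recurse through the die-notifier path; any helper reachable from tracing or die/NMI context gets the same treatment, e.g. (illustrative):

#include <linux/linkage.h>      /* notrace */

static notrace void helper_used_by_the_tracer(void)
{
        /* never instrumented by mcount, so it cannot recurse into ftrace */
}
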
diff --git a/kernel/sched.c b/kernel/sched.c
index 09a8c15748f1..d906f72b42d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
71#include <linux/debugfs.h> 71#include <linux/debugfs.h>
72#include <linux/ctype.h> 72#include <linux/ctype.h>
73#include <linux/ftrace.h> 73#include <linux/ftrace.h>
74#include <trace/sched.h>
74 75
75#include <asm/tlb.h> 76#include <asm/tlb.h>
76#include <asm/irq_regs.h> 77#include <asm/irq_regs.h>
@@ -1936,6 +1937,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1936 * just go back and repeat. 1937 * just go back and repeat.
1937 */ 1938 */
1938 rq = task_rq_lock(p, &flags); 1939 rq = task_rq_lock(p, &flags);
1940 trace_sched_wait_task(rq, p);
1939 running = task_running(rq, p); 1941 running = task_running(rq, p);
1940 on_rq = p->se.on_rq; 1942 on_rq = p->se.on_rq;
1941 ncsw = 0; 1943 ncsw = 0;
@@ -2297,9 +2299,7 @@ out_activate:
2297 success = 1; 2299 success = 1;
2298 2300
2299out_running: 2301out_running:
2300 trace_mark(kernel_sched_wakeup, 2302 trace_sched_wakeup(rq, p);
2301 "pid %d state %ld ## rq %p task %p rq->curr %p",
2302 p->pid, p->state, rq, p, rq->curr);
2303 check_preempt_curr(rq, p, sync); 2303 check_preempt_curr(rq, p, sync);
2304 2304
2305 p->state = TASK_RUNNING; 2305 p->state = TASK_RUNNING;
@@ -2432,9 +2432,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2432 p->sched_class->task_new(rq, p); 2432 p->sched_class->task_new(rq, p);
2433 inc_nr_running(rq); 2433 inc_nr_running(rq);
2434 } 2434 }
2435 trace_mark(kernel_sched_wakeup_new, 2435 trace_sched_wakeup_new(rq, p);
2436 "pid %d state %ld ## rq %p task %p rq->curr %p",
2437 p->pid, p->state, rq, p, rq->curr);
2438 check_preempt_curr(rq, p, 0); 2436 check_preempt_curr(rq, p, 0);
2439#ifdef CONFIG_SMP 2437#ifdef CONFIG_SMP
2440 if (p->sched_class->task_wake_up) 2438 if (p->sched_class->task_wake_up)
@@ -2607,11 +2605,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2607 struct mm_struct *mm, *oldmm; 2605 struct mm_struct *mm, *oldmm;
2608 2606
2609 prepare_task_switch(rq, prev, next); 2607 prepare_task_switch(rq, prev, next);
2610 trace_mark(kernel_sched_schedule, 2608 trace_sched_switch(rq, prev, next);
2611 "prev_pid %d next_pid %d prev_state %ld "
2612 "## rq %p prev %p next %p",
2613 prev->pid, next->pid, prev->state,
2614 rq, prev, next);
2615 mm = next->mm; 2609 mm = next->mm;
2616 oldmm = prev->active_mm; 2610 oldmm = prev->active_mm;
2617 /* 2611 /*
@@ -2851,6 +2845,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2851 || unlikely(!cpu_active(dest_cpu))) 2845 || unlikely(!cpu_active(dest_cpu)))
2852 goto out; 2846 goto out;
2853 2847
2848 trace_sched_migrate_task(rq, p, dest_cpu);
2854 /* force the process onto the specified CPU */ 2849 /* force the process onto the specified CPU */
2855 if (migrate_task(p, dest_cpu, &req)) { 2850 if (migrate_task(p, dest_cpu, &req)) {
2856 /* Need to wait for migration thread (might exit: take ref). */ 2851 /* Need to wait for migration thread (might exit: take ref). */
diff --git a/kernel/signal.c b/kernel/signal.c
index 6eea5826d618..105217da5c82 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -27,6 +27,7 @@
27#include <linux/freezer.h> 27#include <linux/freezer.h>
28#include <linux/pid_namespace.h> 28#include <linux/pid_namespace.h>
29#include <linux/nsproxy.h> 29#include <linux/nsproxy.h>
30#include <trace/sched.h>
30 31
31#include <asm/param.h> 32#include <asm/param.h>
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
@@ -803,6 +804,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
803 struct sigpending *pending; 804 struct sigpending *pending;
804 struct sigqueue *q; 805 struct sigqueue *q;
805 806
807 trace_sched_signal_send(sig, t);
808
806 assert_spin_locked(&t->sighand->siglock); 809 assert_spin_locked(&t->sighand->siglock);
807 if (!prepare_signal(sig, t)) 810 if (!prepare_signal(sig, t))
808 return 0; 811 return 0;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 263e9e6bbd60..1cb3e1f616af 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1,23 +1,37 @@
1# 1#
2# Architectures that offer an FTRACE implementation should select HAVE_FTRACE: 2# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
3# 3#
4
5config NOP_TRACER
6 bool
7
4config HAVE_FTRACE 8config HAVE_FTRACE
5 bool 9 bool
10 select NOP_TRACER
6 11
7config HAVE_DYNAMIC_FTRACE 12config HAVE_DYNAMIC_FTRACE
8 bool 13 bool
9 14
15config HAVE_FTRACE_MCOUNT_RECORD
16 bool
17
10config TRACER_MAX_TRACE 18config TRACER_MAX_TRACE
11 bool 19 bool
12 20
21config RING_BUFFER
22 bool
23
13config TRACING 24config TRACING
14 bool 25 bool
15 select DEBUG_FS 26 select DEBUG_FS
27 select RING_BUFFER
16 select STACKTRACE 28 select STACKTRACE
29 select TRACEPOINTS
17 30
18config FTRACE 31config FTRACE
19 bool "Kernel Function Tracer" 32 bool "Kernel Function Tracer"
20 depends on HAVE_FTRACE 33 depends on HAVE_FTRACE
34 depends on DEBUG_KERNEL
21 select FRAME_POINTER 35 select FRAME_POINTER
22 select TRACING 36 select TRACING
23 select CONTEXT_SWITCH_TRACER 37 select CONTEXT_SWITCH_TRACER
@@ -36,6 +50,7 @@ config IRQSOFF_TRACER
36 depends on TRACE_IRQFLAGS_SUPPORT 50 depends on TRACE_IRQFLAGS_SUPPORT
37 depends on GENERIC_TIME 51 depends on GENERIC_TIME
38 depends on HAVE_FTRACE 52 depends on HAVE_FTRACE
53 depends on DEBUG_KERNEL
39 select TRACE_IRQFLAGS 54 select TRACE_IRQFLAGS
40 select TRACING 55 select TRACING
41 select TRACER_MAX_TRACE 56 select TRACER_MAX_TRACE
@@ -59,6 +74,7 @@ config PREEMPT_TRACER
59 depends on GENERIC_TIME 74 depends on GENERIC_TIME
60 depends on PREEMPT 75 depends on PREEMPT
61 depends on HAVE_FTRACE 76 depends on HAVE_FTRACE
77 depends on DEBUG_KERNEL
62 select TRACING 78 select TRACING
63 select TRACER_MAX_TRACE 79 select TRACER_MAX_TRACE
64 help 80 help
@@ -86,6 +102,7 @@ config SYSPROF_TRACER
86config SCHED_TRACER 102config SCHED_TRACER
87 bool "Scheduling Latency Tracer" 103 bool "Scheduling Latency Tracer"
88 depends on HAVE_FTRACE 104 depends on HAVE_FTRACE
105 depends on DEBUG_KERNEL
89 select TRACING 106 select TRACING
90 select CONTEXT_SWITCH_TRACER 107 select CONTEXT_SWITCH_TRACER
91 select TRACER_MAX_TRACE 108 select TRACER_MAX_TRACE
@@ -96,16 +113,56 @@ config SCHED_TRACER
96config CONTEXT_SWITCH_TRACER 113config CONTEXT_SWITCH_TRACER
97 bool "Trace process context switches" 114 bool "Trace process context switches"
98 depends on HAVE_FTRACE 115 depends on HAVE_FTRACE
116 depends on DEBUG_KERNEL
99 select TRACING 117 select TRACING
100 select MARKERS 118 select MARKERS
101 help 119 help
102 This tracer gets called from the context switch and records 120 This tracer gets called from the context switch and records
103 all switching of tasks. 121 all switching of tasks.
104 122
123config BOOT_TRACER
124 bool "Trace boot initcalls"
125 depends on HAVE_FTRACE
126 depends on DEBUG_KERNEL
127 select TRACING
128 help
129 This tracer helps developers to optimize boot times: it records
130 the timings of the initcalls and traces key events and the identity
131 of tasks that can cause boot delays, such as context-switches.
132
133 Its aim is to be parsed by the /scripts/bootgraph.pl tool to
134 produce pretty graphics about boot inefficiencies, giving a visual
135 representation of the delays during initcalls - but the raw
136 /debug/tracing/trace text output is readable too.
137
138 ( Note that tracing self tests can't be enabled if this tracer is
139 selected, because the self-tests are an initcall as well and that
140 would invalidate the boot trace. )
141
142config STACK_TRACER
143 bool "Trace max stack"
144 depends on HAVE_FTRACE
145 depends on DEBUG_KERNEL
146 select FTRACE
147 select STACKTRACE
148 help
149 This special tracer records the maximum stack footprint of the
150 kernel and displays it in debugfs/tracing/stack_trace.
151
152 This tracer works by hooking into every function call that the
153 kernel executes, and keeping a maximum stack depth value and
154 stack-trace saved. Because this logic has to execute in every
155 kernel function, all the time, this option can slow down the
156 kernel measurably and is generally intended for kernel
157 developers only.
158
159 Say N if unsure.
160
105config DYNAMIC_FTRACE 161config DYNAMIC_FTRACE
106 bool "enable/disable ftrace tracepoints dynamically" 162 bool "enable/disable ftrace tracepoints dynamically"
107 depends on FTRACE 163 depends on FTRACE
108 depends on HAVE_DYNAMIC_FTRACE 164 depends on HAVE_DYNAMIC_FTRACE
165 depends on DEBUG_KERNEL
109 default y 166 default y
110 help 167 help
111 This option will modify all the calls to ftrace dynamically 168 This option will modify all the calls to ftrace dynamically
@@ -121,12 +178,17 @@ config DYNAMIC_FTRACE
121 were made. If so, it runs stop_machine (stops all CPUS) 178 were made. If so, it runs stop_machine (stops all CPUS)
122 and modifies the code to jump over the call to ftrace. 179 and modifies the code to jump over the call to ftrace.
123 180
181config FTRACE_MCOUNT_RECORD
182 def_bool y
183 depends on DYNAMIC_FTRACE
184 depends on HAVE_FTRACE_MCOUNT_RECORD
185
124config FTRACE_SELFTEST 186config FTRACE_SELFTEST
125 bool 187 bool
126 188
127config FTRACE_STARTUP_TEST 189config FTRACE_STARTUP_TEST
128 bool "Perform a startup test on ftrace" 190 bool "Perform a startup test on ftrace"
129 depends on TRACING 191 depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
130 select FTRACE_SELFTEST 192 select FTRACE_SELFTEST
131 help 193 help
132 This option performs a series of startup tests on ftrace. On bootup 194 This option performs a series of startup tests on ftrace. On bootup
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 71d17de17288..a85dfba88ba0 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -11,6 +11,7 @@ obj-y += trace_selftest_dynamic.o
11endif 11endif
12 12
13obj-$(CONFIG_FTRACE) += libftrace.o 13obj-$(CONFIG_FTRACE) += libftrace.o
14obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
14 15
15obj-$(CONFIG_TRACING) += trace.o 16obj-$(CONFIG_TRACING) += trace.o
16obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 17obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
@@ -19,6 +20,9 @@ obj-$(CONFIG_FTRACE) += trace_functions.o
19obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o 20obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
20obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o 21obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
21obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o 22obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
23obj-$(CONFIG_NOP_TRACER) += trace_nop.o
24obj-$(CONFIG_STACK_TRACER) += trace_stack.o
22obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o 25obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
26obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
23 27
24libftrace-y := ftrace.o 28libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f6e3af31b403..4dda4f60a2a9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -81,7 +81,7 @@ void clear_ftrace_function(void)
81 81
82static int __register_ftrace_function(struct ftrace_ops *ops) 82static int __register_ftrace_function(struct ftrace_ops *ops)
83{ 83{
84 /* Should never be called by interrupts */ 84 /* should not be called from interrupt context */
85 spin_lock(&ftrace_lock); 85 spin_lock(&ftrace_lock);
86 86
87 ops->next = ftrace_list; 87 ops->next = ftrace_list;
@@ -115,6 +115,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
115 struct ftrace_ops **p; 115 struct ftrace_ops **p;
116 int ret = 0; 116 int ret = 0;
117 117
118 /* should not be called from interrupt context */
118 spin_lock(&ftrace_lock); 119 spin_lock(&ftrace_lock);
119 120
120 /* 121 /*
@@ -153,6 +154,30 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
153 154
154#ifdef CONFIG_DYNAMIC_FTRACE 155#ifdef CONFIG_DYNAMIC_FTRACE
155 156
157#ifndef CONFIG_FTRACE_MCOUNT_RECORD
158/*
159 * The hash lock is only needed when the recording of the mcount
160 * callers are dynamic. That is, by the caller themselves and
161 * not recorded via the compilation.
162 */
163static DEFINE_SPINLOCK(ftrace_hash_lock);
164#define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
165#define ftrace_hash_unlock(flags) \
166 spin_unlock_irqrestore(&ftrace_hash_lock, flags)
167#else
168/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
169#define ftrace_hash_lock(flags) do { (void)(flags); } while (0)
170#define ftrace_hash_unlock(flags) do { } while(0)
171#endif
172
173/*
174 * Since MCOUNT_ADDR may point to mcount itself, we do not want
175 * to get it confused by reading a reference in the code as we
176 * are parsing on objcopy output of text. Use a variable for
177 * it instead.
178 */
179static unsigned long mcount_addr = MCOUNT_ADDR;
180
156static struct task_struct *ftraced_task; 181static struct task_struct *ftraced_task;
157 182
158enum { 183enum {
@@ -171,7 +196,6 @@ static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
171 196
172static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); 197static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
173 198
174static DEFINE_SPINLOCK(ftrace_shutdown_lock);
175static DEFINE_MUTEX(ftraced_lock); 199static DEFINE_MUTEX(ftraced_lock);
176static DEFINE_MUTEX(ftrace_regex_lock); 200static DEFINE_MUTEX(ftrace_regex_lock);
177 201
@@ -294,13 +318,37 @@ static inline void ftrace_del_hash(struct dyn_ftrace *node)
294 318
295static void ftrace_free_rec(struct dyn_ftrace *rec) 319static void ftrace_free_rec(struct dyn_ftrace *rec)
296{ 320{
297 /* no locking, only called from kstop_machine */
298
299 rec->ip = (unsigned long)ftrace_free_records; 321 rec->ip = (unsigned long)ftrace_free_records;
300 ftrace_free_records = rec; 322 ftrace_free_records = rec;
301 rec->flags |= FTRACE_FL_FREE; 323 rec->flags |= FTRACE_FL_FREE;
302} 324}
303 325
326void ftrace_release(void *start, unsigned long size)
327{
328 struct dyn_ftrace *rec;
329 struct ftrace_page *pg;
330 unsigned long s = (unsigned long)start;
331 unsigned long e = s + size;
332 int i;
333
334 if (ftrace_disabled || !start)
335 return;
336
337 /* should not be called from interrupt context */
338 spin_lock(&ftrace_lock);
339
340 for (pg = ftrace_pages_start; pg; pg = pg->next) {
341 for (i = 0; i < pg->index; i++) {
342 rec = &pg->records[i];
343
344 if ((rec->ip >= s) && (rec->ip < e))
345 ftrace_free_rec(rec);
346 }
347 }
348 spin_unlock(&ftrace_lock);
349
350}
351
304static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) 352static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
305{ 353{
306 struct dyn_ftrace *rec; 354 struct dyn_ftrace *rec;
@@ -338,7 +386,6 @@ ftrace_record_ip(unsigned long ip)
338 unsigned long flags; 386 unsigned long flags;
339 unsigned long key; 387 unsigned long key;
340 int resched; 388 int resched;
341 int atomic;
342 int cpu; 389 int cpu;
343 390
344 if (!ftrace_enabled || ftrace_disabled) 391 if (!ftrace_enabled || ftrace_disabled)
@@ -368,9 +415,7 @@ ftrace_record_ip(unsigned long ip)
368 if (ftrace_ip_in_hash(ip, key)) 415 if (ftrace_ip_in_hash(ip, key))
369 goto out; 416 goto out;
370 417
371 atomic = irqs_disabled(); 418 ftrace_hash_lock(flags);
372
373 spin_lock_irqsave(&ftrace_shutdown_lock, flags);
374 419
375 /* This ip may have hit the hash before the lock */ 420 /* This ip may have hit the hash before the lock */
376 if (ftrace_ip_in_hash(ip, key)) 421 if (ftrace_ip_in_hash(ip, key))
@@ -387,7 +432,7 @@ ftrace_record_ip(unsigned long ip)
387 ftraced_trigger = 1; 432 ftraced_trigger = 1;
388 433
389 out_unlock: 434 out_unlock:
390 spin_unlock_irqrestore(&ftrace_shutdown_lock, flags); 435 ftrace_hash_unlock(flags);
391 out: 436 out:
392 per_cpu(ftrace_shutdown_disable_cpu, cpu)--; 437 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
393 438
@@ -531,6 +576,16 @@ static void ftrace_shutdown_replenish(void)
531 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); 576 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
532} 577}
533 578
579static void print_ip_ins(const char *fmt, unsigned char *p)
580{
581 int i;
582
583 printk(KERN_CONT "%s", fmt);
584
585 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
586 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
587}
588
534static int 589static int
535ftrace_code_disable(struct dyn_ftrace *rec) 590ftrace_code_disable(struct dyn_ftrace *rec)
536{ 591{
@@ -541,10 +596,27 @@ ftrace_code_disable(struct dyn_ftrace *rec)
541 ip = rec->ip; 596 ip = rec->ip;
542 597
543 nop = ftrace_nop_replace(); 598 nop = ftrace_nop_replace();
544 call = ftrace_call_replace(ip, MCOUNT_ADDR); 599 call = ftrace_call_replace(ip, mcount_addr);
545 600
546 failed = ftrace_modify_code(ip, call, nop); 601 failed = ftrace_modify_code(ip, call, nop);
547 if (failed) { 602 if (failed) {
603 switch (failed) {
604 case 1:
605 WARN_ON_ONCE(1);
606 pr_info("ftrace faulted on modifying ");
607 print_ip_sym(ip);
608 break;
609 case 2:
610 WARN_ON_ONCE(1);
611 pr_info("ftrace failed to modify ");
612 print_ip_sym(ip);
613 print_ip_ins(" expected: ", call);
614 print_ip_ins(" actual: ", (unsigned char *)ip);
615 print_ip_ins(" replace: ", nop);
616 printk(KERN_CONT "\n");
617 break;
618 }
619
548 rec->flags |= FTRACE_FL_FAILED; 620 rec->flags |= FTRACE_FL_FAILED;
549 return 0; 621 return 0;
550 } 622 }
@@ -792,47 +864,7 @@ static int ftrace_update_code(void)
792 return 1; 864 return 1;
793} 865}
794 866
795static int ftraced(void *ignore) 867static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
796{
797 unsigned long usecs;
798
799 while (!kthread_should_stop()) {
800
801 set_current_state(TASK_INTERRUPTIBLE);
802
803 /* check once a second */
804 schedule_timeout(HZ);
805
806 if (unlikely(ftrace_disabled))
807 continue;
808
809 mutex_lock(&ftrace_sysctl_lock);
810 mutex_lock(&ftraced_lock);
811 if (!ftraced_suspend && !ftraced_stop &&
812 ftrace_update_code()) {
813 usecs = nsecs_to_usecs(ftrace_update_time);
814 if (ftrace_update_tot_cnt > 100000) {
815 ftrace_update_tot_cnt = 0;
816 pr_info("hm, dftrace overflow: %lu change%s"
817 " (%lu total) in %lu usec%s\n",
818 ftrace_update_cnt,
819 ftrace_update_cnt != 1 ? "s" : "",
820 ftrace_update_tot_cnt,
821 usecs, usecs != 1 ? "s" : "");
822 ftrace_disabled = 1;
823 WARN_ON_ONCE(1);
824 }
825 }
826 mutex_unlock(&ftraced_lock);
827 mutex_unlock(&ftrace_sysctl_lock);
828
829 ftrace_shutdown_replenish();
830 }
831 __set_current_state(TASK_RUNNING);
832 return 0;
833}
834
835static int __init ftrace_dyn_table_alloc(void)
836{ 868{
837 struct ftrace_page *pg; 869 struct ftrace_page *pg;
838 int cnt; 870 int cnt;
@@ -859,7 +891,9 @@ static int __init ftrace_dyn_table_alloc(void)
859 891
860 pg = ftrace_pages = ftrace_pages_start; 892 pg = ftrace_pages = ftrace_pages_start;
861 893
862 cnt = NR_TO_INIT / ENTRIES_PER_PAGE; 894 cnt = num_to_init / ENTRIES_PER_PAGE;
895 pr_info("ftrace: allocating %ld hash entries in %d pages\n",
896 num_to_init, cnt);
863 897
864 for (i = 0; i < cnt; i++) { 898 for (i = 0; i < cnt; i++) {
865 pg->next = (void *)get_zeroed_page(GFP_KERNEL); 899 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -901,6 +935,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
901 935
902 (*pos)++; 936 (*pos)++;
903 937
938 /* should not be called from interrupt context */
939 spin_lock(&ftrace_lock);
904 retry: 940 retry:
905 if (iter->idx >= iter->pg->index) { 941 if (iter->idx >= iter->pg->index) {
906 if (iter->pg->next) { 942 if (iter->pg->next) {
@@ -910,15 +946,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
910 } 946 }
911 } else { 947 } else {
912 rec = &iter->pg->records[iter->idx++]; 948 rec = &iter->pg->records[iter->idx++];
913 if ((!(iter->flags & FTRACE_ITER_FAILURES) && 949 if ((rec->flags & FTRACE_FL_FREE) ||
950
951 (!(iter->flags & FTRACE_ITER_FAILURES) &&
914 (rec->flags & FTRACE_FL_FAILED)) || 952 (rec->flags & FTRACE_FL_FAILED)) ||
915 953
916 ((iter->flags & FTRACE_ITER_FAILURES) && 954 ((iter->flags & FTRACE_ITER_FAILURES) &&
917 (!(rec->flags & FTRACE_FL_FAILED) || 955 !(rec->flags & FTRACE_FL_FAILED)) ||
918 (rec->flags & FTRACE_FL_FREE))) ||
919
920 ((iter->flags & FTRACE_ITER_FILTER) &&
921 !(rec->flags & FTRACE_FL_FILTER)) ||
922 956
923 ((iter->flags & FTRACE_ITER_NOTRACE) && 957 ((iter->flags & FTRACE_ITER_NOTRACE) &&
924 !(rec->flags & FTRACE_FL_NOTRACE))) { 958 !(rec->flags & FTRACE_FL_NOTRACE))) {
@@ -926,6 +960,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
926 goto retry; 960 goto retry;
927 } 961 }
928 } 962 }
963 spin_unlock(&ftrace_lock);
929 964
930 iter->pos = *pos; 965 iter->pos = *pos;
931 966
@@ -1039,8 +1074,8 @@ static void ftrace_filter_reset(int enable)
1039 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1074 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1040 unsigned i; 1075 unsigned i;
1041 1076
1042 /* keep kstop machine from running */ 1077 /* should not be called from interrupt context */
1043 preempt_disable(); 1078 spin_lock(&ftrace_lock);
1044 if (enable) 1079 if (enable)
1045 ftrace_filtered = 0; 1080 ftrace_filtered = 0;
1046 pg = ftrace_pages_start; 1081 pg = ftrace_pages_start;
@@ -1053,7 +1088,7 @@ static void ftrace_filter_reset(int enable)
1053 } 1088 }
1054 pg = pg->next; 1089 pg = pg->next;
1055 } 1090 }
1056 preempt_enable(); 1091 spin_unlock(&ftrace_lock);
1057} 1092}
1058 1093
1059static int 1094static int
@@ -1165,8 +1200,8 @@ ftrace_match(unsigned char *buff, int len, int enable)
1165 } 1200 }
1166 } 1201 }
1167 1202
1168 /* keep kstop machine from running */ 1203 /* should not be called from interrupt context */
1169 preempt_disable(); 1204 spin_lock(&ftrace_lock);
1170 if (enable) 1205 if (enable)
1171 ftrace_filtered = 1; 1206 ftrace_filtered = 1;
1172 pg = ftrace_pages_start; 1207 pg = ftrace_pages_start;
@@ -1203,7 +1238,7 @@ ftrace_match(unsigned char *buff, int len, int enable)
1203 } 1238 }
1204 pg = pg->next; 1239 pg = pg->next;
1205 } 1240 }
1206 preempt_enable(); 1241 spin_unlock(&ftrace_lock);
1207} 1242}
1208 1243
1209static ssize_t 1244static ssize_t
@@ -1556,6 +1591,114 @@ static __init int ftrace_init_debugfs(void)
1556 1591
1557fs_initcall(ftrace_init_debugfs); 1592fs_initcall(ftrace_init_debugfs);
1558 1593
1594#ifdef CONFIG_FTRACE_MCOUNT_RECORD
1595static int ftrace_convert_nops(unsigned long *start,
1596 unsigned long *end)
1597{
1598 unsigned long *p;
1599 unsigned long addr;
1600 unsigned long flags;
1601
1602 p = start;
1603 while (p < end) {
1604 addr = ftrace_call_adjust(*p++);
1605 /* should not be called from interrupt context */
1606 spin_lock(&ftrace_lock);
1607 ftrace_record_ip(addr);
1608 spin_unlock(&ftrace_lock);
1609 ftrace_shutdown_replenish();
1610 }
1611
1612 /* p is ignored */
1613 local_irq_save(flags);
1614 __ftrace_update_code(p);
1615 local_irq_restore(flags);
1616
1617 return 0;
1618}
1619
1620void ftrace_init_module(unsigned long *start, unsigned long *end)
1621{
1622 if (ftrace_disabled || start == end)
1623 return;
1624 ftrace_convert_nops(start, end);
1625}
1626
1627extern unsigned long __start_mcount_loc[];
1628extern unsigned long __stop_mcount_loc[];
1629
1630void __init ftrace_init(void)
1631{
1632 unsigned long count, addr, flags;
1633 int ret;
1634
1635 /* Keep the ftrace pointer to the stub */
1636 addr = (unsigned long)ftrace_stub;
1637
1638 local_irq_save(flags);
1639 ftrace_dyn_arch_init(&addr);
1640 local_irq_restore(flags);
1641
1642 /* ftrace_dyn_arch_init places the return code in addr */
1643 if (addr)
1644 goto failed;
1645
1646 count = __stop_mcount_loc - __start_mcount_loc;
1647
1648 ret = ftrace_dyn_table_alloc(count);
1649 if (ret)
1650 goto failed;
1651
1652 last_ftrace_enabled = ftrace_enabled = 1;
1653
1654 ret = ftrace_convert_nops(__start_mcount_loc,
1655 __stop_mcount_loc);
1656
1657 return;
1658 failed:
1659 ftrace_disabled = 1;
1660}
1661#else /* CONFIG_FTRACE_MCOUNT_RECORD */
1662static int ftraced(void *ignore)
1663{
1664 unsigned long usecs;
1665
1666 while (!kthread_should_stop()) {
1667
1668 set_current_state(TASK_INTERRUPTIBLE);
1669
1670 /* check once a second */
1671 schedule_timeout(HZ);
1672
1673 if (unlikely(ftrace_disabled))
1674 continue;
1675
1676 mutex_lock(&ftrace_sysctl_lock);
1677 mutex_lock(&ftraced_lock);
1678 if (!ftraced_suspend && !ftraced_stop &&
1679 ftrace_update_code()) {
1680 usecs = nsecs_to_usecs(ftrace_update_time);
1681 if (ftrace_update_tot_cnt > 100000) {
1682 ftrace_update_tot_cnt = 0;
1683 pr_info("hm, dftrace overflow: %lu change%s"
1684 " (%lu total) in %lu usec%s\n",
1685 ftrace_update_cnt,
1686 ftrace_update_cnt != 1 ? "s" : "",
1687 ftrace_update_tot_cnt,
1688 usecs, usecs != 1 ? "s" : "");
1689 ftrace_disabled = 1;
1690 WARN_ON_ONCE(1);
1691 }
1692 }
1693 mutex_unlock(&ftraced_lock);
1694 mutex_unlock(&ftrace_sysctl_lock);
1695
1696 ftrace_shutdown_replenish();
1697 }
1698 __set_current_state(TASK_RUNNING);
1699 return 0;
1700}
1701
1559static int __init ftrace_dynamic_init(void) 1702static int __init ftrace_dynamic_init(void)
1560{ 1703{
1561 struct task_struct *p; 1704 struct task_struct *p;
@@ -1572,7 +1715,7 @@ static int __init ftrace_dynamic_init(void)
1572 goto failed; 1715 goto failed;
1573 } 1716 }
1574 1717
1575 ret = ftrace_dyn_table_alloc(); 1718 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1576 if (ret) 1719 if (ret)
1577 goto failed; 1720 goto failed;
1578 1721
@@ -1593,6 +1736,8 @@ static int __init ftrace_dynamic_init(void)
1593} 1736}
1594 1737
1595core_initcall(ftrace_dynamic_init); 1738core_initcall(ftrace_dynamic_init);
1739#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1740
1596#else 1741#else
1597# define ftrace_startup() do { } while (0) 1742# define ftrace_startup() do { } while (0)
1598# define ftrace_shutdown() do { } while (0) 1743# define ftrace_shutdown() do { } while (0)
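
For reference, the function-trace entry points managed by this file are consumed through register_ftrace_function(); a minimal caller looks roughly like this (sketch, assuming the two-argument ftrace_func_t callback of this kernel generation):

#include <linux/ftrace.h>

static void notrace my_trace_call(unsigned long ip, unsigned long parent_ip)
{
        /* invoked at the entry of every traced function -- keep it minimal */
}

static struct ftrace_ops my_trace_ops = {
        .func = my_trace_call,
};

static int __init my_tracer_init(void)
{
        return register_ftrace_function(&my_trace_ops);
}

static void my_tracer_exit(void)
{
        unregister_ftrace_function(&my_trace_ops);
}
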
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
new file mode 100644
index 000000000000..94af1fe56bb4
--- /dev/null
+++ b/kernel/trace/ring_buffer.c
@@ -0,0 +1,2014 @@
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
19/* Up this if you want to test the TIME_EXTENTS and normalization */
20#define DEBUG_SHIFT 0
21
22/* FIXME!!! */
23u64 ring_buffer_time_stamp(int cpu)
24{
25 /* shift to debug/test normalization and TIME_EXTENTS */
26 return sched_clock() << DEBUG_SHIFT;
27}
28
29void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
30{
31 /* Just stupid testing the normalize function and deltas */
32 *ts >>= DEBUG_SHIFT;
33}
34
35#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
36#define RB_ALIGNMENT_SHIFT 2
37#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
38#define RB_MAX_SMALL_DATA 28
39
40enum {
41 RB_LEN_TIME_EXTEND = 8,
42 RB_LEN_TIME_STAMP = 16,
43};
44
45/* inline for ring buffer fast paths */
46static inline unsigned
47rb_event_length(struct ring_buffer_event *event)
48{
49 unsigned length;
50
51 switch (event->type) {
52 case RINGBUF_TYPE_PADDING:
53 /* undefined */
54 return -1;
55
56 case RINGBUF_TYPE_TIME_EXTEND:
57 return RB_LEN_TIME_EXTEND;
58
59 case RINGBUF_TYPE_TIME_STAMP:
60 return RB_LEN_TIME_STAMP;
61
62 case RINGBUF_TYPE_DATA:
63 if (event->len)
64 length = event->len << RB_ALIGNMENT_SHIFT;
65 else
66 length = event->array[0];
67 return length + RB_EVNT_HDR_SIZE;
68 default:
69 BUG();
70 }
71 /* not hit */
72 return 0;
73}
74
75/**
76 * ring_buffer_event_length - return the length of the event
77 * @event: the event to get the length of
78 */
79unsigned ring_buffer_event_length(struct ring_buffer_event *event)
80{
81 return rb_event_length(event);
82}
83
84/* inline for ring buffer fast paths */
85static inline void *
86rb_event_data(struct ring_buffer_event *event)
87{
88 BUG_ON(event->type != RINGBUF_TYPE_DATA);
89 /* If length is in len field, then array[0] has the data */
90 if (event->len)
91 return (void *)&event->array[0];
92 /* Otherwise length is in array[0] and array[1] has the data */
93 return (void *)&event->array[1];
94}
95
96/**
97 * ring_buffer_event_data - return the data of the event
98 * @event: the event to get the data from
99 */
100void *ring_buffer_event_data(struct ring_buffer_event *event)
101{
102 return rb_event_data(event);
103}
104
105#define for_each_buffer_cpu(buffer, cpu) \
106 for_each_cpu_mask(cpu, buffer->cpumask)
107
108#define TS_SHIFT 27
109#define TS_MASK ((1ULL << TS_SHIFT) - 1)
110#define TS_DELTA_TEST (~TS_MASK)
111
112/*
113 * This hack stolen from mm/slob.c.
114 * We can store per page timing information in the page frame of the page.
115 * Thanks to Peter Zijlstra for suggesting this idea.
116 */
117struct buffer_page {
118 u64 time_stamp; /* page time stamp */
119 local_t write; /* index for next write */
120	local_t		 commit;	/* write committed index */
121 unsigned read; /* index for next read */
122 struct list_head list; /* list of free pages */
123 void *page; /* Actual data page */
124};
125
126/*
127 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
128 * this issue out.
129 */
130static inline void free_buffer_page(struct buffer_page *bpage)
131{
132 if (bpage->page)
133 __free_page(bpage->page);
134 kfree(bpage);
135}
136
137/*
138 * We need to fit the time_stamp delta into 27 bits.
139 */
140static inline int test_time_stamp(u64 delta)
141{
142 if (delta & TS_DELTA_TEST)
143 return 1;
144 return 0;
145}
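Illustrative boundary check for the 27-bit delta budget above; assuming a nanosecond clock, deltas up to roughly 134 ms fit in the event header, and anything larger forces a time-extend event (example_delta_budget is hypothetical and only meaningful inside this file):

/* Sketch: the 27-bit time-stamp delta boundary. */
static void example_delta_budget(void)
{
	u64 fits    = (1ULL << TS_SHIFT) - 1;	/* ~134 ms at 1 ns resolution */
	u64 too_big = 1ULL << TS_SHIFT;		/* needs RINGBUF_TYPE_TIME_EXTEND */

	WARN_ON(test_time_stamp(fits));
	WARN_ON(!test_time_stamp(too_big));
}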
146
147#define BUF_PAGE_SIZE PAGE_SIZE
148
149/*
150 * head_page == tail_page && head == tail then buffer is empty.
151 */
152struct ring_buffer_per_cpu {
153 int cpu;
154 struct ring_buffer *buffer;
155 spinlock_t lock;
156 struct lock_class_key lock_key;
157 struct list_head pages;
158 struct buffer_page *head_page; /* read from head */
159 struct buffer_page *tail_page; /* write to tail */
160	struct buffer_page	*commit_page;	/* committed pages */
161 struct buffer_page *reader_page;
162 unsigned long overrun;
163 unsigned long entries;
164 u64 write_stamp;
165 u64 read_stamp;
166 atomic_t record_disabled;
167};
168
169struct ring_buffer {
170 unsigned long size;
171 unsigned pages;
172 unsigned flags;
173 int cpus;
174 cpumask_t cpumask;
175 atomic_t record_disabled;
176
177 struct mutex mutex;
178
179 struct ring_buffer_per_cpu **buffers;
180};
181
182struct ring_buffer_iter {
183 struct ring_buffer_per_cpu *cpu_buffer;
184 unsigned long head;
185 struct buffer_page *head_page;
186 u64 read_stamp;
187};
188
189#define RB_WARN_ON(buffer, cond) \
190 do { \
191 if (unlikely(cond)) { \
192 atomic_inc(&buffer->record_disabled); \
193 WARN_ON(1); \
194 } \
195 } while (0)
196
197#define RB_WARN_ON_RET(buffer, cond) \
198 do { \
199 if (unlikely(cond)) { \
200 atomic_inc(&buffer->record_disabled); \
201 WARN_ON(1); \
202 return -1; \
203 } \
204 } while (0)
205
206#define RB_WARN_ON_ONCE(buffer, cond) \
207 do { \
208 static int once; \
209 if (unlikely(cond) && !once) { \
210 once++; \
211 atomic_inc(&buffer->record_disabled); \
212 WARN_ON(1); \
213 } \
214 } while (0)
215
216/**
217 * rb_check_pages - integrity check of buffer pages
218 * @cpu_buffer: CPU buffer with pages to test
219 *
220 * As a safety measure we check to make sure the data pages have not
221 * been corrupted.
222 */
223static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
224{
225 struct list_head *head = &cpu_buffer->pages;
226 struct buffer_page *page, *tmp;
227
228 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
229 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
230
231 list_for_each_entry_safe(page, tmp, head, list) {
232 RB_WARN_ON_RET(cpu_buffer,
233 page->list.next->prev != &page->list);
234 RB_WARN_ON_RET(cpu_buffer,
235 page->list.prev->next != &page->list);
236 }
237
238 return 0;
239}
240
241static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
242 unsigned nr_pages)
243{
244 struct list_head *head = &cpu_buffer->pages;
245 struct buffer_page *page, *tmp;
246 unsigned long addr;
247 LIST_HEAD(pages);
248 unsigned i;
249
250 for (i = 0; i < nr_pages; i++) {
251 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
252 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
253 if (!page)
254 goto free_pages;
255 list_add(&page->list, &pages);
256
257 addr = __get_free_page(GFP_KERNEL);
258 if (!addr)
259 goto free_pages;
260 page->page = (void *)addr;
261 }
262
263 list_splice(&pages, head);
264
265 rb_check_pages(cpu_buffer);
266
267 return 0;
268
269 free_pages:
270 list_for_each_entry_safe(page, tmp, &pages, list) {
271 list_del_init(&page->list);
272 free_buffer_page(page);
273 }
274 return -ENOMEM;
275}
276
277static struct ring_buffer_per_cpu *
278rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
279{
280 struct ring_buffer_per_cpu *cpu_buffer;
281 struct buffer_page *page;
282 unsigned long addr;
283 int ret;
284
285 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
286 GFP_KERNEL, cpu_to_node(cpu));
287 if (!cpu_buffer)
288 return NULL;
289
290 cpu_buffer->cpu = cpu;
291 cpu_buffer->buffer = buffer;
292 spin_lock_init(&cpu_buffer->lock);
293 INIT_LIST_HEAD(&cpu_buffer->pages);
294
295 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
296 GFP_KERNEL, cpu_to_node(cpu));
297 if (!page)
298 goto fail_free_buffer;
299
300 cpu_buffer->reader_page = page;
301 addr = __get_free_page(GFP_KERNEL);
302 if (!addr)
303 goto fail_free_reader;
304 page->page = (void *)addr;
305
306 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
307
308 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
309 if (ret < 0)
310 goto fail_free_reader;
311
312 cpu_buffer->head_page
313 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
314 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
315
316 return cpu_buffer;
317
318 fail_free_reader:
319 free_buffer_page(cpu_buffer->reader_page);
320
321 fail_free_buffer:
322 kfree(cpu_buffer);
323 return NULL;
324}
325
326static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
327{
328 struct list_head *head = &cpu_buffer->pages;
329 struct buffer_page *page, *tmp;
330
331 list_del_init(&cpu_buffer->reader_page->list);
332 free_buffer_page(cpu_buffer->reader_page);
333
334 list_for_each_entry_safe(page, tmp, head, list) {
335 list_del_init(&page->list);
336 free_buffer_page(page);
337 }
338 kfree(cpu_buffer);
339}
340
341/*
342 * Causes compile errors if the struct buffer_page gets bigger
343 * than the struct page.
344 */
345extern int ring_buffer_page_too_big(void);
346
347/**
348 * ring_buffer_alloc - allocate a new ring_buffer
349 * @size: the size in bytes that is needed.
350 * @flags: attributes to set for the ring buffer.
351 *
352 * Currently the only flag that is available is the RB_FL_OVERWRITE
353 * flag. This flag means that the buffer will overwrite old data
354 * when the buffer wraps. If this flag is not set, the buffer will
355 * drop data when the tail hits the head.
356 */
357struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
358{
359 struct ring_buffer *buffer;
360 int bsize;
361 int cpu;
362
363 /* Paranoid! Optimizes out when all is well */
364 if (sizeof(struct buffer_page) > sizeof(struct page))
365 ring_buffer_page_too_big();
366
367
368 /* keep it in its own cache line */
369 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
370 GFP_KERNEL);
371 if (!buffer)
372 return NULL;
373
374 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
375 buffer->flags = flags;
376
377 /* need at least two pages */
378 if (buffer->pages == 1)
379 buffer->pages++;
380
381 buffer->cpumask = cpu_possible_map;
382 buffer->cpus = nr_cpu_ids;
383
384 bsize = sizeof(void *) * nr_cpu_ids;
385 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
386 GFP_KERNEL);
387 if (!buffer->buffers)
388 goto fail_free_buffer;
389
390 for_each_buffer_cpu(buffer, cpu) {
391 buffer->buffers[cpu] =
392 rb_allocate_cpu_buffer(buffer, cpu);
393 if (!buffer->buffers[cpu])
394 goto fail_free_buffers;
395 }
396
397 mutex_init(&buffer->mutex);
398
399 return buffer;
400
401 fail_free_buffers:
402 for_each_buffer_cpu(buffer, cpu) {
403 if (buffer->buffers[cpu])
404 rb_free_cpu_buffer(buffer->buffers[cpu]);
405 }
406 kfree(buffer->buffers);
407
408 fail_free_buffer:
409 kfree(buffer);
410 return NULL;
411}
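A hedged usage sketch of the allocator above: the size is given in bytes, rounded up to whole buffer pages with a two-page minimum, and RB_FL_OVERWRITE selects the overwrite-on-wrap behaviour described in the kerneldoc (the example_* names are hypothetical):

/* Sketch: allocate a 64 KB overwriting ring buffer and free it again. */
#include <linux/ring_buffer.h>

static struct ring_buffer *example_buffer;

static int example_setup(void)
{
	example_buffer = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
	if (!example_buffer)
		return -ENOMEM;
	return 0;
}

static void example_teardown(void)
{
	ring_buffer_free(example_buffer);
	example_buffer = NULL;
}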
412
413/**
414 * ring_buffer_free - free a ring buffer.
415 * @buffer: the buffer to free.
416 */
417void
418ring_buffer_free(struct ring_buffer *buffer)
419{
420 int cpu;
421
422 for_each_buffer_cpu(buffer, cpu)
423 rb_free_cpu_buffer(buffer->buffers[cpu]);
424
425 kfree(buffer);
426}
427
428static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
429
430static void
431rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
432{
433 struct buffer_page *page;
434 struct list_head *p;
435 unsigned i;
436
437 atomic_inc(&cpu_buffer->record_disabled);
438 synchronize_sched();
439
440 for (i = 0; i < nr_pages; i++) {
441 BUG_ON(list_empty(&cpu_buffer->pages));
442 p = cpu_buffer->pages.next;
443 page = list_entry(p, struct buffer_page, list);
444 list_del_init(&page->list);
445 free_buffer_page(page);
446 }
447 BUG_ON(list_empty(&cpu_buffer->pages));
448
449 rb_reset_cpu(cpu_buffer);
450
451 rb_check_pages(cpu_buffer);
452
453 atomic_dec(&cpu_buffer->record_disabled);
454
455}
456
457static void
458rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
459 struct list_head *pages, unsigned nr_pages)
460{
461 struct buffer_page *page;
462 struct list_head *p;
463 unsigned i;
464
465 atomic_inc(&cpu_buffer->record_disabled);
466 synchronize_sched();
467
468 for (i = 0; i < nr_pages; i++) {
469 BUG_ON(list_empty(pages));
470 p = pages->next;
471 page = list_entry(p, struct buffer_page, list);
472 list_del_init(&page->list);
473 list_add_tail(&page->list, &cpu_buffer->pages);
474 }
475 rb_reset_cpu(cpu_buffer);
476
477 rb_check_pages(cpu_buffer);
478
479 atomic_dec(&cpu_buffer->record_disabled);
480}
481
482/**
483 * ring_buffer_resize - resize the ring buffer
484 * @buffer: the buffer to resize.
485 * @size: the new size.
486 *
487 * The tracer is responsible for making sure that the buffer is
488 * not being used while changing the size.
489 * Note: We may be able to change the above requirement by using
490 * RCU synchronizations.
491 *
492 * Minimum size is 2 * BUF_PAGE_SIZE.
493 *
494 * Returns -ENOMEM on failure.
495 */
496int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
497{
498 struct ring_buffer_per_cpu *cpu_buffer;
499 unsigned nr_pages, rm_pages, new_pages;
500 struct buffer_page *page, *tmp;
501 unsigned long buffer_size;
502 unsigned long addr;
503 LIST_HEAD(pages);
504 int i, cpu;
505
506 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
507 size *= BUF_PAGE_SIZE;
508 buffer_size = buffer->pages * BUF_PAGE_SIZE;
509
510 /* we need a minimum of two pages */
511 if (size < BUF_PAGE_SIZE * 2)
512 size = BUF_PAGE_SIZE * 2;
513
514 if (size == buffer_size)
515 return size;
516
517 mutex_lock(&buffer->mutex);
518
519 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
520
521 if (size < buffer_size) {
522
523 /* easy case, just free pages */
524 BUG_ON(nr_pages >= buffer->pages);
525
526 rm_pages = buffer->pages - nr_pages;
527
528 for_each_buffer_cpu(buffer, cpu) {
529 cpu_buffer = buffer->buffers[cpu];
530 rb_remove_pages(cpu_buffer, rm_pages);
531 }
532 goto out;
533 }
534
535 /*
536 * This is a bit more difficult. We only want to add pages
537 * when we can allocate enough for all CPUs. We do this
538 * by allocating all the pages and storing them on a local
539 * linked list. If we succeed in our allocation, then we
540 * add these pages to the cpu_buffers. Otherwise we just free
541 * them all and return -ENOMEM;
542 */
543 BUG_ON(nr_pages <= buffer->pages);
544 new_pages = nr_pages - buffer->pages;
545
546 for_each_buffer_cpu(buffer, cpu) {
547 for (i = 0; i < new_pages; i++) {
548 page = kzalloc_node(ALIGN(sizeof(*page),
549 cache_line_size()),
550 GFP_KERNEL, cpu_to_node(cpu));
551 if (!page)
552 goto free_pages;
553 list_add(&page->list, &pages);
554 addr = __get_free_page(GFP_KERNEL);
555 if (!addr)
556 goto free_pages;
557 page->page = (void *)addr;
558 }
559 }
560
561 for_each_buffer_cpu(buffer, cpu) {
562 cpu_buffer = buffer->buffers[cpu];
563 rb_insert_pages(cpu_buffer, &pages, new_pages);
564 }
565
566 BUG_ON(!list_empty(&pages));
567
568 out:
569 buffer->pages = nr_pages;
570 mutex_unlock(&buffer->mutex);
571
572 return size;
573
574 free_pages:
575 list_for_each_entry_safe(page, tmp, &pages, list) {
576 list_del_init(&page->list);
577 free_buffer_page(page);
578 }
579 return -ENOMEM;
580}
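One way a caller might satisfy the "not being used while changing the size" requirement documented above is to quiesce writers first; a hedged sketch (the disable/synchronize approach is an assumption, not something this patch mandates):

/* Sketch: grow an existing buffer to ~1 MB with writers quiesced. */
static int example_grow(struct ring_buffer *buffer)
{
	int ret;

	ring_buffer_record_disable(buffer);
	synchronize_sched();			/* wait out in-flight writers */

	ret = ring_buffer_resize(buffer, 1024 * 1024);

	ring_buffer_record_enable(buffer);
	return ret < 0 ? ret : 0;		/* resize returns the new size */
}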
581
582static inline int rb_null_event(struct ring_buffer_event *event)
583{
584 return event->type == RINGBUF_TYPE_PADDING;
585}
586
587static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
588{
589 return page->page + index;
590}
591
592static inline struct ring_buffer_event *
593rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
594{
595 return __rb_page_index(cpu_buffer->reader_page,
596 cpu_buffer->reader_page->read);
597}
598
599static inline struct ring_buffer_event *
600rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
601{
602 return __rb_page_index(cpu_buffer->head_page,
603 cpu_buffer->head_page->read);
604}
605
606static inline struct ring_buffer_event *
607rb_iter_head_event(struct ring_buffer_iter *iter)
608{
609 return __rb_page_index(iter->head_page, iter->head);
610}
611
612static inline unsigned rb_page_write(struct buffer_page *bpage)
613{
614 return local_read(&bpage->write);
615}
616
617static inline unsigned rb_page_commit(struct buffer_page *bpage)
618{
619 return local_read(&bpage->commit);
620}
621
622/* Size is determined by what has been committed */
623static inline unsigned rb_page_size(struct buffer_page *bpage)
624{
625 return rb_page_commit(bpage);
626}
627
628static inline unsigned
629rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
630{
631 return rb_page_commit(cpu_buffer->commit_page);
632}
633
634static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
635{
636 return rb_page_commit(cpu_buffer->head_page);
637}
638
639/*
640 * When the tail hits the head and the buffer is in overwrite mode,
641 * the head jumps to the next page and all content on the previous
642 * page is discarded. But before doing so, we update the overrun
643 * variable of the buffer.
644 */
645static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
646{
647 struct ring_buffer_event *event;
648 unsigned long head;
649
650 for (head = 0; head < rb_head_size(cpu_buffer);
651 head += rb_event_length(event)) {
652
653 event = __rb_page_index(cpu_buffer->head_page, head);
654 BUG_ON(rb_null_event(event));
655 /* Only count data entries */
656 if (event->type != RINGBUF_TYPE_DATA)
657 continue;
658 cpu_buffer->overrun++;
659 cpu_buffer->entries--;
660 }
661}
662
663static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
664 struct buffer_page **page)
665{
666 struct list_head *p = (*page)->list.next;
667
668 if (p == &cpu_buffer->pages)
669 p = p->next;
670
671 *page = list_entry(p, struct buffer_page, list);
672}
673
674static inline unsigned
675rb_event_index(struct ring_buffer_event *event)
676{
677 unsigned long addr = (unsigned long)event;
678
679 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
680}
681
682static inline int
683rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
684 struct ring_buffer_event *event)
685{
686 unsigned long addr = (unsigned long)event;
687 unsigned long index;
688
689 index = rb_event_index(event);
690 addr &= PAGE_MASK;
691
692 return cpu_buffer->commit_page->page == (void *)addr &&
693 rb_commit_index(cpu_buffer) == index;
694}
695
696static inline void
697rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
698 struct ring_buffer_event *event)
699{
700 unsigned long addr = (unsigned long)event;
701 unsigned long index;
702
703 index = rb_event_index(event);
704 addr &= PAGE_MASK;
705
706 while (cpu_buffer->commit_page->page != (void *)addr) {
707 RB_WARN_ON(cpu_buffer,
708 cpu_buffer->commit_page == cpu_buffer->tail_page);
709 cpu_buffer->commit_page->commit =
710 cpu_buffer->commit_page->write;
711 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
712 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
713 }
714
715 /* Now set the commit to the event's index */
716 local_set(&cpu_buffer->commit_page->commit, index);
717}
718
719static inline void
720rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
721{
722 /*
723 * We only race with interrupts and NMIs on this CPU.
724 * If we own the commit event, then we can commit
725 * all others that interrupted us, since the interruptions
726 * are in stack format (they finish before they come
727 * back to us). This allows us to do a simple loop to
728 * assign the commit to the tail.
729 */
730 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
731 cpu_buffer->commit_page->commit =
732 cpu_buffer->commit_page->write;
733 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
734 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
735 /* add barrier to keep gcc from optimizing too much */
736 barrier();
737 }
738 while (rb_commit_index(cpu_buffer) !=
739 rb_page_write(cpu_buffer->commit_page)) {
740 cpu_buffer->commit_page->commit =
741 cpu_buffer->commit_page->write;
742 barrier();
743 }
744}
745
746static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
747{
748 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
749 cpu_buffer->reader_page->read = 0;
750}
751
752static inline void rb_inc_iter(struct ring_buffer_iter *iter)
753{
754 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
755
756 /*
757 * The iterator could be on the reader page (it starts there).
758 * But the head could have moved, since the reader was
759 * found. Check for this case and assign the iterator
760 * to the head page instead of next.
761 */
762 if (iter->head_page == cpu_buffer->reader_page)
763 iter->head_page = cpu_buffer->head_page;
764 else
765 rb_inc_page(cpu_buffer, &iter->head_page);
766
767 iter->read_stamp = iter->head_page->time_stamp;
768 iter->head = 0;
769}
770
771/**
772 * rb_update_event - update event type and data
773 * @event: the event to update
774 * @type: the type of event
775 * @length: the size of the event field in the ring buffer
776 *
777 * Update the type and data fields of the event. The length
778 * is the actual size that is written to the ring buffer,
779 * and with this, we can determine what to place into the
780 * data field.
781 */
782static inline void
783rb_update_event(struct ring_buffer_event *event,
784 unsigned type, unsigned length)
785{
786 event->type = type;
787
788 switch (type) {
789
790 case RINGBUF_TYPE_PADDING:
791 break;
792
793 case RINGBUF_TYPE_TIME_EXTEND:
794 event->len =
795 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
796 >> RB_ALIGNMENT_SHIFT;
797 break;
798
799 case RINGBUF_TYPE_TIME_STAMP:
800 event->len =
801 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
802 >> RB_ALIGNMENT_SHIFT;
803 break;
804
805 case RINGBUF_TYPE_DATA:
806 length -= RB_EVNT_HDR_SIZE;
807 if (length > RB_MAX_SMALL_DATA) {
808 event->len = 0;
809 event->array[0] = length;
810 } else
811 event->len =
812 (length + (RB_ALIGNMENT-1))
813 >> RB_ALIGNMENT_SHIFT;
814 break;
815 default:
816 BUG();
817 }
818}
819
820static inline unsigned rb_calculate_event_length(unsigned length)
821{
822 struct ring_buffer_event event; /* Used only for sizeof array */
823
824	/* zero length can cause confusion */
825 if (!length)
826 length = 1;
827
828 if (length > RB_MAX_SMALL_DATA)
829 length += sizeof(event.array[0]);
830
831 length += RB_EVNT_HDR_SIZE;
832 length = ALIGN(length, RB_ALIGNMENT);
833
834 return length;
835}
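Worked numbers for the helper above, assuming sizeof(struct ring_buffer_event) is 4 bytes (the event header is declared in linux/ring_buffer.h, not in this hunk) and 4-byte alignment: a 6-byte payload reserves 12 bytes on the page, while a 100-byte payload exceeds RB_MAX_SMALL_DATA, gains a 4-byte array[0] length word, and reserves 108 bytes. A sketch, only meaningful inside this file:

/* Sketch: expected reserved sizes for small and large payloads. */
static void example_event_sizes(void)
{
	WARN_ON(rb_calculate_event_length(6)   != 12);	/* 4 hdr + 6 data, aligned to 4 */
	WARN_ON(rb_calculate_event_length(100) != 108);	/* 4 hdr + 4 length + 100 data  */
}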
836
837static struct ring_buffer_event *
838__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
839 unsigned type, unsigned long length, u64 *ts)
840{
841 struct buffer_page *tail_page, *head_page, *reader_page;
842 unsigned long tail, write;
843 struct ring_buffer *buffer = cpu_buffer->buffer;
844 struct ring_buffer_event *event;
845 unsigned long flags;
846
847 tail_page = cpu_buffer->tail_page;
848 write = local_add_return(length, &tail_page->write);
849 tail = write - length;
850
851	/* See if we shot past the end of this buffer page */
852 if (write > BUF_PAGE_SIZE) {
853 struct buffer_page *next_page = tail_page;
854
855 spin_lock_irqsave(&cpu_buffer->lock, flags);
856
857 rb_inc_page(cpu_buffer, &next_page);
858
859 head_page = cpu_buffer->head_page;
860 reader_page = cpu_buffer->reader_page;
861
862 /* we grabbed the lock before incrementing */
863 RB_WARN_ON(cpu_buffer, next_page == reader_page);
864
865 /*
866 * If for some reason, we had an interrupt storm that made
867 * it all the way around the buffer, bail, and warn
868 * about it.
869 */
870 if (unlikely(next_page == cpu_buffer->commit_page)) {
871 WARN_ON_ONCE(1);
872 goto out_unlock;
873 }
874
875 if (next_page == head_page) {
876 if (!(buffer->flags & RB_FL_OVERWRITE)) {
877 /* reset write */
878 if (tail <= BUF_PAGE_SIZE)
879 local_set(&tail_page->write, tail);
880 goto out_unlock;
881 }
882
883 /* tail_page has not moved yet? */
884 if (tail_page == cpu_buffer->tail_page) {
885 /* count overflows */
886 rb_update_overflow(cpu_buffer);
887
888 rb_inc_page(cpu_buffer, &head_page);
889 cpu_buffer->head_page = head_page;
890 cpu_buffer->head_page->read = 0;
891 }
892 }
893
894 /*
895 * If the tail page is still the same as what we think
896 * it is, then it is up to us to update the tail
897 * pointer.
898 */
899 if (tail_page == cpu_buffer->tail_page) {
900 local_set(&next_page->write, 0);
901 local_set(&next_page->commit, 0);
902 cpu_buffer->tail_page = next_page;
903
904 /* reread the time stamp */
905 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
906 cpu_buffer->tail_page->time_stamp = *ts;
907 }
908
909 /*
910 * The actual tail page has moved forward.
911 */
912 if (tail < BUF_PAGE_SIZE) {
913 /* Mark the rest of the page with padding */
914 event = __rb_page_index(tail_page, tail);
915 event->type = RINGBUF_TYPE_PADDING;
916 }
917
918 if (tail <= BUF_PAGE_SIZE)
919 /* Set the write back to the previous setting */
920 local_set(&tail_page->write, tail);
921
922 /*
923 * If this was a commit entry that failed,
924 * increment that too
925 */
926 if (tail_page == cpu_buffer->commit_page &&
927 tail == rb_commit_index(cpu_buffer)) {
928 rb_set_commit_to_write(cpu_buffer);
929 }
930
931 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
932
933 /* fail and let the caller try again */
934 return ERR_PTR(-EAGAIN);
935 }
936
937 /* We reserved something on the buffer */
938
939 BUG_ON(write > BUF_PAGE_SIZE);
940
941 event = __rb_page_index(tail_page, tail);
942 rb_update_event(event, type, length);
943
944 /*
945 * If this is a commit and the tail is zero, then update
946 * this page's time stamp.
947 */
948 if (!tail && rb_is_commit(cpu_buffer, event))
949 cpu_buffer->commit_page->time_stamp = *ts;
950
951 return event;
952
953 out_unlock:
954 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
955 return NULL;
956}
957
958static int
959rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
960 u64 *ts, u64 *delta)
961{
962 struct ring_buffer_event *event;
963 static int once;
964 int ret;
965
966 if (unlikely(*delta > (1ULL << 59) && !once++)) {
967 printk(KERN_WARNING "Delta way too big! %llu"
968 " ts=%llu write stamp = %llu\n",
969 *delta, *ts, cpu_buffer->write_stamp);
970 WARN_ON(1);
971 }
972
973 /*
974	 * The delta is too big, we need to add a
975 * new timestamp.
976 */
977 event = __rb_reserve_next(cpu_buffer,
978 RINGBUF_TYPE_TIME_EXTEND,
979 RB_LEN_TIME_EXTEND,
980 ts);
981 if (!event)
982 return -EBUSY;
983
984 if (PTR_ERR(event) == -EAGAIN)
985 return -EAGAIN;
986
987	/* Only a committed time event can update the write stamp */
988 if (rb_is_commit(cpu_buffer, event)) {
989 /*
990 * If this is the first on the page, then we need to
991 * update the page itself, and just put in a zero.
992 */
993 if (rb_event_index(event)) {
994 event->time_delta = *delta & TS_MASK;
995 event->array[0] = *delta >> TS_SHIFT;
996 } else {
997 cpu_buffer->commit_page->time_stamp = *ts;
998 event->time_delta = 0;
999 event->array[0] = 0;
1000 }
1001 cpu_buffer->write_stamp = *ts;
1002 /* let the caller know this was the commit */
1003 ret = 1;
1004 } else {
1005 /* Darn, this is just wasted space */
1006 event->time_delta = 0;
1007 event->array[0] = 0;
1008 ret = 0;
1009 }
1010
1011 *delta = 0;
1012
1013 return ret;
1014}
1015
1016static struct ring_buffer_event *
1017rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1018 unsigned type, unsigned long length)
1019{
1020 struct ring_buffer_event *event;
1021 u64 ts, delta;
1022 int commit = 0;
1023
1024 again:
1025 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1026
1027 /*
1028 * Only the first commit can update the timestamp.
1029 * Yes there is a race here. If an interrupt comes in
1030 * just after the conditional and it traces too, then it
1031 * will also check the deltas. More than one timestamp may
1032 * also be made. But only the entry that did the actual
1033 * commit will be something other than zero.
1034 */
1035 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1036 rb_page_write(cpu_buffer->tail_page) ==
1037 rb_commit_index(cpu_buffer)) {
1038
1039 delta = ts - cpu_buffer->write_stamp;
1040
1041 /* make sure this delta is calculated here */
1042 barrier();
1043
1044 /* Did the write stamp get updated already? */
1045 if (unlikely(ts < cpu_buffer->write_stamp))
1046 goto again;
1047
1048 if (test_time_stamp(delta)) {
1049
1050 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1051
1052 if (commit == -EBUSY)
1053 return NULL;
1054
1055 if (commit == -EAGAIN)
1056 goto again;
1057
1058 RB_WARN_ON(cpu_buffer, commit < 0);
1059 }
1060 } else
1061 /* Non commits have zero deltas */
1062 delta = 0;
1063
1064 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1065 if (PTR_ERR(event) == -EAGAIN)
1066 goto again;
1067
1068 if (!event) {
1069 if (unlikely(commit))
1070 /*
1071			 * Ouch! We needed a timestamp and it was committed. But
1072 * we didn't get our event reserved.
1073 */
1074 rb_set_commit_to_write(cpu_buffer);
1075 return NULL;
1076 }
1077
1078 /*
1079	 * If the timestamp was committed, make the commit our entry
1080 * now so that we will update it when needed.
1081 */
1082 if (commit)
1083 rb_set_commit_event(cpu_buffer, event);
1084 else if (!rb_is_commit(cpu_buffer, event))
1085 delta = 0;
1086
1087 event->time_delta = delta;
1088
1089 return event;
1090}
1091
1092static DEFINE_PER_CPU(int, rb_need_resched);
1093
1094/**
1095 * ring_buffer_lock_reserve - reserve a part of the buffer
1096 * @buffer: the ring buffer to reserve from
1097 * @length: the length of the data to reserve (excluding event header)
1098 * @flags: a pointer to save the interrupt flags
1099 *
1100 * Returns a reserved event on the ring buffer to copy data directly into.
1101 * The user of this interface will need to get the body to write into
1102 * and can use the ring_buffer_event_data() interface.
1103 *
1104 * The length is the length of the data needed, not the event length
1105 * which also includes the event header.
1106 *
1107 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1108 * If NULL is returned, then nothing has been allocated or locked.
1109 */
1110struct ring_buffer_event *
1111ring_buffer_lock_reserve(struct ring_buffer *buffer,
1112 unsigned long length,
1113 unsigned long *flags)
1114{
1115 struct ring_buffer_per_cpu *cpu_buffer;
1116 struct ring_buffer_event *event;
1117 int cpu, resched;
1118
1119 if (atomic_read(&buffer->record_disabled))
1120 return NULL;
1121
1122 /* If we are tracing schedule, we don't want to recurse */
1123 resched = need_resched();
1124 preempt_disable_notrace();
1125
1126 cpu = raw_smp_processor_id();
1127
1128 if (!cpu_isset(cpu, buffer->cpumask))
1129 goto out;
1130
1131 cpu_buffer = buffer->buffers[cpu];
1132
1133 if (atomic_read(&cpu_buffer->record_disabled))
1134 goto out;
1135
1136 length = rb_calculate_event_length(length);
1137 if (length > BUF_PAGE_SIZE)
1138 goto out;
1139
1140 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1141 if (!event)
1142 goto out;
1143
1144 /*
1145 * Need to store resched state on this cpu.
1146 * Only the first needs to.
1147 */
1148
1149 if (preempt_count() == 1)
1150 per_cpu(rb_need_resched, cpu) = resched;
1151
1152 return event;
1153
1154 out:
1155 if (resched)
1156		preempt_enable_no_resched_notrace();
1157 else
1158 preempt_enable_notrace();
1159 return NULL;
1160}
1161
1162static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1163 struct ring_buffer_event *event)
1164{
1165 cpu_buffer->entries++;
1166
1167 /* Only process further if we own the commit */
1168 if (!rb_is_commit(cpu_buffer, event))
1169 return;
1170
1171 cpu_buffer->write_stamp += event->time_delta;
1172
1173 rb_set_commit_to_write(cpu_buffer);
1174}
1175
1176/**
1177 * ring_buffer_unlock_commit - commit a reserved event
1178 * @buffer: The buffer to commit to
1179 * @event: The event pointer to commit.
1180 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1181 *
1182 * This commits the data to the ring buffer, and releases any locks held.
1183 *
1184 * Must be paired with ring_buffer_lock_reserve.
1185 */
1186int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1187 struct ring_buffer_event *event,
1188 unsigned long flags)
1189{
1190 struct ring_buffer_per_cpu *cpu_buffer;
1191 int cpu = raw_smp_processor_id();
1192
1193 cpu_buffer = buffer->buffers[cpu];
1194
1195 rb_commit(cpu_buffer, event);
1196
1197 /*
1198 * Only the last preempt count needs to restore preemption.
1199 */
1200 if (preempt_count() == 1) {
1201 if (per_cpu(rb_need_resched, cpu))
1202 preempt_enable_no_resched_notrace();
1203 else
1204 preempt_enable_notrace();
1205 } else
1206 preempt_enable_no_resched_notrace();
1207
1208 return 0;
1209}
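A hedged sketch of the reserve/commit pairing that the two kerneldoc blocks above describe; struct example_rec and example_trace() are hypothetical callers, not part of this patch:

/* Sketch: reserve space, fill it in place, then commit. */
struct example_rec {
	unsigned long ip;
	unsigned long parent_ip;
};

static void example_trace(struct ring_buffer *buffer,
			  unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct example_rec *rec;
	unsigned long flags;

	event = ring_buffer_lock_reserve(buffer, sizeof(*rec), &flags);
	if (!event)
		return;				/* recording disabled or no room */

	rec = ring_buffer_event_data(event);
	rec->ip = ip;
	rec->parent_ip = parent_ip;

	ring_buffer_unlock_commit(buffer, event, flags);
}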
1210
1211/**
1212 * ring_buffer_write - write data to the buffer without reserving
1213 * @buffer: The ring buffer to write to.
1214 * @length: The length of the data being written (excluding the event header)
1215 * @data: The data to write to the buffer.
1216 *
1217 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1218 * one function. If you already have the data to write to the buffer, it
1219 * may be easier to simply call this function.
1220 *
1221 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1222 * and not the length of the event which would hold the header.
1223 */
1224int ring_buffer_write(struct ring_buffer *buffer,
1225 unsigned long length,
1226 void *data)
1227{
1228 struct ring_buffer_per_cpu *cpu_buffer;
1229 struct ring_buffer_event *event;
1230 unsigned long event_length;
1231 void *body;
1232 int ret = -EBUSY;
1233 int cpu, resched;
1234
1235 if (atomic_read(&buffer->record_disabled))
1236 return -EBUSY;
1237
1238 resched = need_resched();
1239 preempt_disable_notrace();
1240
1241 cpu = raw_smp_processor_id();
1242
1243 if (!cpu_isset(cpu, buffer->cpumask))
1244 goto out;
1245
1246 cpu_buffer = buffer->buffers[cpu];
1247
1248 if (atomic_read(&cpu_buffer->record_disabled))
1249 goto out;
1250
1251 event_length = rb_calculate_event_length(length);
1252 event = rb_reserve_next_event(cpu_buffer,
1253 RINGBUF_TYPE_DATA, event_length);
1254 if (!event)
1255 goto out;
1256
1257 body = rb_event_data(event);
1258
1259 memcpy(body, data, length);
1260
1261 rb_commit(cpu_buffer, event);
1262
1263 ret = 0;
1264 out:
1265 if (resched)
1266 preempt_enable_no_resched_notrace();
1267 else
1268 preempt_enable_notrace();
1269
1270 return ret;
1271}
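The one-shot variant above can replace the reserve/commit pair when the payload already exists; a hedged sketch (example_log_value is hypothetical):

/* Sketch: copy a ready-made payload into the buffer in one call. */
static int example_log_value(struct ring_buffer *buffer, u64 value)
{
	/* length is the payload size only; the event header is added internally */
	return ring_buffer_write(buffer, sizeof(value), &value);
}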
1272
1273static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1274{
1275 struct buffer_page *reader = cpu_buffer->reader_page;
1276 struct buffer_page *head = cpu_buffer->head_page;
1277 struct buffer_page *commit = cpu_buffer->commit_page;
1278
1279 return reader->read == rb_page_commit(reader) &&
1280 (commit == reader ||
1281 (commit == head &&
1282 head->read == rb_page_commit(commit)));
1283}
1284
1285/**
1286 * ring_buffer_record_disable - stop all writes into the buffer
1287 * @buffer: The ring buffer to stop writes to.
1288 *
1289 * This prevents all writes to the buffer. Any attempt to write
1290 * to the buffer after this will fail and return NULL.
1291 *
1292 * The caller should call synchronize_sched() after this.
1293 */
1294void ring_buffer_record_disable(struct ring_buffer *buffer)
1295{
1296 atomic_inc(&buffer->record_disabled);
1297}
1298
1299/**
1300 * ring_buffer_record_enable - enable writes to the buffer
1301 * @buffer: The ring buffer to enable writes
1302 *
1303 * Note, multiple disables will need the same number of enables
1304 * to truly enable the writing (much like preempt_disable).
1305 */
1306void ring_buffer_record_enable(struct ring_buffer *buffer)
1307{
1308 atomic_dec(&buffer->record_disabled);
1309}
1310
1311/**
1312 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1313 * @buffer: The ring buffer to stop writes to.
1314 * @cpu: The CPU buffer to stop
1315 *
1316 * This prevents all writes to the buffer. Any attempt to write
1317 * to the buffer after this will fail and return NULL.
1318 *
1319 * The caller should call synchronize_sched() after this.
1320 */
1321void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1322{
1323 struct ring_buffer_per_cpu *cpu_buffer;
1324
1325 if (!cpu_isset(cpu, buffer->cpumask))
1326 return;
1327
1328 cpu_buffer = buffer->buffers[cpu];
1329 atomic_inc(&cpu_buffer->record_disabled);
1330}
1331
1332/**
1333 * ring_buffer_record_enable_cpu - enable writes to the buffer
1334 * @buffer: The ring buffer to enable writes
1335 * @cpu: The CPU to enable.
1336 *
1337 * Note, multiple disables will need the same number of enables
1338 * to truly enable the writing (much like preempt_disable).
1339 */
1340void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1341{
1342 struct ring_buffer_per_cpu *cpu_buffer;
1343
1344 if (!cpu_isset(cpu, buffer->cpumask))
1345 return;
1346
1347 cpu_buffer = buffer->buffers[cpu];
1348 atomic_dec(&cpu_buffer->record_disabled);
1349}
1350
1351/**
1352 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1353 * @buffer: The ring buffer
1354 * @cpu: The per CPU buffer to get the entries from.
1355 */
1356unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1357{
1358 struct ring_buffer_per_cpu *cpu_buffer;
1359
1360 if (!cpu_isset(cpu, buffer->cpumask))
1361 return 0;
1362
1363 cpu_buffer = buffer->buffers[cpu];
1364 return cpu_buffer->entries;
1365}
1366
1367/**
1368 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1369 * @buffer: The ring buffer
1370 * @cpu: The per CPU buffer to get the number of overruns from
1371 */
1372unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1373{
1374 struct ring_buffer_per_cpu *cpu_buffer;
1375
1376 if (!cpu_isset(cpu, buffer->cpumask))
1377 return 0;
1378
1379 cpu_buffer = buffer->buffers[cpu];
1380 return cpu_buffer->overrun;
1381}
1382
1383/**
1384 * ring_buffer_entries - get the number of entries in a buffer
1385 * @buffer: The ring buffer
1386 *
1387 * Returns the total number of entries in the ring buffer
1388 * (all CPU entries)
1389 */
1390unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1391{
1392 struct ring_buffer_per_cpu *cpu_buffer;
1393 unsigned long entries = 0;
1394 int cpu;
1395
1396 /* if you care about this being correct, lock the buffer */
1397 for_each_buffer_cpu(buffer, cpu) {
1398 cpu_buffer = buffer->buffers[cpu];
1399 entries += cpu_buffer->entries;
1400 }
1401
1402 return entries;
1403}
1404
1405/**
1406 * ring_buffer_overruns - get the number of overruns in the buffer
1407 * @buffer: The ring buffer
1408 *
1409 * Returns the total number of overruns in the ring buffer
1410 * (all CPU entries)
1411 */
1412unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1413{
1414 struct ring_buffer_per_cpu *cpu_buffer;
1415 unsigned long overruns = 0;
1416 int cpu;
1417
1418 /* if you care about this being correct, lock the buffer */
1419 for_each_buffer_cpu(buffer, cpu) {
1420 cpu_buffer = buffer->buffers[cpu];
1421 overruns += cpu_buffer->overrun;
1422 }
1423
1424 return overruns;
1425}
1426
1427/**
1428 * ring_buffer_iter_reset - reset an iterator
1429 * @iter: The iterator to reset
1430 *
1431 * Resets the iterator, so that it will start from the beginning
1432 * again.
1433 */
1434void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1435{
1436 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1437
1438 /* Iterator usage is expected to have record disabled */
1439 if (list_empty(&cpu_buffer->reader_page->list)) {
1440 iter->head_page = cpu_buffer->head_page;
1441 iter->head = cpu_buffer->head_page->read;
1442 } else {
1443 iter->head_page = cpu_buffer->reader_page;
1444 iter->head = cpu_buffer->reader_page->read;
1445 }
1446 if (iter->head)
1447 iter->read_stamp = cpu_buffer->read_stamp;
1448 else
1449 iter->read_stamp = iter->head_page->time_stamp;
1450}
1451
1452/**
1453 * ring_buffer_iter_empty - check if an iterator has no more to read
1454 * @iter: The iterator to check
1455 */
1456int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1457{
1458 struct ring_buffer_per_cpu *cpu_buffer;
1459
1460 cpu_buffer = iter->cpu_buffer;
1461
1462 return iter->head_page == cpu_buffer->commit_page &&
1463 iter->head == rb_commit_index(cpu_buffer);
1464}
1465
1466static void
1467rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1468 struct ring_buffer_event *event)
1469{
1470 u64 delta;
1471
1472 switch (event->type) {
1473 case RINGBUF_TYPE_PADDING:
1474 return;
1475
1476 case RINGBUF_TYPE_TIME_EXTEND:
1477 delta = event->array[0];
1478 delta <<= TS_SHIFT;
1479 delta += event->time_delta;
1480 cpu_buffer->read_stamp += delta;
1481 return;
1482
1483 case RINGBUF_TYPE_TIME_STAMP:
1484 /* FIXME: not implemented */
1485 return;
1486
1487 case RINGBUF_TYPE_DATA:
1488 cpu_buffer->read_stamp += event->time_delta;
1489 return;
1490
1491 default:
1492 BUG();
1493 }
1494 return;
1495}
1496
1497static void
1498rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1499 struct ring_buffer_event *event)
1500{
1501 u64 delta;
1502
1503 switch (event->type) {
1504 case RINGBUF_TYPE_PADDING:
1505 return;
1506
1507 case RINGBUF_TYPE_TIME_EXTEND:
1508 delta = event->array[0];
1509 delta <<= TS_SHIFT;
1510 delta += event->time_delta;
1511 iter->read_stamp += delta;
1512 return;
1513
1514 case RINGBUF_TYPE_TIME_STAMP:
1515 /* FIXME: not implemented */
1516 return;
1517
1518 case RINGBUF_TYPE_DATA:
1519 iter->read_stamp += event->time_delta;
1520 return;
1521
1522 default:
1523 BUG();
1524 }
1525 return;
1526}
1527
1528static struct buffer_page *
1529rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1530{
1531 struct buffer_page *reader = NULL;
1532 unsigned long flags;
1533
1534 spin_lock_irqsave(&cpu_buffer->lock, flags);
1535
1536 again:
1537 reader = cpu_buffer->reader_page;
1538
1539 /* If there's more to read, return this page */
1540 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1541 goto out;
1542
1543 /* Never should we have an index greater than the size */
1544 RB_WARN_ON(cpu_buffer,
1545 cpu_buffer->reader_page->read > rb_page_size(reader));
1546
1547 /* check if we caught up to the tail */
1548 reader = NULL;
1549 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1550 goto out;
1551
1552 /*
1553 * Splice the empty reader page into the list around the head.
1554 * Reset the reader page to size zero.
1555 */
1556
1557 reader = cpu_buffer->head_page;
1558 cpu_buffer->reader_page->list.next = reader->list.next;
1559 cpu_buffer->reader_page->list.prev = reader->list.prev;
1560
1561 local_set(&cpu_buffer->reader_page->write, 0);
1562 local_set(&cpu_buffer->reader_page->commit, 0);
1563
1564 /* Make the reader page now replace the head */
1565 reader->list.prev->next = &cpu_buffer->reader_page->list;
1566 reader->list.next->prev = &cpu_buffer->reader_page->list;
1567
1568 /*
1569 * If the tail is on the reader, then we must set the head
1570 * to the inserted page, otherwise we set it one before.
1571 */
1572 cpu_buffer->head_page = cpu_buffer->reader_page;
1573
1574 if (cpu_buffer->commit_page != reader)
1575 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1576
1577 /* Finally update the reader page to the new head */
1578 cpu_buffer->reader_page = reader;
1579 rb_reset_reader_page(cpu_buffer);
1580
1581 goto again;
1582
1583 out:
1584 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1585
1586 return reader;
1587}
1588
1589static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1590{
1591 struct ring_buffer_event *event;
1592 struct buffer_page *reader;
1593 unsigned length;
1594
1595 reader = rb_get_reader_page(cpu_buffer);
1596
1597 /* This function should not be called when buffer is empty */
1598 BUG_ON(!reader);
1599
1600 event = rb_reader_event(cpu_buffer);
1601
1602 if (event->type == RINGBUF_TYPE_DATA)
1603 cpu_buffer->entries--;
1604
1605 rb_update_read_stamp(cpu_buffer, event);
1606
1607 length = rb_event_length(event);
1608 cpu_buffer->reader_page->read += length;
1609}
1610
1611static void rb_advance_iter(struct ring_buffer_iter *iter)
1612{
1613 struct ring_buffer *buffer;
1614 struct ring_buffer_per_cpu *cpu_buffer;
1615 struct ring_buffer_event *event;
1616 unsigned length;
1617
1618 cpu_buffer = iter->cpu_buffer;
1619 buffer = cpu_buffer->buffer;
1620
1621 /*
1622 * Check if we are at the end of the buffer.
1623 */
1624 if (iter->head >= rb_page_size(iter->head_page)) {
1625 BUG_ON(iter->head_page == cpu_buffer->commit_page);
1626 rb_inc_iter(iter);
1627 return;
1628 }
1629
1630 event = rb_iter_head_event(iter);
1631
1632 length = rb_event_length(event);
1633
1634 /*
1635 * This should not be called to advance the header if we are
1636 * at the tail of the buffer.
1637 */
1638 BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1639 (iter->head + length > rb_commit_index(cpu_buffer)));
1640
1641 rb_update_iter_read_stamp(iter, event);
1642
1643 iter->head += length;
1644
1645 /* check for end of page padding */
1646 if ((iter->head >= rb_page_size(iter->head_page)) &&
1647 (iter->head_page != cpu_buffer->commit_page))
1648 rb_advance_iter(iter);
1649}
1650
1651/**
1652 * ring_buffer_peek - peek at the next event to be read
1653 * @buffer: The ring buffer to read
1654 * @cpu: The cpu to peek at
1655 * @ts: The timestamp counter of this event.
1656 *
1657 * This will return the event that will be read next, but does
1658 * not consume the data.
1659 */
1660struct ring_buffer_event *
1661ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1662{
1663 struct ring_buffer_per_cpu *cpu_buffer;
1664 struct ring_buffer_event *event;
1665 struct buffer_page *reader;
1666
1667 if (!cpu_isset(cpu, buffer->cpumask))
1668 return NULL;
1669
1670 cpu_buffer = buffer->buffers[cpu];
1671
1672 again:
1673 reader = rb_get_reader_page(cpu_buffer);
1674 if (!reader)
1675 return NULL;
1676
1677 event = rb_reader_event(cpu_buffer);
1678
1679 switch (event->type) {
1680 case RINGBUF_TYPE_PADDING:
1681 RB_WARN_ON(cpu_buffer, 1);
1682 rb_advance_reader(cpu_buffer);
1683 return NULL;
1684
1685 case RINGBUF_TYPE_TIME_EXTEND:
1686 /* Internal data, OK to advance */
1687 rb_advance_reader(cpu_buffer);
1688 goto again;
1689
1690 case RINGBUF_TYPE_TIME_STAMP:
1691 /* FIXME: not implemented */
1692 rb_advance_reader(cpu_buffer);
1693 goto again;
1694
1695 case RINGBUF_TYPE_DATA:
1696 if (ts) {
1697 *ts = cpu_buffer->read_stamp + event->time_delta;
1698 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1699 }
1700 return event;
1701
1702 default:
1703 BUG();
1704 }
1705
1706 return NULL;
1707}
1708
1709/**
1710 * ring_buffer_iter_peek - peek at the next event to be read
1711 * @iter: The ring buffer iterator
1712 * @ts: The timestamp counter of this event.
1713 *
1714 * This will return the event that will be read next, but does
1715 * not increment the iterator.
1716 */
1717struct ring_buffer_event *
1718ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1719{
1720 struct ring_buffer *buffer;
1721 struct ring_buffer_per_cpu *cpu_buffer;
1722 struct ring_buffer_event *event;
1723
1724 if (ring_buffer_iter_empty(iter))
1725 return NULL;
1726
1727 cpu_buffer = iter->cpu_buffer;
1728 buffer = cpu_buffer->buffer;
1729
1730 again:
1731 if (rb_per_cpu_empty(cpu_buffer))
1732 return NULL;
1733
1734 event = rb_iter_head_event(iter);
1735
1736 switch (event->type) {
1737 case RINGBUF_TYPE_PADDING:
1738 rb_inc_iter(iter);
1739 goto again;
1740
1741 case RINGBUF_TYPE_TIME_EXTEND:
1742 /* Internal data, OK to advance */
1743 rb_advance_iter(iter);
1744 goto again;
1745
1746 case RINGBUF_TYPE_TIME_STAMP:
1747 /* FIXME: not implemented */
1748 rb_advance_iter(iter);
1749 goto again;
1750
1751 case RINGBUF_TYPE_DATA:
1752 if (ts) {
1753 *ts = iter->read_stamp + event->time_delta;
1754 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1755 }
1756 return event;
1757
1758 default:
1759 BUG();
1760 }
1761
1762 return NULL;
1763}
1764
1765/**
1766 * ring_buffer_consume - return an event and consume it
1767 * @buffer: The ring buffer to get the next event from
1768 *
1769 * Returns the next event in the ring buffer, and that event is consumed.
1770 * Meaning, that sequential reads will keep returning a different event,
1771 * and eventually empty the ring buffer if the producer is slower.
1772 */
1773struct ring_buffer_event *
1774ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1775{
1776 struct ring_buffer_per_cpu *cpu_buffer;
1777 struct ring_buffer_event *event;
1778
1779 if (!cpu_isset(cpu, buffer->cpumask))
1780 return NULL;
1781
1782 event = ring_buffer_peek(buffer, cpu, ts);
1783 if (!event)
1784 return NULL;
1785
1786 cpu_buffer = buffer->buffers[cpu];
1787 rb_advance_reader(cpu_buffer);
1788
1789 return event;
1790}
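A hedged sketch of a consuming read loop built on the function above; example_drain_cpu is hypothetical, and the pr_info() output format is purely illustrative:

/* Sketch: drain everything currently readable on one CPU. */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
		void *data = ring_buffer_event_data(event);

		pr_info("cpu%d ts=%llu len=%u data=%p\n",
			cpu, (unsigned long long)ts,
			ring_buffer_event_length(event), data);
	}
}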
1791
1792/**
1793 * ring_buffer_read_start - start a non consuming read of the buffer
1794 * @buffer: The ring buffer to read from
1795 * @cpu: The cpu buffer to iterate over
1796 *
1797 * This starts up an iteration through the buffer. It also disables
1798 * the recording to the buffer until the reading is finished.
1799 * This prevents the reading from being corrupted. This is not
1800 * a consuming read, so a producer is not expected.
1801 *
1802 * Must be paired with ring_buffer_read_finish.
1803 */
1804struct ring_buffer_iter *
1805ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1806{
1807 struct ring_buffer_per_cpu *cpu_buffer;
1808 struct ring_buffer_iter *iter;
1809 unsigned long flags;
1810
1811 if (!cpu_isset(cpu, buffer->cpumask))
1812 return NULL;
1813
1814 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1815 if (!iter)
1816 return NULL;
1817
1818 cpu_buffer = buffer->buffers[cpu];
1819
1820 iter->cpu_buffer = cpu_buffer;
1821
1822 atomic_inc(&cpu_buffer->record_disabled);
1823 synchronize_sched();
1824
1825 spin_lock_irqsave(&cpu_buffer->lock, flags);
1826 ring_buffer_iter_reset(iter);
1827 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1828
1829 return iter;
1830}
1831
1832/**
1833 * ring_buffer_read_finish - finish reading the iterator of the buffer
1834 * @iter: The iterator retrieved by ring_buffer_read_start
1835 *
1836 * This re-enables the recording to the buffer, and frees the
1837 * iterator.
1838 */
1839void
1840ring_buffer_read_finish(struct ring_buffer_iter *iter)
1841{
1842 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1843
1844 atomic_dec(&cpu_buffer->record_disabled);
1845 kfree(iter);
1846}
1847
1848/**
1849 * ring_buffer_read - read the next item in the ring buffer by the iterator
1850 * @iter: The ring buffer iterator
1851 * @ts: The time stamp of the event read.
1852 *
1853 * This reads the next event in the ring buffer and increments the iterator.
1854 */
1855struct ring_buffer_event *
1856ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1857{
1858 struct ring_buffer_event *event;
1859
1860 event = ring_buffer_iter_peek(iter, ts);
1861 if (!event)
1862 return NULL;
1863
1864 rb_advance_iter(iter);
1865
1866 return event;
1867}
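The non-consuming counterpart, pairing ring_buffer_read_start(), ring_buffer_read() and ring_buffer_read_finish() as the kerneldoc above requires; recording stays disabled for the whole walk (example_walk_cpu is hypothetical):

/* Sketch: iterate over one CPU buffer without consuming it. */
static void example_walk_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)) != NULL)
		pr_info("cpu%d ts=%llu len=%u\n", cpu,
			(unsigned long long)ts,
			ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);	/* re-enables recording, frees iter */
}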
1868
1869/**
1870 * ring_buffer_size - return the size of the ring buffer (in bytes)
1871 * @buffer: The ring buffer.
1872 */
1873unsigned long ring_buffer_size(struct ring_buffer *buffer)
1874{
1875 return BUF_PAGE_SIZE * buffer->pages;
1876}
1877
1878static void
1879rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1880{
1881 cpu_buffer->head_page
1882 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1883 local_set(&cpu_buffer->head_page->write, 0);
1884 local_set(&cpu_buffer->head_page->commit, 0);
1885
1886 cpu_buffer->head_page->read = 0;
1887
1888 cpu_buffer->tail_page = cpu_buffer->head_page;
1889 cpu_buffer->commit_page = cpu_buffer->head_page;
1890
1891 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1892 local_set(&cpu_buffer->reader_page->write, 0);
1893 local_set(&cpu_buffer->reader_page->commit, 0);
1894 cpu_buffer->reader_page->read = 0;
1895
1896 cpu_buffer->overrun = 0;
1897 cpu_buffer->entries = 0;
1898}
1899
1900/**
1901 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1902 * @buffer: The ring buffer to reset a per cpu buffer of
1903 * @cpu: The CPU buffer to be reset
1904 */
1905void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
1906{
1907 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1908 unsigned long flags;
1909
1910 if (!cpu_isset(cpu, buffer->cpumask))
1911 return;
1912
1913 spin_lock_irqsave(&cpu_buffer->lock, flags);
1914
1915 rb_reset_cpu(cpu_buffer);
1916
1917 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1918}
1919
1920/**
1921 * ring_buffer_reset - reset a ring buffer
1922 * @buffer: The ring buffer to reset all cpu buffers
1923 */
1924void ring_buffer_reset(struct ring_buffer *buffer)
1925{
1926 int cpu;
1927
1928 for_each_buffer_cpu(buffer, cpu)
1929 ring_buffer_reset_cpu(buffer, cpu);
1930}
1931
1932/**
1933 * ring_buffer_empty - is the ring buffer empty?
1934 * @buffer: The ring buffer to test
1935 */
1936int ring_buffer_empty(struct ring_buffer *buffer)
1937{
1938 struct ring_buffer_per_cpu *cpu_buffer;
1939 int cpu;
1940
1941 /* yes this is racy, but if you don't like the race, lock the buffer */
1942 for_each_buffer_cpu(buffer, cpu) {
1943 cpu_buffer = buffer->buffers[cpu];
1944 if (!rb_per_cpu_empty(cpu_buffer))
1945 return 0;
1946 }
1947 return 1;
1948}
1949
1950/**
1951 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
1952 * @buffer: The ring buffer
1953 * @cpu: The CPU buffer to test
1954 */
1955int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
1956{
1957 struct ring_buffer_per_cpu *cpu_buffer;
1958
1959 if (!cpu_isset(cpu, buffer->cpumask))
1960 return 1;
1961
1962 cpu_buffer = buffer->buffers[cpu];
1963 return rb_per_cpu_empty(cpu_buffer);
1964}
1965
1966/**
1967 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
1968 * @buffer_a: One buffer to swap with
1969 * @buffer_b: The other buffer to swap with
1970 *
1971 * This function is useful for tracers that want to take a "snapshot"
1972 * of a CPU buffer and have another backup buffer lying around.
1973 * It is expected that the tracer handles the cpu buffer not being
1974 * used at the moment.
1975 */
1976int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
1977 struct ring_buffer *buffer_b, int cpu)
1978{
1979 struct ring_buffer_per_cpu *cpu_buffer_a;
1980 struct ring_buffer_per_cpu *cpu_buffer_b;
1981
1982 if (!cpu_isset(cpu, buffer_a->cpumask) ||
1983 !cpu_isset(cpu, buffer_b->cpumask))
1984 return -EINVAL;
1985
1986 /* At least make sure the two buffers are somewhat the same */
1987 if (buffer_a->size != buffer_b->size ||
1988 buffer_a->pages != buffer_b->pages)
1989 return -EINVAL;
1990
1991 cpu_buffer_a = buffer_a->buffers[cpu];
1992 cpu_buffer_b = buffer_b->buffers[cpu];
1993
1994 /*
1995 * We can't do a synchronize_sched here because this
1996 * function can be called in atomic context.
1997 * Normally this will be called from the same CPU as cpu.
1998 * If not it's up to the caller to protect this.
1999 */
2000 atomic_inc(&cpu_buffer_a->record_disabled);
2001 atomic_inc(&cpu_buffer_b->record_disabled);
2002
2003 buffer_a->buffers[cpu] = cpu_buffer_b;
2004 buffer_b->buffers[cpu] = cpu_buffer_a;
2005
2006 cpu_buffer_b->buffer = buffer_a;
2007 cpu_buffer_a->buffer = buffer_b;
2008
2009 atomic_dec(&cpu_buffer_a->record_disabled);
2010 atomic_dec(&cpu_buffer_b->record_disabled);
2011
2012 return 0;
2013}
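A hedged snapshot sketch using the swap above: exchange the live per-CPU buffer with an idle spare, then read the spare at leisure (example_snapshot_cpu is hypothetical; example_drain_cpu refers to the consuming-read sketch earlier):

/* Sketch: per-CPU snapshot into a same-sized spare buffer. */
static void example_snapshot_cpu(struct ring_buffer *live,
				 struct ring_buffer *spare, int cpu)
{
	if (ring_buffer_swap_cpu(live, spare, cpu))
		return;			/* sizes or cpumasks did not match */

	/* "spare" now holds what "live" had recorded for this CPU */
	example_drain_cpu(spare, cpu);
}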
2014
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8f3fb3db61c3..d345d649d073 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
14#include <linux/utsrelease.h> 14#include <linux/utsrelease.h>
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/notifier.h>
17#include <linux/debugfs.h> 18#include <linux/debugfs.h>
18#include <linux/pagemap.h> 19#include <linux/pagemap.h>
19#include <linux/hardirq.h> 20#include <linux/hardirq.h>
@@ -22,6 +23,7 @@
22#include <linux/ftrace.h> 23#include <linux/ftrace.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/kdebug.h>
25#include <linux/ctype.h> 27#include <linux/ctype.h>
26#include <linux/init.h> 28#include <linux/init.h>
27#include <linux/poll.h> 29#include <linux/poll.h>
@@ -31,25 +33,36 @@
31#include <linux/writeback.h> 33#include <linux/writeback.h>
32 34
33#include <linux/stacktrace.h> 35#include <linux/stacktrace.h>
36#include <linux/ring_buffer.h>
34 37
35#include "trace.h" 38#include "trace.h"
36 39
40#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
41
37unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 42unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
38unsigned long __read_mostly tracing_thresh; 43unsigned long __read_mostly tracing_thresh;
39 44
40static unsigned long __read_mostly tracing_nr_buffers; 45static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
46
47static inline void ftrace_disable_cpu(void)
48{
49 preempt_disable();
50 local_inc(&__get_cpu_var(ftrace_cpu_disabled));
51}
52
53static inline void ftrace_enable_cpu(void)
54{
55 local_dec(&__get_cpu_var(ftrace_cpu_disabled));
56 preempt_enable();
57}
58
41static cpumask_t __read_mostly tracing_buffer_mask; 59static cpumask_t __read_mostly tracing_buffer_mask;
42 60
43#define for_each_tracing_cpu(cpu) \ 61#define for_each_tracing_cpu(cpu) \
44 for_each_cpu_mask(cpu, tracing_buffer_mask) 62 for_each_cpu_mask(cpu, tracing_buffer_mask)
45 63
46static int trace_alloc_page(void);
47static int trace_free_page(void);
48
49static int tracing_disabled = 1; 64static int tracing_disabled = 1;
50 65
51static unsigned long tracing_pages_allocated;
52
53long 66long
54ns2usecs(cycle_t nsec) 67ns2usecs(cycle_t nsec)
55{ 68{
@@ -60,7 +73,9 @@ ns2usecs(cycle_t nsec)
60 73
61cycle_t ftrace_now(int cpu) 74cycle_t ftrace_now(int cpu)
62{ 75{
63 return cpu_clock(cpu); 76 u64 ts = ring_buffer_time_stamp(cpu);
77 ring_buffer_normalize_time_stamp(cpu, &ts);
78 return ts;
64} 79}
65 80
66/* 81/*
@@ -100,11 +115,18 @@ static int tracer_enabled = 1;
100int ftrace_function_enabled; 115int ftrace_function_enabled;
101 116
102/* 117/*
103 * trace_nr_entries is the number of entries that is allocated 118 * trace_buf_size is the size in bytes that is allocated
104 * for a buffer. Note, the number of entries is always rounded 119 * for a buffer. Note, the number of bytes is always rounded
105 * to ENTRIES_PER_PAGE. 120 * to page size.
121 *
122 * This number is purposely set to a low number of 16384.
 123 * If a dump on oops happens, it is much appreciated not to have
 124 * to wait for all that output. In any case, this is configurable
 125 * at boot time and at run time.
106 */ 126 */
107static unsigned long trace_nr_entries = 65536UL; 127#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
128
129static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
108 130
109/* trace_types holds a link list of available tracers. */ 131/* trace_types holds a link list of available tracers. */
110static struct tracer *trace_types __read_mostly; 132static struct tracer *trace_types __read_mostly;
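
The new ftrace_cpu_disabled counter and its ftrace_disable_cpu()/ftrace_enable_cpu() helpers exist to keep the function tracer from recursing into the ring buffer: trace_function() below bails out whenever the per-cpu counter is non-zero. Every read or reset of the buffer in the rest of this patch is bracketed the same way; a minimal sketch (the wrapper is illustrative, the calls inside are the ones this patch uses):

	/*
	 * Sketch: consume one event from a CPU's buffer without letting the
	 * function tracer trace the ring-buffer code and write into the very
	 * buffer being read.
	 */
	static void consume_one_event(struct trace_array *tr, int cpu, u64 *ts)
	{
		ftrace_disable_cpu();
		ring_buffer_consume(tr->buffer, cpu, ts);
		ftrace_enable_cpu();
	}
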
@@ -133,24 +155,6 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
133/* trace_flags holds iter_ctrl options */ 155/* trace_flags holds iter_ctrl options */
134unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 156unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
135 157
136static notrace void no_trace_init(struct trace_array *tr)
137{
138 int cpu;
139
140 ftrace_function_enabled = 0;
141 if(tr->ctrl)
142 for_each_online_cpu(cpu)
143 tracing_reset(tr->data[cpu]);
144 tracer_enabled = 0;
145}
146
147/* dummy trace to disable tracing */
148static struct tracer no_tracer __read_mostly = {
149 .name = "none",
150 .init = no_trace_init
151};
152
153
154/** 158/**
155 * trace_wake_up - wake up tasks waiting for trace input 159 * trace_wake_up - wake up tasks waiting for trace input
156 * 160 *
@@ -167,23 +171,21 @@ void trace_wake_up(void)
167 wake_up(&trace_wait); 171 wake_up(&trace_wait);
168} 172}
169 173
170#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry)) 174static int __init set_buf_size(char *str)
171
172static int __init set_nr_entries(char *str)
173{ 175{
174 unsigned long nr_entries; 176 unsigned long buf_size;
175 int ret; 177 int ret;
176 178
177 if (!str) 179 if (!str)
178 return 0; 180 return 0;
179 ret = strict_strtoul(str, 0, &nr_entries); 181 ret = strict_strtoul(str, 0, &buf_size);
180 /* nr_entries can not be zero */ 182 /* buf_size cannot be zero */
181 if (ret < 0 || nr_entries == 0) 183 if (ret < 0 || buf_size == 0)
182 return 0; 184 return 0;
183 trace_nr_entries = nr_entries; 185 trace_buf_size = buf_size;
184 return 1; 186 return 1;
185} 187}
186__setup("trace_entries=", set_nr_entries); 188__setup("trace_buf_size=", set_buf_size);
187 189
188unsigned long nsecs_to_usecs(unsigned long nsecs) 190unsigned long nsecs_to_usecs(unsigned long nsecs)
189{ 191{
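
With the rename, the size of each per-cpu buffer is now requested in bytes rather than entries, so the boot parameter changes too: booting with, for example, trace_buf_size=1441792 asks for the same 16384-entry default documented above, and the ring buffer rounds whatever value is passed to whole pages.
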
@@ -191,21 +193,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
191} 193}
192 194
193/* 195/*
194 * trace_flag_type is an enumeration that holds different
195 * states when a trace occurs. These are:
196 * IRQS_OFF - interrupts were disabled
197 * NEED_RESCED - reschedule is requested
198 * HARDIRQ - inside an interrupt handler
199 * SOFTIRQ - inside a softirq handler
200 */
201enum trace_flag_type {
202 TRACE_FLAG_IRQS_OFF = 0x01,
203 TRACE_FLAG_NEED_RESCHED = 0x02,
204 TRACE_FLAG_HARDIRQ = 0x04,
205 TRACE_FLAG_SOFTIRQ = 0x08,
206};
207
208/*
209 * TRACE_ITER_SYM_MASK masks the options in trace_flags that 196 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
210 * control the output of kernel symbols. 197 * control the output of kernel symbols.
211 */ 198 */
@@ -224,6 +211,7 @@ static const char *trace_options[] = {
224 "block", 211 "block",
225 "stacktrace", 212 "stacktrace",
226 "sched-tree", 213 "sched-tree",
214 "ftrace_printk",
227 NULL 215 NULL
228}; 216};
229 217
@@ -266,54 +254,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
266 tracing_record_cmdline(current); 254 tracing_record_cmdline(current);
267} 255}
268 256
269#define CHECK_COND(cond) \
270 if (unlikely(cond)) { \
271 tracing_disabled = 1; \
272 WARN_ON(1); \
273 return -1; \
274 }
275
276/**
277 * check_pages - integrity check of trace buffers
278 *
279 * As a safty measure we check to make sure the data pages have not
280 * been corrupted.
281 */
282int check_pages(struct trace_array_cpu *data)
283{
284 struct page *page, *tmp;
285
286 CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
287 CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
288
289 list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
290 CHECK_COND(page->lru.next->prev != &page->lru);
291 CHECK_COND(page->lru.prev->next != &page->lru);
292 }
293
294 return 0;
295}
296
297/**
298 * head_page - page address of the first page in per_cpu buffer.
299 *
300 * head_page returns the page address of the first page in
301 * a per_cpu buffer. This also preforms various consistency
302 * checks to make sure the buffer has not been corrupted.
303 */
304void *head_page(struct trace_array_cpu *data)
305{
306 struct page *page;
307
308 if (list_empty(&data->trace_pages))
309 return NULL;
310
311 page = list_entry(data->trace_pages.next, struct page, lru);
312 BUG_ON(&page->lru == &data->trace_pages);
313
314 return page_address(page);
315}
316
317/** 257/**
318 * trace_seq_printf - sequence printing of trace information 258 * trace_seq_printf - sequence printing of trace information
319 * @s: trace sequence descriptor 259 * @s: trace sequence descriptor
@@ -395,28 +335,23 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
395 return len; 335 return len;
396} 336}
397 337
398#define HEX_CHARS 17 338#define MAX_MEMHEX_BYTES 8
399static const char hex2asc[] = "0123456789abcdef"; 339#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
400 340
401static int 341static int
402trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) 342trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
403{ 343{
404 unsigned char hex[HEX_CHARS]; 344 unsigned char hex[HEX_CHARS];
405 unsigned char *data = mem; 345 unsigned char *data = mem;
406 unsigned char byte;
407 int i, j; 346 int i, j;
408 347
409 BUG_ON(len >= HEX_CHARS);
410
411#ifdef __BIG_ENDIAN 348#ifdef __BIG_ENDIAN
412 for (i = 0, j = 0; i < len; i++) { 349 for (i = 0, j = 0; i < len; i++) {
413#else 350#else
414 for (i = len-1, j = 0; i >= 0; i--) { 351 for (i = len-1, j = 0; i >= 0; i--) {
415#endif 352#endif
416 byte = data[i]; 353 hex[j++] = hex_asc_hi(data[i]);
417 354 hex[j++] = hex_asc_lo(data[i]);
418 hex[j++] = hex2asc[byte & 0x0f];
419 hex[j++] = hex2asc[byte >> 4];
420 } 355 }
421 hex[j++] = ' '; 356 hex[j++] = ' ';
422 357
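
The rewrite replaces the private hex2asc[] lookup (which emitted the low nibble of each byte first) with the kernel's hex_asc_hi()/hex_asc_lo() helpers, which put the high nibble first, and it caps the input at MAX_MEMHEX_BYTES so the stack buffer cannot overflow. A small stand-alone sketch of the same byte-to-hex walk, with the two helpers re-declared locally for illustration:

	#include <stdio.h>

	/* local stand-ins for the kernel's hex_asc_hi()/hex_asc_lo() helpers */
	static const char hex_asc[] = "0123456789abcdef";
	#define hex_asc_hi(x)	hex_asc[((x) >> 4) & 0x0f]
	#define hex_asc_lo(x)	hex_asc[(x) & 0x0f]

	int main(void)
	{
		unsigned short val = 0x1234;	/* stored as bytes 34 12 on little-endian */
		unsigned char *data = (unsigned char *)&val;
		char hex[2 * sizeof(val) + 2];
		int i, j = 0;

		/* non-big-endian branch: walk from the last byte down, as in the patch */
		for (i = (int)sizeof(val) - 1; i >= 0; i--) {
			hex[j++] = hex_asc_hi(data[i]);
			hex[j++] = hex_asc_lo(data[i]);
		}
		hex[j++] = ' ';
		hex[j] = '\0';
		printf("%s\n", hex);	/* "1234 " - most significant byte first */
		return 0;
	}
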
@@ -460,34 +395,6 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
460 trace_seq_reset(s); 395 trace_seq_reset(s);
461} 396}
462 397
463/*
464 * flip the trace buffers between two trace descriptors.
465 * This usually is the buffers between the global_trace and
466 * the max_tr to record a snapshot of a current trace.
467 *
468 * The ftrace_max_lock must be held.
469 */
470static void
471flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
472{
473 struct list_head flip_pages;
474
475 INIT_LIST_HEAD(&flip_pages);
476
477 memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
478 sizeof(struct trace_array_cpu) -
479 offsetof(struct trace_array_cpu, trace_head_idx));
480
481 check_pages(tr1);
482 check_pages(tr2);
483 list_splice_init(&tr1->trace_pages, &flip_pages);
484 list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
485 list_splice_init(&flip_pages, &tr2->trace_pages);
486 BUG_ON(!list_empty(&flip_pages));
487 check_pages(tr1);
488 check_pages(tr2);
489}
490
491/** 398/**
492 * update_max_tr - snapshot all trace buffers from global_trace to max_tr 399 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
493 * @tr: tracer 400 * @tr: tracer
@@ -500,17 +407,17 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
500void 407void
501update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 408update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
502{ 409{
503 struct trace_array_cpu *data; 410 struct ring_buffer *buf = tr->buffer;
504 int i;
505 411
506 WARN_ON_ONCE(!irqs_disabled()); 412 WARN_ON_ONCE(!irqs_disabled());
507 __raw_spin_lock(&ftrace_max_lock); 413 __raw_spin_lock(&ftrace_max_lock);
508 /* clear out all the previous traces */ 414
509 for_each_tracing_cpu(i) { 415 tr->buffer = max_tr.buffer;
510 data = tr->data[i]; 416 max_tr.buffer = buf;
511 flip_trace(max_tr.data[i], data); 417
512 tracing_reset(data); 418 ftrace_disable_cpu();
513 } 419 ring_buffer_reset(tr->buffer);
420 ftrace_enable_cpu();
514 421
515 __update_max_tr(tr, tsk, cpu); 422 __update_max_tr(tr, tsk, cpu);
516 __raw_spin_unlock(&ftrace_max_lock); 423 __raw_spin_unlock(&ftrace_max_lock);
@@ -527,16 +434,19 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
527void 434void
528update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 435update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
529{ 436{
530 struct trace_array_cpu *data = tr->data[cpu]; 437 int ret;
531 int i;
532 438
533 WARN_ON_ONCE(!irqs_disabled()); 439 WARN_ON_ONCE(!irqs_disabled());
534 __raw_spin_lock(&ftrace_max_lock); 440 __raw_spin_lock(&ftrace_max_lock);
535 for_each_tracing_cpu(i)
536 tracing_reset(max_tr.data[i]);
537 441
538 flip_trace(max_tr.data[cpu], data); 442 ftrace_disable_cpu();
539 tracing_reset(data); 443
444 ring_buffer_reset(max_tr.buffer);
445 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
446
447 ftrace_enable_cpu();
448
449 WARN_ON_ONCE(ret);
540 450
541 __update_max_tr(tr, tsk, cpu); 451 __update_max_tr(tr, tsk, cpu);
542 __raw_spin_unlock(&ftrace_max_lock); 452 __raw_spin_unlock(&ftrace_max_lock);
@@ -573,7 +483,6 @@ int register_tracer(struct tracer *type)
573#ifdef CONFIG_FTRACE_STARTUP_TEST 483#ifdef CONFIG_FTRACE_STARTUP_TEST
574 if (type->selftest) { 484 if (type->selftest) {
575 struct tracer *saved_tracer = current_trace; 485 struct tracer *saved_tracer = current_trace;
576 struct trace_array_cpu *data;
577 struct trace_array *tr = &global_trace; 486 struct trace_array *tr = &global_trace;
578 int saved_ctrl = tr->ctrl; 487 int saved_ctrl = tr->ctrl;
579 int i; 488 int i;
@@ -585,10 +494,7 @@ int register_tracer(struct tracer *type)
585 * If we fail, we do not register this tracer. 494 * If we fail, we do not register this tracer.
586 */ 495 */
587 for_each_tracing_cpu(i) { 496 for_each_tracing_cpu(i) {
588 data = tr->data[i]; 497 tracing_reset(tr, i);
589 if (!head_page(data))
590 continue;
591 tracing_reset(data);
592 } 498 }
593 current_trace = type; 499 current_trace = type;
594 tr->ctrl = 0; 500 tr->ctrl = 0;
@@ -604,10 +510,7 @@ int register_tracer(struct tracer *type)
604 } 510 }
605 /* Only reset on passing, to avoid touching corrupted buffers */ 511 /* Only reset on passing, to avoid touching corrupted buffers */
606 for_each_tracing_cpu(i) { 512 for_each_tracing_cpu(i) {
607 data = tr->data[i]; 513 tracing_reset(tr, i);
608 if (!head_page(data))
609 continue;
610 tracing_reset(data);
611 } 514 }
612 printk(KERN_CONT "PASSED\n"); 515 printk(KERN_CONT "PASSED\n");
613 } 516 }
@@ -653,13 +556,11 @@ void unregister_tracer(struct tracer *type)
653 mutex_unlock(&trace_types_lock); 556 mutex_unlock(&trace_types_lock);
654} 557}
655 558
656void tracing_reset(struct trace_array_cpu *data) 559void tracing_reset(struct trace_array *tr, int cpu)
657{ 560{
658 data->trace_idx = 0; 561 ftrace_disable_cpu();
659 data->overrun = 0; 562 ring_buffer_reset_cpu(tr->buffer, cpu);
660 data->trace_head = data->trace_tail = head_page(data); 563 ftrace_enable_cpu();
661 data->trace_head_idx = 0;
662 data->trace_tail_idx = 0;
663} 564}
664 565
665#define SAVED_CMDLINES 128 566#define SAVED_CMDLINES 128
@@ -745,82 +646,16 @@ void tracing_record_cmdline(struct task_struct *tsk)
745 trace_save_cmdline(tsk); 646 trace_save_cmdline(tsk);
746} 647}
747 648
748static inline struct list_head * 649void
749trace_next_list(struct trace_array_cpu *data, struct list_head *next) 650tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
750{ 651 int pc)
751 /*
752 * Roundrobin - but skip the head (which is not a real page):
753 */
754 next = next->next;
755 if (unlikely(next == &data->trace_pages))
756 next = next->next;
757 BUG_ON(next == &data->trace_pages);
758
759 return next;
760}
761
762static inline void *
763trace_next_page(struct trace_array_cpu *data, void *addr)
764{
765 struct list_head *next;
766 struct page *page;
767
768 page = virt_to_page(addr);
769
770 next = trace_next_list(data, &page->lru);
771 page = list_entry(next, struct page, lru);
772
773 return page_address(page);
774}
775
776static inline struct trace_entry *
777tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
778{
779 unsigned long idx, idx_next;
780 struct trace_entry *entry;
781
782 data->trace_idx++;
783 idx = data->trace_head_idx;
784 idx_next = idx + 1;
785
786 BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
787
788 entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
789
790 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
791 data->trace_head = trace_next_page(data, data->trace_head);
792 idx_next = 0;
793 }
794
795 if (data->trace_head == data->trace_tail &&
796 idx_next == data->trace_tail_idx) {
797 /* overrun */
798 data->overrun++;
799 data->trace_tail_idx++;
800 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
801 data->trace_tail =
802 trace_next_page(data, data->trace_tail);
803 data->trace_tail_idx = 0;
804 }
805 }
806
807 data->trace_head_idx = idx_next;
808
809 return entry;
810}
811
812static inline void
813tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
814{ 652{
815 struct task_struct *tsk = current; 653 struct task_struct *tsk = current;
816 unsigned long pc;
817
818 pc = preempt_count();
819 654
820 entry->preempt_count = pc & 0xff; 655 entry->preempt_count = pc & 0xff;
821 entry->pid = (tsk) ? tsk->pid : 0; 656 entry->pid = (tsk) ? tsk->pid : 0;
822 entry->t = ftrace_now(raw_smp_processor_id()); 657 entry->flags =
823 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 658 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
824 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | 659 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
825 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | 660 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
826 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); 661 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -828,145 +663,139 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
828 663
829void 664void
830trace_function(struct trace_array *tr, struct trace_array_cpu *data, 665trace_function(struct trace_array *tr, struct trace_array_cpu *data,
831 unsigned long ip, unsigned long parent_ip, unsigned long flags) 666 unsigned long ip, unsigned long parent_ip, unsigned long flags,
667 int pc)
832{ 668{
833 struct trace_entry *entry; 669 struct ring_buffer_event *event;
670 struct ftrace_entry *entry;
834 unsigned long irq_flags; 671 unsigned long irq_flags;
835 672
836 raw_local_irq_save(irq_flags); 673 /* If we are reading the ring buffer, don't trace */
837 __raw_spin_lock(&data->lock); 674 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
838 entry = tracing_get_trace_entry(tr, data); 675 return;
839 tracing_generic_entry_update(entry, flags); 676
840 entry->type = TRACE_FN; 677 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
841 entry->fn.ip = ip; 678 &irq_flags);
842 entry->fn.parent_ip = parent_ip; 679 if (!event)
843 __raw_spin_unlock(&data->lock); 680 return;
844 raw_local_irq_restore(irq_flags); 681 entry = ring_buffer_event_data(event);
682 tracing_generic_entry_update(&entry->ent, flags, pc);
683 entry->ent.type = TRACE_FN;
684 entry->ip = ip;
685 entry->parent_ip = parent_ip;
686 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
845} 687}
846 688
847void 689void
848ftrace(struct trace_array *tr, struct trace_array_cpu *data, 690ftrace(struct trace_array *tr, struct trace_array_cpu *data,
849 unsigned long ip, unsigned long parent_ip, unsigned long flags) 691 unsigned long ip, unsigned long parent_ip, unsigned long flags,
692 int pc)
850{ 693{
851 if (likely(!atomic_read(&data->disabled))) 694 if (likely(!atomic_read(&data->disabled)))
852 trace_function(tr, data, ip, parent_ip, flags); 695 trace_function(tr, data, ip, parent_ip, flags, pc);
853} 696}
854 697
855#ifdef CONFIG_MMIOTRACE 698static void ftrace_trace_stack(struct trace_array *tr,
856void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, 699 struct trace_array_cpu *data,
857 struct mmiotrace_rw *rw) 700 unsigned long flags,
701 int skip, int pc)
858{ 702{
859 struct trace_entry *entry; 703 struct ring_buffer_event *event;
704 struct stack_entry *entry;
705 struct stack_trace trace;
860 unsigned long irq_flags; 706 unsigned long irq_flags;
861 707
862 raw_local_irq_save(irq_flags); 708 if (!(trace_flags & TRACE_ITER_STACKTRACE))
863 __raw_spin_lock(&data->lock); 709 return;
864
865 entry = tracing_get_trace_entry(tr, data);
866 tracing_generic_entry_update(entry, 0);
867 entry->type = TRACE_MMIO_RW;
868 entry->mmiorw = *rw;
869
870 __raw_spin_unlock(&data->lock);
871 raw_local_irq_restore(irq_flags);
872
873 trace_wake_up();
874}
875
876void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
877 struct mmiotrace_map *map)
878{
879 struct trace_entry *entry;
880 unsigned long irq_flags;
881 710
882 raw_local_irq_save(irq_flags); 711 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
883 __raw_spin_lock(&data->lock); 712 &irq_flags);
713 if (!event)
714 return;
715 entry = ring_buffer_event_data(event);
716 tracing_generic_entry_update(&entry->ent, flags, pc);
717 entry->ent.type = TRACE_STACK;
884 718
885 entry = tracing_get_trace_entry(tr, data); 719 memset(&entry->caller, 0, sizeof(entry->caller));
886 tracing_generic_entry_update(entry, 0);
887 entry->type = TRACE_MMIO_MAP;
888 entry->mmiomap = *map;
889 720
890 __raw_spin_unlock(&data->lock); 721 trace.nr_entries = 0;
891 raw_local_irq_restore(irq_flags); 722 trace.max_entries = FTRACE_STACK_ENTRIES;
723 trace.skip = skip;
724 trace.entries = entry->caller;
892 725
893 trace_wake_up(); 726 save_stack_trace(&trace);
727 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
894} 728}
895#endif
896 729
897void __trace_stack(struct trace_array *tr, 730void __trace_stack(struct trace_array *tr,
898 struct trace_array_cpu *data, 731 struct trace_array_cpu *data,
899 unsigned long flags, 732 unsigned long flags,
900 int skip) 733 int skip)
901{ 734{
902 struct trace_entry *entry; 735 ftrace_trace_stack(tr, data, flags, skip, preempt_count());
903 struct stack_trace trace;
904
905 if (!(trace_flags & TRACE_ITER_STACKTRACE))
906 return;
907
908 entry = tracing_get_trace_entry(tr, data);
909 tracing_generic_entry_update(entry, flags);
910 entry->type = TRACE_STACK;
911
912 memset(&entry->stack, 0, sizeof(entry->stack));
913
914 trace.nr_entries = 0;
915 trace.max_entries = FTRACE_STACK_ENTRIES;
916 trace.skip = skip;
917 trace.entries = entry->stack.caller;
918
919 save_stack_trace(&trace);
920} 736}
921 737
922void 738static void
923__trace_special(void *__tr, void *__data, 739ftrace_trace_special(void *__tr, void *__data,
924 unsigned long arg1, unsigned long arg2, unsigned long arg3) 740 unsigned long arg1, unsigned long arg2, unsigned long arg3,
741 int pc)
925{ 742{
743 struct ring_buffer_event *event;
926 struct trace_array_cpu *data = __data; 744 struct trace_array_cpu *data = __data;
927 struct trace_array *tr = __tr; 745 struct trace_array *tr = __tr;
928 struct trace_entry *entry; 746 struct special_entry *entry;
929 unsigned long irq_flags; 747 unsigned long irq_flags;
930 748
931 raw_local_irq_save(irq_flags); 749 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
932 __raw_spin_lock(&data->lock); 750 &irq_flags);
933 entry = tracing_get_trace_entry(tr, data); 751 if (!event)
934 tracing_generic_entry_update(entry, 0); 752 return;
935 entry->type = TRACE_SPECIAL; 753 entry = ring_buffer_event_data(event);
936 entry->special.arg1 = arg1; 754 tracing_generic_entry_update(&entry->ent, 0, pc);
937 entry->special.arg2 = arg2; 755 entry->ent.type = TRACE_SPECIAL;
938 entry->special.arg3 = arg3; 756 entry->arg1 = arg1;
939 __trace_stack(tr, data, irq_flags, 4); 757 entry->arg2 = arg2;
940 __raw_spin_unlock(&data->lock); 758 entry->arg3 = arg3;
941 raw_local_irq_restore(irq_flags); 759 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
760 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
942 761
943 trace_wake_up(); 762 trace_wake_up();
944} 763}
945 764
946void 765void
766__trace_special(void *__tr, void *__data,
767 unsigned long arg1, unsigned long arg2, unsigned long arg3)
768{
769 ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
770}
771
772void
947tracing_sched_switch_trace(struct trace_array *tr, 773tracing_sched_switch_trace(struct trace_array *tr,
948 struct trace_array_cpu *data, 774 struct trace_array_cpu *data,
949 struct task_struct *prev, 775 struct task_struct *prev,
950 struct task_struct *next, 776 struct task_struct *next,
951 unsigned long flags) 777 unsigned long flags, int pc)
952{ 778{
953 struct trace_entry *entry; 779 struct ring_buffer_event *event;
780 struct ctx_switch_entry *entry;
954 unsigned long irq_flags; 781 unsigned long irq_flags;
955 782
956 raw_local_irq_save(irq_flags); 783 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
957 __raw_spin_lock(&data->lock); 784 &irq_flags);
958 entry = tracing_get_trace_entry(tr, data); 785 if (!event)
959 tracing_generic_entry_update(entry, flags); 786 return;
960 entry->type = TRACE_CTX; 787 entry = ring_buffer_event_data(event);
961 entry->ctx.prev_pid = prev->pid; 788 tracing_generic_entry_update(&entry->ent, flags, pc);
962 entry->ctx.prev_prio = prev->prio; 789 entry->ent.type = TRACE_CTX;
963 entry->ctx.prev_state = prev->state; 790 entry->prev_pid = prev->pid;
964 entry->ctx.next_pid = next->pid; 791 entry->prev_prio = prev->prio;
965 entry->ctx.next_prio = next->prio; 792 entry->prev_state = prev->state;
966 entry->ctx.next_state = next->state; 793 entry->next_pid = next->pid;
967 __trace_stack(tr, data, flags, 5); 794 entry->next_prio = next->prio;
968 __raw_spin_unlock(&data->lock); 795 entry->next_state = next->state;
969 raw_local_irq_restore(irq_flags); 796 entry->next_cpu = task_cpu(next);
797 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
798 ftrace_trace_stack(tr, data, flags, 5, pc);
970} 799}
971 800
972void 801void
@@ -974,25 +803,28 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
974 struct trace_array_cpu *data, 803 struct trace_array_cpu *data,
975 struct task_struct *wakee, 804 struct task_struct *wakee,
976 struct task_struct *curr, 805 struct task_struct *curr,
977 unsigned long flags) 806 unsigned long flags, int pc)
978{ 807{
979 struct trace_entry *entry; 808 struct ring_buffer_event *event;
809 struct ctx_switch_entry *entry;
980 unsigned long irq_flags; 810 unsigned long irq_flags;
981 811
982 raw_local_irq_save(irq_flags); 812 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
983 __raw_spin_lock(&data->lock); 813 &irq_flags);
984 entry = tracing_get_trace_entry(tr, data); 814 if (!event)
985 tracing_generic_entry_update(entry, flags); 815 return;
986 entry->type = TRACE_WAKE; 816 entry = ring_buffer_event_data(event);
987 entry->ctx.prev_pid = curr->pid; 817 tracing_generic_entry_update(&entry->ent, flags, pc);
988 entry->ctx.prev_prio = curr->prio; 818 entry->ent.type = TRACE_WAKE;
989 entry->ctx.prev_state = curr->state; 819 entry->prev_pid = curr->pid;
990 entry->ctx.next_pid = wakee->pid; 820 entry->prev_prio = curr->prio;
991 entry->ctx.next_prio = wakee->prio; 821 entry->prev_state = curr->state;
992 entry->ctx.next_state = wakee->state; 822 entry->next_pid = wakee->pid;
993 __trace_stack(tr, data, flags, 6); 823 entry->next_prio = wakee->prio;
994 __raw_spin_unlock(&data->lock); 824 entry->next_state = wakee->state;
995 raw_local_irq_restore(irq_flags); 825 entry->next_cpu = task_cpu(wakee);
826 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
827 ftrace_trace_stack(tr, data, flags, 6, pc);
996 828
997 trace_wake_up(); 829 trace_wake_up();
998} 830}
@@ -1002,23 +834,21 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1002{ 834{
1003 struct trace_array *tr = &global_trace; 835 struct trace_array *tr = &global_trace;
1004 struct trace_array_cpu *data; 836 struct trace_array_cpu *data;
1005 unsigned long flags;
1006 long disabled;
1007 int cpu; 837 int cpu;
838 int pc;
1008 839
1009 if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl) 840 if (tracing_disabled || !tr->ctrl)
1010 return; 841 return;
1011 842
1012 local_irq_save(flags); 843 pc = preempt_count();
844 preempt_disable_notrace();
1013 cpu = raw_smp_processor_id(); 845 cpu = raw_smp_processor_id();
1014 data = tr->data[cpu]; 846 data = tr->data[cpu];
1015 disabled = atomic_inc_return(&data->disabled);
1016 847
1017 if (likely(disabled == 1)) 848 if (likely(!atomic_read(&data->disabled)))
1018 __trace_special(tr, data, arg1, arg2, arg3); 849 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
1019 850
1020 atomic_dec(&data->disabled); 851 preempt_enable_notrace();
1021 local_irq_restore(flags);
1022} 852}
1023 853
1024#ifdef CONFIG_FTRACE 854#ifdef CONFIG_FTRACE
@@ -1029,7 +859,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
1029 struct trace_array_cpu *data; 859 struct trace_array_cpu *data;
1030 unsigned long flags; 860 unsigned long flags;
1031 long disabled; 861 long disabled;
1032 int cpu; 862 int cpu, resched;
863 int pc;
1033 864
1034 if (unlikely(!ftrace_function_enabled)) 865 if (unlikely(!ftrace_function_enabled))
1035 return; 866 return;
@@ -1037,16 +868,22 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
1037 if (skip_trace(ip)) 868 if (skip_trace(ip))
1038 return; 869 return;
1039 870
1040 local_irq_save(flags); 871 pc = preempt_count();
872 resched = need_resched();
873 preempt_disable_notrace();
874 local_save_flags(flags);
1041 cpu = raw_smp_processor_id(); 875 cpu = raw_smp_processor_id();
1042 data = tr->data[cpu]; 876 data = tr->data[cpu];
1043 disabled = atomic_inc_return(&data->disabled); 877 disabled = atomic_inc_return(&data->disabled);
1044 878
1045 if (likely(disabled == 1)) 879 if (likely(disabled == 1))
1046 trace_function(tr, data, ip, parent_ip, flags); 880 trace_function(tr, data, ip, parent_ip, flags, pc);
1047 881
1048 atomic_dec(&data->disabled); 882 atomic_dec(&data->disabled);
1049 local_irq_restore(flags); 883 if (resched)
884 preempt_enable_no_resched_notrace();
885 else
886 preempt_enable_notrace();
1050} 887}
1051 888
1052static struct ftrace_ops trace_ops __read_mostly = 889static struct ftrace_ops trace_ops __read_mostly =
@@ -1073,111 +910,96 @@ enum trace_file_type {
1073 TRACE_FILE_LAT_FMT = 1, 910 TRACE_FILE_LAT_FMT = 1,
1074}; 911};
1075 912
1076static struct trace_entry * 913static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
1077trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
1078 struct trace_iterator *iter, int cpu)
1079{ 914{
1080 struct page *page; 915 /* Don't allow ftrace to trace into the ring buffers */
1081 struct trace_entry *array; 916 ftrace_disable_cpu();
1082 917
1083 if (iter->next_idx[cpu] >= tr->entries || 918 iter->idx++;
1084 iter->next_idx[cpu] >= data->trace_idx || 919 if (iter->buffer_iter[iter->cpu])
1085 (data->trace_head == data->trace_tail && 920 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1086 data->trace_head_idx == data->trace_tail_idx))
1087 return NULL;
1088 921
1089 if (!iter->next_page[cpu]) { 922 ftrace_enable_cpu();
1090 /* Initialize the iterator for this cpu trace buffer */ 923}
1091 WARN_ON(!data->trace_tail); 924
1092 page = virt_to_page(data->trace_tail); 925static struct trace_entry *
1093 iter->next_page[cpu] = &page->lru; 926peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
1094 iter->next_page_idx[cpu] = data->trace_tail_idx; 927{
1095 } 928 struct ring_buffer_event *event;
929 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1096 930
1097 page = list_entry(iter->next_page[cpu], struct page, lru); 931 /* Don't allow ftrace to trace into the ring buffers */
1098 BUG_ON(&data->trace_pages == &page->lru); 932 ftrace_disable_cpu();
933
934 if (buf_iter)
935 event = ring_buffer_iter_peek(buf_iter, ts);
936 else
937 event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
1099 938
1100 array = page_address(page); 939 ftrace_enable_cpu();
1101 940
1102 WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE); 941 return event ? ring_buffer_event_data(event) : NULL;
1103 return &array[iter->next_page_idx[cpu]];
1104} 942}
1105 943
1106static struct trace_entry * 944static struct trace_entry *
1107find_next_entry(struct trace_iterator *iter, int *ent_cpu) 945__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1108{ 946{
1109 struct trace_array *tr = iter->tr; 947 struct ring_buffer *buffer = iter->tr->buffer;
1110 struct trace_entry *ent, *next = NULL; 948 struct trace_entry *ent, *next = NULL;
949 u64 next_ts = 0, ts;
1111 int next_cpu = -1; 950 int next_cpu = -1;
1112 int cpu; 951 int cpu;
1113 952
1114 for_each_tracing_cpu(cpu) { 953 for_each_tracing_cpu(cpu) {
1115 if (!head_page(tr->data[cpu])) 954
955 if (ring_buffer_empty_cpu(buffer, cpu))
1116 continue; 956 continue;
1117 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); 957
958 ent = peek_next_entry(iter, cpu, &ts);
959
1118 /* 960 /*
1119 * Pick the entry with the smallest timestamp: 961 * Pick the entry with the smallest timestamp:
1120 */ 962 */
1121 if (ent && (!next || ent->t < next->t)) { 963 if (ent && (!next || ts < next_ts)) {
1122 next = ent; 964 next = ent;
1123 next_cpu = cpu; 965 next_cpu = cpu;
966 next_ts = ts;
1124 } 967 }
1125 } 968 }
1126 969
1127 if (ent_cpu) 970 if (ent_cpu)
1128 *ent_cpu = next_cpu; 971 *ent_cpu = next_cpu;
1129 972
973 if (ent_ts)
974 *ent_ts = next_ts;
975
1130 return next; 976 return next;
1131} 977}
1132 978
1133static void trace_iterator_increment(struct trace_iterator *iter) 979/* Find the next real entry, without updating the iterator itself */
980static struct trace_entry *
981find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1134{ 982{
1135 iter->idx++; 983 return __find_next_entry(iter, ent_cpu, ent_ts);
1136 iter->next_idx[iter->cpu]++;
1137 iter->next_page_idx[iter->cpu]++;
1138
1139 if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
1140 struct trace_array_cpu *data = iter->tr->data[iter->cpu];
1141
1142 iter->next_page_idx[iter->cpu] = 0;
1143 iter->next_page[iter->cpu] =
1144 trace_next_list(data, iter->next_page[iter->cpu]);
1145 }
1146} 984}
1147 985
1148static void trace_consume(struct trace_iterator *iter) 986/* Find the next real entry, and increment the iterator to the next entry */
987static void *find_next_entry_inc(struct trace_iterator *iter)
1149{ 988{
1150 struct trace_array_cpu *data = iter->tr->data[iter->cpu]; 989 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
1151 990
1152 data->trace_tail_idx++; 991 if (iter->ent)
1153 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { 992 trace_iterator_increment(iter, iter->cpu);
1154 data->trace_tail = trace_next_page(data, data->trace_tail);
1155 data->trace_tail_idx = 0;
1156 }
1157 993
1158 /* Check if we empty it, then reset the index */ 994 return iter->ent ? iter : NULL;
1159 if (data->trace_head == data->trace_tail &&
1160 data->trace_head_idx == data->trace_tail_idx)
1161 data->trace_idx = 0;
1162} 995}
1163 996
1164static void *find_next_entry_inc(struct trace_iterator *iter) 997static void trace_consume(struct trace_iterator *iter)
1165{ 998{
1166 struct trace_entry *next; 999 /* Don't allow ftrace to trace into the ring buffers */
1167 int next_cpu = -1; 1000 ftrace_disable_cpu();
1168 1001 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
1169 next = find_next_entry(iter, &next_cpu); 1002 ftrace_enable_cpu();
1170
1171 iter->prev_ent = iter->ent;
1172 iter->prev_cpu = iter->cpu;
1173
1174 iter->ent = next;
1175 iter->cpu = next_cpu;
1176
1177 if (next)
1178 trace_iterator_increment(iter);
1179
1180 return next ? iter : NULL;
1181} 1003}
1182 1004
1183static void *s_next(struct seq_file *m, void *v, loff_t *pos) 1005static void *s_next(struct seq_file *m, void *v, loff_t *pos)
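
Reading the merged trace is now a straightforward k-way merge over the per-cpu buffers: __find_next_entry() peeks at the oldest unread event on every CPU and returns the one with the smallest timestamp. For example, if cpu0's next event has ts=2000, cpu1's has ts=1500 and cpu2's buffer is empty, the iterator hands out cpu1's event and find_next_entry_inc() advances only cpu1's position; cpu0's event stays queued for the following call.
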
@@ -1210,7 +1032,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1210 struct trace_iterator *iter = m->private; 1032 struct trace_iterator *iter = m->private;
1211 void *p = NULL; 1033 void *p = NULL;
1212 loff_t l = 0; 1034 loff_t l = 0;
1213 int i; 1035 int cpu;
1214 1036
1215 mutex_lock(&trace_types_lock); 1037 mutex_lock(&trace_types_lock);
1216 1038
@@ -1229,14 +1051,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1229 iter->ent = NULL; 1051 iter->ent = NULL;
1230 iter->cpu = 0; 1052 iter->cpu = 0;
1231 iter->idx = -1; 1053 iter->idx = -1;
1232 iter->prev_ent = NULL;
1233 iter->prev_cpu = -1;
1234 1054
1235 for_each_tracing_cpu(i) { 1055 ftrace_disable_cpu();
1236 iter->next_idx[i] = 0; 1056
1237 iter->next_page[i] = NULL; 1057 for_each_tracing_cpu(cpu) {
1058 ring_buffer_iter_reset(iter->buffer_iter[cpu]);
1238 } 1059 }
1239 1060
1061 ftrace_enable_cpu();
1062
1240 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 1063 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1241 ; 1064 ;
1242 1065
@@ -1330,21 +1153,21 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1330 1153
1331static void print_lat_help_header(struct seq_file *m) 1154static void print_lat_help_header(struct seq_file *m)
1332{ 1155{
1333 seq_puts(m, "# _------=> CPU# \n"); 1156 seq_puts(m, "# _------=> CPU# \n");
1334 seq_puts(m, "# / _-----=> irqs-off \n"); 1157 seq_puts(m, "# / _-----=> irqs-off \n");
1335 seq_puts(m, "# | / _----=> need-resched \n"); 1158 seq_puts(m, "# | / _----=> need-resched \n");
1336 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 1159 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1337 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 1160 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1338 seq_puts(m, "# |||| / \n"); 1161 seq_puts(m, "# |||| / \n");
1339 seq_puts(m, "# ||||| delay \n"); 1162 seq_puts(m, "# ||||| delay \n");
1340 seq_puts(m, "# cmd pid ||||| time | caller \n"); 1163 seq_puts(m, "# cmd pid ||||| time | caller \n");
1341 seq_puts(m, "# \\ / ||||| \\ | / \n"); 1164 seq_puts(m, "# \\ / ||||| \\ | / \n");
1342} 1165}
1343 1166
1344static void print_func_help_header(struct seq_file *m) 1167static void print_func_help_header(struct seq_file *m)
1345{ 1168{
1346 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); 1169 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1347 seq_puts(m, "# | | | | |\n"); 1170 seq_puts(m, "# | | | | |\n");
1348} 1171}
1349 1172
1350 1173
@@ -1355,23 +1178,16 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1355 struct trace_array *tr = iter->tr; 1178 struct trace_array *tr = iter->tr;
1356 struct trace_array_cpu *data = tr->data[tr->cpu]; 1179 struct trace_array_cpu *data = tr->data[tr->cpu];
1357 struct tracer *type = current_trace; 1180 struct tracer *type = current_trace;
1358 unsigned long total = 0; 1181 unsigned long total;
1359 unsigned long entries = 0; 1182 unsigned long entries;
1360 int cpu;
1361 const char *name = "preemption"; 1183 const char *name = "preemption";
1362 1184
1363 if (type) 1185 if (type)
1364 name = type->name; 1186 name = type->name;
1365 1187
1366 for_each_tracing_cpu(cpu) { 1188 entries = ring_buffer_entries(iter->tr->buffer);
1367 if (head_page(tr->data[cpu])) { 1189 total = entries +
1368 total += tr->data[cpu]->trace_idx; 1190 ring_buffer_overruns(iter->tr->buffer);
1369 if (tr->data[cpu]->trace_idx > tr->entries)
1370 entries += tr->entries;
1371 else
1372 entries += tr->data[cpu]->trace_idx;
1373 }
1374 }
1375 1191
1376 seq_printf(m, "%s latency trace v1.1.5 on %s\n", 1192 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
1377 name, UTS_RELEASE); 1193 name, UTS_RELEASE);
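
The header accounting also collapses to one line of arithmetic: ring_buffer_entries() reports what is still resident in the buffer and ring_buffer_overruns() what has been overwritten, so the total number of events recorded is simply their sum (for instance, 14000 resident plus 6000 overrun gives 20000 recorded).
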
@@ -1428,7 +1244,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1428 comm = trace_find_cmdline(entry->pid); 1244 comm = trace_find_cmdline(entry->pid);
1429 1245
1430 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); 1246 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1431 trace_seq_printf(s, "%d", cpu); 1247 trace_seq_printf(s, "%3d", cpu);
1432 trace_seq_printf(s, "%c%c", 1248 trace_seq_printf(s, "%c%c",
1433 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', 1249 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
1434 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); 1250 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
@@ -1457,7 +1273,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1457unsigned long preempt_mark_thresh = 100; 1273unsigned long preempt_mark_thresh = 100;
1458 1274
1459static void 1275static void
1460lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, 1276lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1461 unsigned long rel_usecs) 1277 unsigned long rel_usecs)
1462{ 1278{
1463 trace_seq_printf(s, " %4lldus", abs_usecs); 1279 trace_seq_printf(s, " %4lldus", abs_usecs);
@@ -1471,34 +1287,76 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
1471 1287
1472static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1288static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1473 1289
1474static int 1290/*
1291 * The message is supposed to contain an ending newline.
1292 * If the printing stops prematurely, try to add a newline of our own.
1293 */
1294void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1295{
1296 struct trace_entry *ent;
1297 struct trace_field_cont *cont;
1298 bool ok = true;
1299
1300 ent = peek_next_entry(iter, iter->cpu, NULL);
1301 if (!ent || ent->type != TRACE_CONT) {
1302 trace_seq_putc(s, '\n');
1303 return;
1304 }
1305
1306 do {
1307 cont = (struct trace_field_cont *)ent;
1308 if (ok)
1309 ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
1310
1311 ftrace_disable_cpu();
1312
1313 if (iter->buffer_iter[iter->cpu])
1314 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1315 else
1316 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
1317
1318 ftrace_enable_cpu();
1319
1320 ent = peek_next_entry(iter, iter->cpu, NULL);
1321 } while (ent && ent->type == TRACE_CONT);
1322
1323 if (!ok)
1324 trace_seq_putc(s, '\n');
1325}
1326
1327static enum print_line_t
1475print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) 1328print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1476{ 1329{
1477 struct trace_seq *s = &iter->seq; 1330 struct trace_seq *s = &iter->seq;
1478 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1331 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1479 struct trace_entry *next_entry = find_next_entry(iter, NULL); 1332 struct trace_entry *next_entry;
1480 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); 1333 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1481 struct trace_entry *entry = iter->ent; 1334 struct trace_entry *entry = iter->ent;
1482 unsigned long abs_usecs; 1335 unsigned long abs_usecs;
1483 unsigned long rel_usecs; 1336 unsigned long rel_usecs;
1337 u64 next_ts;
1484 char *comm; 1338 char *comm;
1485 int S, T; 1339 int S, T;
1486 int i; 1340 int i;
1487 unsigned state; 1341 unsigned state;
1488 1342
1343 if (entry->type == TRACE_CONT)
1344 return TRACE_TYPE_HANDLED;
1345
1346 next_entry = find_next_entry(iter, NULL, &next_ts);
1489 if (!next_entry) 1347 if (!next_entry)
1490 next_entry = entry; 1348 next_ts = iter->ts;
1491 rel_usecs = ns2usecs(next_entry->t - entry->t); 1349 rel_usecs = ns2usecs(next_ts - iter->ts);
1492 abs_usecs = ns2usecs(entry->t - iter->tr->time_start); 1350 abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
1493 1351
1494 if (verbose) { 1352 if (verbose) {
1495 comm = trace_find_cmdline(entry->pid); 1353 comm = trace_find_cmdline(entry->pid);
1496 trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]" 1354 trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
1497 " %ld.%03ldms (+%ld.%03ldms): ", 1355 " %ld.%03ldms (+%ld.%03ldms): ",
1498 comm, 1356 comm,
1499 entry->pid, cpu, entry->flags, 1357 entry->pid, cpu, entry->flags,
1500 entry->preempt_count, trace_idx, 1358 entry->preempt_count, trace_idx,
1501 ns2usecs(entry->t), 1359 ns2usecs(iter->ts),
1502 abs_usecs/1000, 1360 abs_usecs/1000,
1503 abs_usecs % 1000, rel_usecs/1000, 1361 abs_usecs % 1000, rel_usecs/1000,
1504 rel_usecs % 1000); 1362 rel_usecs % 1000);
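
trace_seq_print_cont() above is the reader side of a multi-record layout: a message too large for a single entry is stored as a leading print_entry whose flags carry TRACE_FLAG_CONT, followed by one or more TRACE_CONT records holding the remaining text, and the printers stitch the pieces back together (adding a closing newline of their own if the output buffer fills first). Roughly, one long message ends up laid out like this (chunk boundaries illustrative):

	print_entry      { ent.type = TRACE_PRINT, ent.flags |= TRACE_FLAG_CONT, buf = "first part of the message ..." }
	trace_field_cont { ent.type = TRACE_CONT,  buf = "... middle of the message ..." }
	trace_field_cont { ent.type = TRACE_CONT,  buf = "... end of the message\n" }
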
@@ -1507,52 +1365,85 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1507 lat_print_timestamp(s, abs_usecs, rel_usecs); 1365 lat_print_timestamp(s, abs_usecs, rel_usecs);
1508 } 1366 }
1509 switch (entry->type) { 1367 switch (entry->type) {
1510 case TRACE_FN: 1368 case TRACE_FN: {
1511 seq_print_ip_sym(s, entry->fn.ip, sym_flags); 1369 struct ftrace_entry *field;
1370
1371 trace_assign_type(field, entry);
1372
1373 seq_print_ip_sym(s, field->ip, sym_flags);
1512 trace_seq_puts(s, " ("); 1374 trace_seq_puts(s, " (");
1513 if (kretprobed(entry->fn.parent_ip)) 1375 if (kretprobed(field->parent_ip))
1514 trace_seq_puts(s, KRETPROBE_MSG); 1376 trace_seq_puts(s, KRETPROBE_MSG);
1515 else 1377 else
1516 seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags); 1378 seq_print_ip_sym(s, field->parent_ip, sym_flags);
1517 trace_seq_puts(s, ")\n"); 1379 trace_seq_puts(s, ")\n");
1518 break; 1380 break;
1381 }
1519 case TRACE_CTX: 1382 case TRACE_CTX:
1520 case TRACE_WAKE: 1383 case TRACE_WAKE: {
1521 T = entry->ctx.next_state < sizeof(state_to_char) ? 1384 struct ctx_switch_entry *field;
1522 state_to_char[entry->ctx.next_state] : 'X'; 1385
1386 trace_assign_type(field, entry);
1387
1388 T = field->next_state < sizeof(state_to_char) ?
1389 state_to_char[field->next_state] : 'X';
1523 1390
1524 state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0; 1391 state = field->prev_state ?
1392 __ffs(field->prev_state) + 1 : 0;
1525 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; 1393 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1526 comm = trace_find_cmdline(entry->ctx.next_pid); 1394 comm = trace_find_cmdline(field->next_pid);
1527 trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n", 1395 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1528 entry->ctx.prev_pid, 1396 field->prev_pid,
1529 entry->ctx.prev_prio, 1397 field->prev_prio,
1530 S, entry->type == TRACE_CTX ? "==>" : " +", 1398 S, entry->type == TRACE_CTX ? "==>" : " +",
1531 entry->ctx.next_pid, 1399 field->next_cpu,
1532 entry->ctx.next_prio, 1400 field->next_pid,
1401 field->next_prio,
1533 T, comm); 1402 T, comm);
1534 break; 1403 break;
1535 case TRACE_SPECIAL: 1404 }
1405 case TRACE_SPECIAL: {
1406 struct special_entry *field;
1407
1408 trace_assign_type(field, entry);
1409
1536 trace_seq_printf(s, "# %ld %ld %ld\n", 1410 trace_seq_printf(s, "# %ld %ld %ld\n",
1537 entry->special.arg1, 1411 field->arg1,
1538 entry->special.arg2, 1412 field->arg2,
1539 entry->special.arg3); 1413 field->arg3);
1540 break; 1414 break;
1541 case TRACE_STACK: 1415 }
1416 case TRACE_STACK: {
1417 struct stack_entry *field;
1418
1419 trace_assign_type(field, entry);
1420
1542 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 1421 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1543 if (i) 1422 if (i)
1544 trace_seq_puts(s, " <= "); 1423 trace_seq_puts(s, " <= ");
1545 seq_print_ip_sym(s, entry->stack.caller[i], sym_flags); 1424 seq_print_ip_sym(s, field->caller[i], sym_flags);
1546 } 1425 }
1547 trace_seq_puts(s, "\n"); 1426 trace_seq_puts(s, "\n");
1548 break; 1427 break;
1428 }
1429 case TRACE_PRINT: {
1430 struct print_entry *field;
1431
1432 trace_assign_type(field, entry);
1433
1434 seq_print_ip_sym(s, field->ip, sym_flags);
1435 trace_seq_printf(s, ": %s", field->buf);
1436 if (entry->flags & TRACE_FLAG_CONT)
1437 trace_seq_print_cont(s, iter);
1438 break;
1439 }
1549 default: 1440 default:
1550 trace_seq_printf(s, "Unknown type %d\n", entry->type); 1441 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1551 } 1442 }
1552 return 1; 1443 return TRACE_TYPE_HANDLED;
1553} 1444}
1554 1445
1555static int print_trace_fmt(struct trace_iterator *iter) 1446static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1556{ 1447{
1557 struct trace_seq *s = &iter->seq; 1448 struct trace_seq *s = &iter->seq;
1558 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1449 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1567,90 +1458,126 @@ static int print_trace_fmt(struct trace_iterator *iter)
1567 1458
1568 entry = iter->ent; 1459 entry = iter->ent;
1569 1460
1461 if (entry->type == TRACE_CONT)
1462 return TRACE_TYPE_HANDLED;
1463
1570 comm = trace_find_cmdline(iter->ent->pid); 1464 comm = trace_find_cmdline(iter->ent->pid);
1571 1465
1572 t = ns2usecs(entry->t); 1466 t = ns2usecs(iter->ts);
1573 usec_rem = do_div(t, 1000000ULL); 1467 usec_rem = do_div(t, 1000000ULL);
1574 secs = (unsigned long)t; 1468 secs = (unsigned long)t;
1575 1469
1576 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); 1470 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1577 if (!ret) 1471 if (!ret)
1578 return 0; 1472 return TRACE_TYPE_PARTIAL_LINE;
1579 ret = trace_seq_printf(s, "[%02d] ", iter->cpu); 1473 ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
1580 if (!ret) 1474 if (!ret)
1581 return 0; 1475 return TRACE_TYPE_PARTIAL_LINE;
1582 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); 1476 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1583 if (!ret) 1477 if (!ret)
1584 return 0; 1478 return TRACE_TYPE_PARTIAL_LINE;
1585 1479
1586 switch (entry->type) { 1480 switch (entry->type) {
1587 case TRACE_FN: 1481 case TRACE_FN: {
1588 ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags); 1482 struct ftrace_entry *field;
1483
1484 trace_assign_type(field, entry);
1485
1486 ret = seq_print_ip_sym(s, field->ip, sym_flags);
1589 if (!ret) 1487 if (!ret)
1590 return 0; 1488 return TRACE_TYPE_PARTIAL_LINE;
1591 if ((sym_flags & TRACE_ITER_PRINT_PARENT) && 1489 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
1592 entry->fn.parent_ip) { 1490 field->parent_ip) {
1593 ret = trace_seq_printf(s, " <-"); 1491 ret = trace_seq_printf(s, " <-");
1594 if (!ret) 1492 if (!ret)
1595 return 0; 1493 return TRACE_TYPE_PARTIAL_LINE;
1596 if (kretprobed(entry->fn.parent_ip)) 1494 if (kretprobed(field->parent_ip))
1597 ret = trace_seq_puts(s, KRETPROBE_MSG); 1495 ret = trace_seq_puts(s, KRETPROBE_MSG);
1598 else 1496 else
1599 ret = seq_print_ip_sym(s, entry->fn.parent_ip, 1497 ret = seq_print_ip_sym(s,
1498 field->parent_ip,
1600 sym_flags); 1499 sym_flags);
1601 if (!ret) 1500 if (!ret)
1602 return 0; 1501 return TRACE_TYPE_PARTIAL_LINE;
1603 } 1502 }
1604 ret = trace_seq_printf(s, "\n"); 1503 ret = trace_seq_printf(s, "\n");
1605 if (!ret) 1504 if (!ret)
1606 return 0; 1505 return TRACE_TYPE_PARTIAL_LINE;
1607 break; 1506 break;
1507 }
1608 case TRACE_CTX: 1508 case TRACE_CTX:
1609 case TRACE_WAKE: 1509 case TRACE_WAKE: {
1610 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1510 struct ctx_switch_entry *field;
1611 state_to_char[entry->ctx.prev_state] : 'X'; 1511
1612 T = entry->ctx.next_state < sizeof(state_to_char) ? 1512 trace_assign_type(field, entry);
1613 state_to_char[entry->ctx.next_state] : 'X'; 1513
1614 ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n", 1514 S = field->prev_state < sizeof(state_to_char) ?
1615 entry->ctx.prev_pid, 1515 state_to_char[field->prev_state] : 'X';
1616 entry->ctx.prev_prio, 1516 T = field->next_state < sizeof(state_to_char) ?
1517 state_to_char[field->next_state] : 'X';
1518 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
1519 field->prev_pid,
1520 field->prev_prio,
1617 S, 1521 S,
1618 entry->type == TRACE_CTX ? "==>" : " +", 1522 entry->type == TRACE_CTX ? "==>" : " +",
1619 entry->ctx.next_pid, 1523 field->next_cpu,
1620 entry->ctx.next_prio, 1524 field->next_pid,
1525 field->next_prio,
1621 T); 1526 T);
1622 if (!ret) 1527 if (!ret)
1623 return 0; 1528 return TRACE_TYPE_PARTIAL_LINE;
1624 break; 1529 break;
1625 case TRACE_SPECIAL: 1530 }
1531 case TRACE_SPECIAL: {
1532 struct special_entry *field;
1533
1534 trace_assign_type(field, entry);
1535
1626 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1536 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1627 entry->special.arg1, 1537 field->arg1,
1628 entry->special.arg2, 1538 field->arg2,
1629 entry->special.arg3); 1539 field->arg3);
1630 if (!ret) 1540 if (!ret)
1631 return 0; 1541 return TRACE_TYPE_PARTIAL_LINE;
1632 break; 1542 break;
1633 case TRACE_STACK: 1543 }
1544 case TRACE_STACK: {
1545 struct stack_entry *field;
1546
1547 trace_assign_type(field, entry);
1548
1634 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 1549 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1635 if (i) { 1550 if (i) {
1636 ret = trace_seq_puts(s, " <= "); 1551 ret = trace_seq_puts(s, " <= ");
1637 if (!ret) 1552 if (!ret)
1638 return 0; 1553 return TRACE_TYPE_PARTIAL_LINE;
1639 } 1554 }
1640 ret = seq_print_ip_sym(s, entry->stack.caller[i], 1555 ret = seq_print_ip_sym(s, field->caller[i],
1641 sym_flags); 1556 sym_flags);
1642 if (!ret) 1557 if (!ret)
1643 return 0; 1558 return TRACE_TYPE_PARTIAL_LINE;
1644 } 1559 }
1645 ret = trace_seq_puts(s, "\n"); 1560 ret = trace_seq_puts(s, "\n");
1646 if (!ret) 1561 if (!ret)
1647 return 0; 1562 return TRACE_TYPE_PARTIAL_LINE;
1648 break; 1563 break;
1649 } 1564 }
1650 return 1; 1565 case TRACE_PRINT: {
1566 struct print_entry *field;
1567
1568 trace_assign_type(field, entry);
1569
1570 seq_print_ip_sym(s, field->ip, sym_flags);
1571 trace_seq_printf(s, ": %s", field->buf);
1572 if (entry->flags & TRACE_FLAG_CONT)
1573 trace_seq_print_cont(s, iter);
1574 break;
1575 }
1576 }
1577 return TRACE_TYPE_HANDLED;
1651} 1578}
1652 1579
1653static int print_raw_fmt(struct trace_iterator *iter) 1580static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1654{ 1581{
1655 struct trace_seq *s = &iter->seq; 1582 struct trace_seq *s = &iter->seq;
1656 struct trace_entry *entry; 1583 struct trace_entry *entry;
@@ -1659,47 +1586,77 @@ static int print_raw_fmt(struct trace_iterator *iter)
1659 1586
1660 entry = iter->ent; 1587 entry = iter->ent;
1661 1588
1589 if (entry->type == TRACE_CONT)
1590 return TRACE_TYPE_HANDLED;
1591
1662 ret = trace_seq_printf(s, "%d %d %llu ", 1592 ret = trace_seq_printf(s, "%d %d %llu ",
1663 entry->pid, iter->cpu, entry->t); 1593 entry->pid, iter->cpu, iter->ts);
1664 if (!ret) 1594 if (!ret)
1665 return 0; 1595 return TRACE_TYPE_PARTIAL_LINE;
1666 1596
1667 switch (entry->type) { 1597 switch (entry->type) {
1668 case TRACE_FN: 1598 case TRACE_FN: {
1599 struct ftrace_entry *field;
1600
1601 trace_assign_type(field, entry);
1602
1669 ret = trace_seq_printf(s, "%x %x\n", 1603 ret = trace_seq_printf(s, "%x %x\n",
1670 entry->fn.ip, entry->fn.parent_ip); 1604 field->ip,
1605 field->parent_ip);
1671 if (!ret) 1606 if (!ret)
1672 return 0; 1607 return TRACE_TYPE_PARTIAL_LINE;
1673 break; 1608 break;
1609 }
1674 case TRACE_CTX: 1610 case TRACE_CTX:
1675 case TRACE_WAKE: 1611 case TRACE_WAKE: {
1676 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1612 struct ctx_switch_entry *field;
1677 state_to_char[entry->ctx.prev_state] : 'X'; 1613
1678 T = entry->ctx.next_state < sizeof(state_to_char) ? 1614 trace_assign_type(field, entry);
1679 state_to_char[entry->ctx.next_state] : 'X'; 1615
1616 S = field->prev_state < sizeof(state_to_char) ?
1617 state_to_char[field->prev_state] : 'X';
1618 T = field->next_state < sizeof(state_to_char) ?
1619 state_to_char[field->next_state] : 'X';
1680 if (entry->type == TRACE_WAKE) 1620 if (entry->type == TRACE_WAKE)
1681 S = '+'; 1621 S = '+';
1682 ret = trace_seq_printf(s, "%d %d %c %d %d %c\n", 1622 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1683 entry->ctx.prev_pid, 1623 field->prev_pid,
1684 entry->ctx.prev_prio, 1624 field->prev_prio,
1685 S, 1625 S,
1686 entry->ctx.next_pid, 1626 field->next_cpu,
1687 entry->ctx.next_prio, 1627 field->next_pid,
1628 field->next_prio,
1688 T); 1629 T);
1689 if (!ret) 1630 if (!ret)
1690 return 0; 1631 return TRACE_TYPE_PARTIAL_LINE;
1691 break; 1632 break;
1633 }
1692 case TRACE_SPECIAL: 1634 case TRACE_SPECIAL:
1693 case TRACE_STACK: 1635 case TRACE_STACK: {
1636 struct special_entry *field;
1637
1638 trace_assign_type(field, entry);
1639
1694 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1640 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1695 entry->special.arg1, 1641 field->arg1,
1696 entry->special.arg2, 1642 field->arg2,
1697 entry->special.arg3); 1643 field->arg3);
1698 if (!ret) 1644 if (!ret)
1699 return 0; 1645 return TRACE_TYPE_PARTIAL_LINE;
1700 break; 1646 break;
1701 } 1647 }
1702 return 1; 1648 case TRACE_PRINT: {
1649 struct print_entry *field;
1650
1651 trace_assign_type(field, entry);
1652
1653 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
1654 if (entry->flags & TRACE_FLAG_CONT)
1655 trace_seq_print_cont(s, iter);
1656 break;
1657 }
1658 }
1659 return TRACE_TYPE_HANDLED;
1703} 1660}
1704 1661
1705#define SEQ_PUT_FIELD_RET(s, x) \ 1662#define SEQ_PUT_FIELD_RET(s, x) \
@@ -1710,11 +1667,12 @@ do { \
1710 1667
1711#define SEQ_PUT_HEX_FIELD_RET(s, x) \ 1668#define SEQ_PUT_HEX_FIELD_RET(s, x) \
1712do { \ 1669do { \
1670 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
1713 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ 1671 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
1714 return 0; \ 1672 return 0; \
1715} while (0) 1673} while (0)
1716 1674
1717static int print_hex_fmt(struct trace_iterator *iter) 1675static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1718{ 1676{
1719 struct trace_seq *s = &iter->seq; 1677 struct trace_seq *s = &iter->seq;
1720 unsigned char newline = '\n'; 1678 unsigned char newline = '\n';
@@ -1723,97 +1681,139 @@ static int print_hex_fmt(struct trace_iterator *iter)
1723 1681
1724 entry = iter->ent; 1682 entry = iter->ent;
1725 1683
1684 if (entry->type == TRACE_CONT)
1685 return TRACE_TYPE_HANDLED;
1686
1726 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 1687 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1727 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 1688 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1728 SEQ_PUT_HEX_FIELD_RET(s, entry->t); 1689 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
1729 1690
1730 switch (entry->type) { 1691 switch (entry->type) {
1731 case TRACE_FN: 1692 case TRACE_FN: {
1732 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip); 1693 struct ftrace_entry *field;
1733 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1694
1695 trace_assign_type(field, entry);
1696
1697 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1698 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
1734 break; 1699 break;
1700 }
1735 case TRACE_CTX: 1701 case TRACE_CTX:
1736 case TRACE_WAKE: 1702 case TRACE_WAKE: {
1737 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1703 struct ctx_switch_entry *field;
1738 state_to_char[entry->ctx.prev_state] : 'X'; 1704
1739 T = entry->ctx.next_state < sizeof(state_to_char) ? 1705 trace_assign_type(field, entry);
1740 state_to_char[entry->ctx.next_state] : 'X'; 1706
1707 S = field->prev_state < sizeof(state_to_char) ?
1708 state_to_char[field->prev_state] : 'X';
1709 T = field->next_state < sizeof(state_to_char) ?
1710 state_to_char[field->next_state] : 'X';
1741 if (entry->type == TRACE_WAKE) 1711 if (entry->type == TRACE_WAKE)
1742 S = '+'; 1712 S = '+';
1743 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid); 1713 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1744 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio); 1714 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1745 SEQ_PUT_HEX_FIELD_RET(s, S); 1715 SEQ_PUT_HEX_FIELD_RET(s, S);
1746 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid); 1716 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1747 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio); 1717 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1748 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1718 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
1749 SEQ_PUT_HEX_FIELD_RET(s, T); 1719 SEQ_PUT_HEX_FIELD_RET(s, T);
1750 break; 1720 break;
1721 }
1751 case TRACE_SPECIAL: 1722 case TRACE_SPECIAL:
1752 case TRACE_STACK: 1723 case TRACE_STACK: {
1753 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1); 1724 struct special_entry *field;
1754 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2); 1725
1755 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3); 1726 trace_assign_type(field, entry);
1727
1728 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
1729 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
1730 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
1756 break; 1731 break;
1757 } 1732 }
1733 }
1758 SEQ_PUT_FIELD_RET(s, newline); 1734 SEQ_PUT_FIELD_RET(s, newline);
1759 1735
1760 return 1; 1736 return TRACE_TYPE_HANDLED;
1761} 1737}
1762 1738
1763static int print_bin_fmt(struct trace_iterator *iter) 1739static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1764{ 1740{
1765 struct trace_seq *s = &iter->seq; 1741 struct trace_seq *s = &iter->seq;
1766 struct trace_entry *entry; 1742 struct trace_entry *entry;
1767 1743
1768 entry = iter->ent; 1744 entry = iter->ent;
1769 1745
1746 if (entry->type == TRACE_CONT)
1747 return TRACE_TYPE_HANDLED;
1748
1770 SEQ_PUT_FIELD_RET(s, entry->pid); 1749 SEQ_PUT_FIELD_RET(s, entry->pid);
1771 SEQ_PUT_FIELD_RET(s, entry->cpu); 1750 SEQ_PUT_FIELD_RET(s, iter->cpu);
1772 SEQ_PUT_FIELD_RET(s, entry->t); 1751 SEQ_PUT_FIELD_RET(s, iter->ts);
1773 1752
1774 switch (entry->type) { 1753 switch (entry->type) {
1775 case TRACE_FN: 1754 case TRACE_FN: {
1776 SEQ_PUT_FIELD_RET(s, entry->fn.ip); 1755 struct ftrace_entry *field;
1777 SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip); 1756
1757 trace_assign_type(field, entry);
1758
1759 SEQ_PUT_FIELD_RET(s, field->ip);
1760 SEQ_PUT_FIELD_RET(s, field->parent_ip);
1778 break; 1761 break;
1779 case TRACE_CTX: 1762 }
1780 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid); 1763 case TRACE_CTX: {
1781 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio); 1764 struct ctx_switch_entry *field;
1782 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state); 1765
1783 SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid); 1766 trace_assign_type(field, entry);
1784 SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio); 1767
1785 SEQ_PUT_FIELD_RET(s, entry->ctx.next_state); 1768 SEQ_PUT_FIELD_RET(s, field->prev_pid);
1769 SEQ_PUT_FIELD_RET(s, field->prev_prio);
1770 SEQ_PUT_FIELD_RET(s, field->prev_state);
1771 SEQ_PUT_FIELD_RET(s, field->next_pid);
1772 SEQ_PUT_FIELD_RET(s, field->next_prio);
1773 SEQ_PUT_FIELD_RET(s, field->next_state);
1786 break; 1774 break;
1775 }
1787 case TRACE_SPECIAL: 1776 case TRACE_SPECIAL:
1788 case TRACE_STACK: 1777 case TRACE_STACK: {
1789 SEQ_PUT_FIELD_RET(s, entry->special.arg1); 1778 struct special_entry *field;
1790 SEQ_PUT_FIELD_RET(s, entry->special.arg2); 1779
1791 SEQ_PUT_FIELD_RET(s, entry->special.arg3); 1780 trace_assign_type(field, entry);
1781
1782 SEQ_PUT_FIELD_RET(s, field->arg1);
1783 SEQ_PUT_FIELD_RET(s, field->arg2);
1784 SEQ_PUT_FIELD_RET(s, field->arg3);
1792 break; 1785 break;
1793 } 1786 }
1787 }
1794 return 1; 1788 return 1;
1795} 1789}
1796 1790
1797static int trace_empty(struct trace_iterator *iter) 1791static int trace_empty(struct trace_iterator *iter)
1798{ 1792{
1799 struct trace_array_cpu *data;
1800 int cpu; 1793 int cpu;
1801 1794
1802 for_each_tracing_cpu(cpu) { 1795 for_each_tracing_cpu(cpu) {
1803 data = iter->tr->data[cpu]; 1796 if (iter->buffer_iter[cpu]) {
1804 1797 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1805 if (head_page(data) && data->trace_idx && 1798 return 0;
1806 (data->trace_tail != data->trace_head || 1799 } else {
1807 data->trace_tail_idx != data->trace_head_idx)) 1800 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1808 return 0; 1801 return 0;
1802 }
1809 } 1803 }
1804
1810 return 1; 1805 return 1;
1811} 1806}
1812 1807
1813static int print_trace_line(struct trace_iterator *iter) 1808static enum print_line_t print_trace_line(struct trace_iterator *iter)
1814{ 1809{
1815 if (iter->trace && iter->trace->print_line) 1810 enum print_line_t ret;
1816 return iter->trace->print_line(iter); 1811
1812 if (iter->trace && iter->trace->print_line) {
1813 ret = iter->trace->print_line(iter);
1814 if (ret != TRACE_TYPE_UNHANDLED)
1815 return ret;
1816 }
1817 1817
1818 if (trace_flags & TRACE_ITER_BIN) 1818 if (trace_flags & TRACE_ITER_BIN)
1819 return print_bin_fmt(iter); 1819 return print_bin_fmt(iter);
@@ -1869,6 +1869,8 @@ static struct trace_iterator *
1869__tracing_open(struct inode *inode, struct file *file, int *ret) 1869__tracing_open(struct inode *inode, struct file *file, int *ret)
1870{ 1870{
1871 struct trace_iterator *iter; 1871 struct trace_iterator *iter;
1872 struct seq_file *m;
1873 int cpu;
1872 1874
1873 if (tracing_disabled) { 1875 if (tracing_disabled) {
1874 *ret = -ENODEV; 1876 *ret = -ENODEV;
@@ -1889,28 +1891,45 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1889 iter->trace = current_trace; 1891 iter->trace = current_trace;
1890 iter->pos = -1; 1892 iter->pos = -1;
1891 1893
1894 for_each_tracing_cpu(cpu) {
1895
1896 iter->buffer_iter[cpu] =
1897 ring_buffer_read_start(iter->tr->buffer, cpu);
1898
1899 if (!iter->buffer_iter[cpu])
1900 goto fail_buffer;
1901 }
1902
1892 /* TODO stop tracer */ 1903 /* TODO stop tracer */
1893 *ret = seq_open(file, &tracer_seq_ops); 1904 *ret = seq_open(file, &tracer_seq_ops);
1894 if (!*ret) { 1905 if (*ret)
1895 struct seq_file *m = file->private_data; 1906 goto fail_buffer;
1896 m->private = iter;
1897 1907
1898 /* stop the trace while dumping */ 1908 m = file->private_data;
1899 if (iter->tr->ctrl) { 1909 m->private = iter;
1900 tracer_enabled = 0;
1901 ftrace_function_enabled = 0;
1902 }
1903 1910
1904 if (iter->trace && iter->trace->open) 1911 /* stop the trace while dumping */
1905 iter->trace->open(iter); 1912 if (iter->tr->ctrl) {
1906 } else { 1913 tracer_enabled = 0;
1907 kfree(iter); 1914 ftrace_function_enabled = 0;
1908 iter = NULL;
1909 } 1915 }
1916
1917 if (iter->trace && iter->trace->open)
1918 iter->trace->open(iter);
1919
1910 mutex_unlock(&trace_types_lock); 1920 mutex_unlock(&trace_types_lock);
1911 1921
1912 out: 1922 out:
1913 return iter; 1923 return iter;
1924
1925 fail_buffer:
1926 for_each_tracing_cpu(cpu) {
1927 if (iter->buffer_iter[cpu])
1928 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1929 }
1930 mutex_unlock(&trace_types_lock);
1931
1932 return ERR_PTR(-ENOMEM);
1914} 1933}
1915 1934
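__tracing_open() now builds one ring_buffer_iter per tracing CPU up front and unwinds them through the fail_buffer label if any allocation fails; tracing_release() below pairs every ring_buffer_read_start() with a ring_buffer_read_finish(). A minimal sketch of that per-CPU iterator lifecycle, using only the calls visible in this hunk (my_count_busy_cpus() and its trace_array argument are illustrative):

/* Hedged sketch: walk each CPU's buffer read-only and count the CPUs
 * that still hold unread entries.  Every read_start() is paired with a
 * read_finish(), mirroring __tracing_open()/tracing_release() above. */
static int my_count_busy_cpus(struct trace_array *tr)
{
	struct ring_buffer_iter *iter;
	int cpu, busy = 0;

	for_each_tracing_cpu(cpu) {
		iter = ring_buffer_read_start(tr->buffer, cpu);
		if (!iter)
			continue;	/* treat allocation failure as nothing to read */
		if (!ring_buffer_iter_empty(iter))
			busy++;
		ring_buffer_read_finish(iter);
	}
	return busy;
}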
1916int tracing_open_generic(struct inode *inode, struct file *filp) 1935int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -1926,8 +1945,14 @@ int tracing_release(struct inode *inode, struct file *file)
1926{ 1945{
1927 struct seq_file *m = (struct seq_file *)file->private_data; 1946 struct seq_file *m = (struct seq_file *)file->private_data;
1928 struct trace_iterator *iter = m->private; 1947 struct trace_iterator *iter = m->private;
1948 int cpu;
1929 1949
1930 mutex_lock(&trace_types_lock); 1950 mutex_lock(&trace_types_lock);
1951 for_each_tracing_cpu(cpu) {
1952 if (iter->buffer_iter[cpu])
1953 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1954 }
1955
1931 if (iter->trace && iter->trace->close) 1956 if (iter->trace && iter->trace->close)
1932 iter->trace->close(iter); 1957 iter->trace->close(iter);
1933 1958
@@ -2352,9 +2377,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2352 struct tracer *t; 2377 struct tracer *t;
2353 char buf[max_tracer_type_len+1]; 2378 char buf[max_tracer_type_len+1];
2354 int i; 2379 int i;
2380 size_t ret;
2355 2381
2356 if (cnt > max_tracer_type_len) 2382 if (cnt > max_tracer_type_len)
2357 cnt = max_tracer_type_len; 2383 cnt = max_tracer_type_len;
2384 ret = cnt;
2358 2385
2359 if (copy_from_user(&buf, ubuf, cnt)) 2386 if (copy_from_user(&buf, ubuf, cnt))
2360 return -EFAULT; 2387 return -EFAULT;
@@ -2370,7 +2397,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2370 if (strcmp(t->name, buf) == 0) 2397 if (strcmp(t->name, buf) == 0)
2371 break; 2398 break;
2372 } 2399 }
2373 if (!t || t == current_trace) 2400 if (!t) {
2401 ret = -EINVAL;
2402 goto out;
2403 }
2404 if (t == current_trace)
2374 goto out; 2405 goto out;
2375 2406
2376 if (current_trace && current_trace->reset) 2407 if (current_trace && current_trace->reset)
@@ -2383,9 +2414,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 out: 2414 out:
2384 mutex_unlock(&trace_types_lock); 2415 mutex_unlock(&trace_types_lock);
2385 2416
2386 filp->f_pos += cnt; 2417 if (ret == cnt)
2418 filp->f_pos += cnt;
2387 2419
2388 return cnt; 2420 return ret;
2389} 2421}
2390 2422
2391static ssize_t 2423static ssize_t
@@ -2500,20 +2532,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2500 size_t cnt, loff_t *ppos) 2532 size_t cnt, loff_t *ppos)
2501{ 2533{
2502 struct trace_iterator *iter = filp->private_data; 2534 struct trace_iterator *iter = filp->private_data;
2503 struct trace_array_cpu *data;
2504 static cpumask_t mask;
2505 unsigned long flags;
2506#ifdef CONFIG_FTRACE
2507 int ftrace_save;
2508#endif
2509 int cpu;
2510 ssize_t sret; 2535 ssize_t sret;
2511 2536
2512 /* return any leftover data */ 2537 /* return any leftover data */
2513 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2538 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2514 if (sret != -EBUSY) 2539 if (sret != -EBUSY)
2515 return sret; 2540 return sret;
2516 sret = 0;
2517 2541
2518 trace_seq_reset(&iter->seq); 2542 trace_seq_reset(&iter->seq);
2519 2543
@@ -2524,6 +2548,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2524 goto out; 2548 goto out;
2525 } 2549 }
2526 2550
2551waitagain:
2552 sret = 0;
2527 while (trace_empty(iter)) { 2553 while (trace_empty(iter)) {
2528 2554
2529 if ((filp->f_flags & O_NONBLOCK)) { 2555 if ((filp->f_flags & O_NONBLOCK)) {
@@ -2588,46 +2614,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2588 offsetof(struct trace_iterator, seq)); 2614 offsetof(struct trace_iterator, seq));
2589 iter->pos = -1; 2615 iter->pos = -1;
2590 2616
2591 /*
2592 * We need to stop all tracing on all CPUS to read the
2593 * the next buffer. This is a bit expensive, but is
2594 * not done often. We fill all what we can read,
2595 * and then release the locks again.
2596 */
2597
2598 cpus_clear(mask);
2599 local_irq_save(flags);
2600#ifdef CONFIG_FTRACE
2601 ftrace_save = ftrace_enabled;
2602 ftrace_enabled = 0;
2603#endif
2604 smp_wmb();
2605 for_each_tracing_cpu(cpu) {
2606 data = iter->tr->data[cpu];
2607
2608 if (!head_page(data) || !data->trace_idx)
2609 continue;
2610
2611 atomic_inc(&data->disabled);
2612 cpu_set(cpu, mask);
2613 }
2614
2615 for_each_cpu_mask(cpu, mask) {
2616 data = iter->tr->data[cpu];
2617 __raw_spin_lock(&data->lock);
2618
2619 if (data->overrun > iter->last_overrun[cpu])
2620 iter->overrun[cpu] +=
2621 data->overrun - iter->last_overrun[cpu];
2622 iter->last_overrun[cpu] = data->overrun;
2623 }
2624
2625 while (find_next_entry_inc(iter) != NULL) { 2617 while (find_next_entry_inc(iter) != NULL) {
2626 int ret; 2618 enum print_line_t ret;
2627 int len = iter->seq.len; 2619 int len = iter->seq.len;
2628 2620
2629 ret = print_trace_line(iter); 2621 ret = print_trace_line(iter);
2630 if (!ret) { 2622 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2631 /* don't print partial lines */ 2623 /* don't print partial lines */
2632 iter->seq.len = len; 2624 iter->seq.len = len;
2633 break; 2625 break;
@@ -2639,26 +2631,17 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2639 break; 2631 break;
2640 } 2632 }
2641 2633
2642 for_each_cpu_mask(cpu, mask) {
2643 data = iter->tr->data[cpu];
2644 __raw_spin_unlock(&data->lock);
2645 }
2646
2647 for_each_cpu_mask(cpu, mask) {
2648 data = iter->tr->data[cpu];
2649 atomic_dec(&data->disabled);
2650 }
2651#ifdef CONFIG_FTRACE
2652 ftrace_enabled = ftrace_save;
2653#endif
2654 local_irq_restore(flags);
2655
2656 /* Now copy what we have to the user */ 2634 /* Now copy what we have to the user */
2657 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2635 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2658 if (iter->seq.readpos >= iter->seq.len) 2636 if (iter->seq.readpos >= iter->seq.len)
2659 trace_seq_reset(&iter->seq); 2637 trace_seq_reset(&iter->seq);
2638
2639 /*
 2640 * If there was nothing to send to the user, despite consuming trace
2641 * entries, go back to wait for more entries.
2642 */
2660 if (sret == -EBUSY) 2643 if (sret == -EBUSY)
2661 sret = 0; 2644 goto waitagain;
2662 2645
2663out: 2646out:
2664 mutex_unlock(&trace_types_lock); 2647 mutex_unlock(&trace_types_lock);
@@ -2684,7 +2667,8 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2684{ 2667{
2685 unsigned long val; 2668 unsigned long val;
2686 char buf[64]; 2669 char buf[64];
2687 int i, ret; 2670 int ret;
2671 struct trace_array *tr = filp->private_data;
2688 2672
2689 if (cnt >= sizeof(buf)) 2673 if (cnt >= sizeof(buf))
2690 return -EINVAL; 2674 return -EINVAL;
@@ -2704,59 +2688,38 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2704 2688
2705 mutex_lock(&trace_types_lock); 2689 mutex_lock(&trace_types_lock);
2706 2690
2707 if (current_trace != &no_tracer) { 2691 if (tr->ctrl) {
2708 cnt = -EBUSY; 2692 cnt = -EBUSY;
2709 pr_info("ftrace: set current_tracer to none" 2693 pr_info("ftrace: please disable tracing"
2710 " before modifying buffer size\n"); 2694 " before modifying buffer size\n");
2711 goto out; 2695 goto out;
2712 } 2696 }
2713 2697
2714 if (val > global_trace.entries) { 2698 if (val != global_trace.entries) {
2715 long pages_requested; 2699 ret = ring_buffer_resize(global_trace.buffer, val);
2716 unsigned long freeable_pages; 2700 if (ret < 0) {
2717 2701 cnt = ret;
2718 /* make sure we have enough memory before mapping */
2719 pages_requested =
2720 (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
2721
2722 /* account for each buffer (and max_tr) */
2723 pages_requested *= tracing_nr_buffers * 2;
2724
2725 /* Check for overflow */
2726 if (pages_requested < 0) {
2727 cnt = -ENOMEM;
2728 goto out;
2729 }
2730
2731 freeable_pages = determine_dirtyable_memory();
2732
2733 /* we only allow to request 1/4 of useable memory */
2734 if (pages_requested >
2735 ((freeable_pages + tracing_pages_allocated) / 4)) {
2736 cnt = -ENOMEM;
2737 goto out; 2702 goto out;
2738 } 2703 }
2739 2704
2740 while (global_trace.entries < val) { 2705 ret = ring_buffer_resize(max_tr.buffer, val);
2741 if (trace_alloc_page()) { 2706 if (ret < 0) {
2742 cnt = -ENOMEM; 2707 int r;
2743 goto out; 2708 cnt = ret;
2709 r = ring_buffer_resize(global_trace.buffer,
2710 global_trace.entries);
2711 if (r < 0) {
2712 /* AARGH! We are left with different
2713 * size max buffer!!!! */
2714 WARN_ON(1);
2715 tracing_disabled = 1;
2744 } 2716 }
2745 /* double check that we don't go over the known pages */ 2717 goto out;
2746 if (tracing_pages_allocated > pages_requested)
2747 break;
2748 } 2718 }
2749 2719
2750 } else { 2720 global_trace.entries = val;
2751 /* include the number of entries in val (inc of page entries) */
2752 while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
2753 trace_free_page();
2754 } 2721 }
2755 2722
2756 /* check integrity */
2757 for_each_tracing_cpu(i)
2758 check_pages(global_trace.data[i]);
2759
2760 filp->f_pos += cnt; 2723 filp->f_pos += cnt;
2761 2724
2762 /* If check pages failed, return ENOMEM */ 2725 /* If check pages failed, return ENOMEM */
@@ -2769,6 +2732,52 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2769 return cnt; 2732 return cnt;
2770} 2733}
2771 2734
2735static int mark_printk(const char *fmt, ...)
2736{
2737 int ret;
2738 va_list args;
2739 va_start(args, fmt);
2740 ret = trace_vprintk(0, fmt, args);
2741 va_end(args);
2742 return ret;
2743}
2744
2745static ssize_t
2746tracing_mark_write(struct file *filp, const char __user *ubuf,
2747 size_t cnt, loff_t *fpos)
2748{
2749 char *buf;
2750 char *end;
2751 struct trace_array *tr = &global_trace;
2752
2753 if (!tr->ctrl || tracing_disabled)
2754 return -EINVAL;
2755
2756 if (cnt > TRACE_BUF_SIZE)
2757 cnt = TRACE_BUF_SIZE;
2758
2759 buf = kmalloc(cnt + 1, GFP_KERNEL);
2760 if (buf == NULL)
2761 return -ENOMEM;
2762
2763 if (copy_from_user(buf, ubuf, cnt)) {
2764 kfree(buf);
2765 return -EFAULT;
2766 }
2767
2768 /* Cut from the first nil or newline. */
2769 buf[cnt] = '\0';
2770 end = strchr(buf, '\n');
2771 if (end)
2772 *end = '\0';
2773
2774 cnt = mark_printk("%s\n", buf);
2775 kfree(buf);
2776 *fpos += cnt;
2777
2778 return cnt;
2779}
2780
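tracing_mark_write() is the backend of the new trace_marker debugfs file created further down: user space writes a line, the kernel cuts it at the first '\0' or '\n', and mark_printk() injects it into the ring buffer as a print entry. Writes are rejected with -EINVAL while tracing is off (tr->ctrl clear) or tracing_disabled is set. A small user-space sketch (the debugfs mount point is an assumption; adjust it to wherever debugfs is mounted):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "app: frame 42 start";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return 1;
	/* One annotation per write(); the kernel appends its own newline. */
	if (write(fd, msg, strlen(msg)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}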
2772static struct file_operations tracing_max_lat_fops = { 2781static struct file_operations tracing_max_lat_fops = {
2773 .open = tracing_open_generic, 2782 .open = tracing_open_generic,
2774 .read = tracing_max_lat_read, 2783 .read = tracing_max_lat_read,
@@ -2800,6 +2809,11 @@ static struct file_operations tracing_entries_fops = {
2800 .write = tracing_entries_write, 2809 .write = tracing_entries_write,
2801}; 2810};
2802 2811
2812static struct file_operations tracing_mark_fops = {
2813 .open = tracing_open_generic,
2814 .write = tracing_mark_write,
2815};
2816
2803#ifdef CONFIG_DYNAMIC_FTRACE 2817#ifdef CONFIG_DYNAMIC_FTRACE
2804 2818
2805static ssize_t 2819static ssize_t
@@ -2846,7 +2860,7 @@ struct dentry *tracing_init_dentry(void)
2846#include "trace_selftest.c" 2860#include "trace_selftest.c"
2847#endif 2861#endif
2848 2862
2849static __init void tracer_init_debugfs(void) 2863static __init int tracer_init_debugfs(void)
2850{ 2864{
2851 struct dentry *d_tracer; 2865 struct dentry *d_tracer;
2852 struct dentry *entry; 2866 struct dentry *entry;
@@ -2881,12 +2895,12 @@ static __init void tracer_init_debugfs(void)
2881 entry = debugfs_create_file("available_tracers", 0444, d_tracer, 2895 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2882 &global_trace, &show_traces_fops); 2896 &global_trace, &show_traces_fops);
2883 if (!entry) 2897 if (!entry)
2884 pr_warning("Could not create debugfs 'trace' entry\n"); 2898 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2885 2899
2886 entry = debugfs_create_file("current_tracer", 0444, d_tracer, 2900 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2887 &global_trace, &set_tracer_fops); 2901 &global_trace, &set_tracer_fops);
2888 if (!entry) 2902 if (!entry)
2889 pr_warning("Could not create debugfs 'trace' entry\n"); 2903 pr_warning("Could not create debugfs 'current_tracer' entry\n");
2890 2904
2891 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, 2905 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2892 &tracing_max_latency, 2906 &tracing_max_latency,
@@ -2899,7 +2913,7 @@ static __init void tracer_init_debugfs(void)
2899 &tracing_thresh, &tracing_max_lat_fops); 2913 &tracing_thresh, &tracing_max_lat_fops);
2900 if (!entry) 2914 if (!entry)
2901 pr_warning("Could not create debugfs " 2915 pr_warning("Could not create debugfs "
2902 "'tracing_threash' entry\n"); 2916 "'tracing_thresh' entry\n");
2903 entry = debugfs_create_file("README", 0644, d_tracer, 2917 entry = debugfs_create_file("README", 0644, d_tracer,
2904 NULL, &tracing_readme_fops); 2918 NULL, &tracing_readme_fops);
2905 if (!entry) 2919 if (!entry)
@@ -2909,13 +2923,19 @@ static __init void tracer_init_debugfs(void)
2909 NULL, &tracing_pipe_fops); 2923 NULL, &tracing_pipe_fops);
2910 if (!entry) 2924 if (!entry)
2911 pr_warning("Could not create debugfs " 2925 pr_warning("Could not create debugfs "
2912 "'tracing_threash' entry\n"); 2926 "'trace_pipe' entry\n");
2913 2927
2914 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 2928 entry = debugfs_create_file("trace_entries", 0644, d_tracer,
2915 &global_trace, &tracing_entries_fops); 2929 &global_trace, &tracing_entries_fops);
2916 if (!entry) 2930 if (!entry)
2917 pr_warning("Could not create debugfs " 2931 pr_warning("Could not create debugfs "
2918 "'tracing_threash' entry\n"); 2932 "'trace_entries' entry\n");
2933
2934 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2935 NULL, &tracing_mark_fops);
2936 if (!entry)
2937 pr_warning("Could not create debugfs "
2938 "'trace_marker' entry\n");
2919 2939
2920#ifdef CONFIG_DYNAMIC_FTRACE 2940#ifdef CONFIG_DYNAMIC_FTRACE
2921 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 2941 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
@@ -2928,230 +2948,263 @@ static __init void tracer_init_debugfs(void)
2928#ifdef CONFIG_SYSPROF_TRACER 2948#ifdef CONFIG_SYSPROF_TRACER
2929 init_tracer_sysprof_debugfs(d_tracer); 2949 init_tracer_sysprof_debugfs(d_tracer);
2930#endif 2950#endif
2951 return 0;
2931} 2952}
2932 2953
2933static int trace_alloc_page(void) 2954int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2934{ 2955{
2956 static DEFINE_SPINLOCK(trace_buf_lock);
2957 static char trace_buf[TRACE_BUF_SIZE];
2958
2959 struct ring_buffer_event *event;
2960 struct trace_array *tr = &global_trace;
2935 struct trace_array_cpu *data; 2961 struct trace_array_cpu *data;
2936 struct page *page, *tmp; 2962 struct print_entry *entry;
2937 LIST_HEAD(pages); 2963 unsigned long flags, irq_flags;
2938 void *array; 2964 int cpu, len = 0, size, pc;
2939 unsigned pages_allocated = 0;
2940 int i;
2941 2965
2942 /* first allocate a page for each CPU */ 2966 if (!tr->ctrl || tracing_disabled)
2943 for_each_tracing_cpu(i) { 2967 return 0;
2944 array = (void *)__get_free_page(GFP_KERNEL);
2945 if (array == NULL) {
2946 printk(KERN_ERR "tracer: failed to allocate page"
2947 "for trace buffer!\n");
2948 goto free_pages;
2949 }
2950 2968
2951 pages_allocated++; 2969 pc = preempt_count();
2952 page = virt_to_page(array); 2970 preempt_disable_notrace();
2953 list_add(&page->lru, &pages); 2971 cpu = raw_smp_processor_id();
2972 data = tr->data[cpu];
2954 2973
2955/* Only allocate if we are actually using the max trace */ 2974 if (unlikely(atomic_read(&data->disabled)))
2956#ifdef CONFIG_TRACER_MAX_TRACE 2975 goto out;
2957 array = (void *)__get_free_page(GFP_KERNEL);
2958 if (array == NULL) {
2959 printk(KERN_ERR "tracer: failed to allocate page"
2960 "for trace buffer!\n");
2961 goto free_pages;
2962 }
2963 pages_allocated++;
2964 page = virt_to_page(array);
2965 list_add(&page->lru, &pages);
2966#endif
2967 }
2968 2976
2969 /* Now that we successfully allocate a page per CPU, add them */ 2977 spin_lock_irqsave(&trace_buf_lock, flags);
2970 for_each_tracing_cpu(i) { 2978 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
2971 data = global_trace.data[i];
2972 page = list_entry(pages.next, struct page, lru);
2973 list_del_init(&page->lru);
2974 list_add_tail(&page->lru, &data->trace_pages);
2975 ClearPageLRU(page);
2976 2979
2977#ifdef CONFIG_TRACER_MAX_TRACE 2980 len = min(len, TRACE_BUF_SIZE-1);
2978 data = max_tr.data[i]; 2981 trace_buf[len] = 0;
2979 page = list_entry(pages.next, struct page, lru);
2980 list_del_init(&page->lru);
2981 list_add_tail(&page->lru, &data->trace_pages);
2982 SetPageLRU(page);
2983#endif
2984 }
2985 tracing_pages_allocated += pages_allocated;
2986 global_trace.entries += ENTRIES_PER_PAGE;
2987 2982
2988 return 0; 2983 size = sizeof(*entry) + len + 1;
2984 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
2985 if (!event)
2986 goto out_unlock;
2987 entry = ring_buffer_event_data(event);
2988 tracing_generic_entry_update(&entry->ent, flags, pc);
2989 entry->ent.type = TRACE_PRINT;
2990 entry->ip = ip;
2989 2991
2990 free_pages: 2992 memcpy(&entry->buf, trace_buf, len);
2991 list_for_each_entry_safe(page, tmp, &pages, lru) { 2993 entry->buf[len] = 0;
2992 list_del_init(&page->lru); 2994 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
2993 __free_page(page); 2995
2994 } 2996 out_unlock:
2995 return -ENOMEM; 2997 spin_unlock_irqrestore(&trace_buf_lock, flags);
2998
2999 out:
3000 preempt_enable_notrace();
3001
3002 return len;
2996} 3003}
3004EXPORT_SYMBOL_GPL(trace_vprintk);
2997 3005
2998static int trace_free_page(void) 3006int __ftrace_printk(unsigned long ip, const char *fmt, ...)
2999{ 3007{
3000 struct trace_array_cpu *data; 3008 int ret;
3001 struct page *page; 3009 va_list ap;
3002 struct list_head *p;
3003 int i;
3004 int ret = 0;
3005 3010
3006 /* free one page from each buffer */ 3011 if (!(trace_flags & TRACE_ITER_PRINTK))
3007 for_each_tracing_cpu(i) { 3012 return 0;
3008 data = global_trace.data[i];
3009 p = data->trace_pages.next;
3010 if (p == &data->trace_pages) {
3011 /* should never happen */
3012 WARN_ON(1);
3013 tracing_disabled = 1;
3014 ret = -1;
3015 break;
3016 }
3017 page = list_entry(p, struct page, lru);
3018 ClearPageLRU(page);
3019 list_del(&page->lru);
3020 tracing_pages_allocated--;
3021 tracing_pages_allocated--;
3022 __free_page(page);
3023 3013
3024 tracing_reset(data); 3014 va_start(ap, fmt);
3015 ret = trace_vprintk(ip, fmt, ap);
3016 va_end(ap);
3017 return ret;
3018}
3019EXPORT_SYMBOL_GPL(__ftrace_printk);
3025 3020
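trace_vprintk() formats into a static buffer under trace_buf_lock and commits the text as a TRACE_PRINT event; __ftrace_printk() is the varargs front end and bails out early unless the TRACE_ITER_PRINTK iterator flag is set. In-kernel callers normally reach it through the ftrace_printk() convenience macro from <linux/ftrace.h>, which supplies the call-site ip (treat that wrapper as an assumption here, since the header is not part of this hunk). A hedged sketch (my_driver_poll() is hypothetical):

#include <linux/ftrace.h>

/* Hypothetical caller: the message becomes a TRACE_PRINT entry and is
 * silently dropped unless the "printk" trace option is enabled. */
static void my_driver_poll(int queue, int budget)
{
	ftrace_printk("polling queue %d, budget %d\n", queue, budget);
}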
3026#ifdef CONFIG_TRACER_MAX_TRACE 3021static int trace_panic_handler(struct notifier_block *this,
3027 data = max_tr.data[i]; 3022 unsigned long event, void *unused)
3028 p = data->trace_pages.next; 3023{
3029 if (p == &data->trace_pages) { 3024 ftrace_dump();
3030 /* should never happen */ 3025 return NOTIFY_OK;
3031 WARN_ON(1); 3026}
3032 tracing_disabled = 1;
3033 ret = -1;
3034 break;
3035 }
3036 page = list_entry(p, struct page, lru);
3037 ClearPageLRU(page);
3038 list_del(&page->lru);
3039 __free_page(page);
3040 3027
3041 tracing_reset(data); 3028static struct notifier_block trace_panic_notifier = {
3042#endif 3029 .notifier_call = trace_panic_handler,
3043 } 3030 .next = NULL,
3044 global_trace.entries -= ENTRIES_PER_PAGE; 3031 .priority = 150 /* priority: INT_MAX >= x >= 0 */
3032};
3045 3033
3046 return ret; 3034static int trace_die_handler(struct notifier_block *self,
3035 unsigned long val,
3036 void *data)
3037{
3038 switch (val) {
3039 case DIE_OOPS:
3040 ftrace_dump();
3041 break;
3042 default:
3043 break;
3044 }
3045 return NOTIFY_OK;
3047} 3046}
3048 3047
3049__init static int tracer_alloc_buffers(void) 3048static struct notifier_block trace_die_notifier = {
3049 .notifier_call = trace_die_handler,
3050 .priority = 200
3051};
3052
3053/*
3054 * printk is set to max of 1024, we really don't need it that big.
3055 * Nothing should be printing 1000 characters anyway.
3056 */
3057#define TRACE_MAX_PRINT 1000
3058
3059/*
3060 * Define here KERN_TRACE so that we have one place to modify
3061 * it if we decide to change what log level the ftrace dump
3062 * should be at.
3063 */
3064#define KERN_TRACE KERN_INFO
3065
3066static void
3067trace_printk_seq(struct trace_seq *s)
3050{ 3068{
3051 struct trace_array_cpu *data; 3069 /* Probably should print a warning here. */
3052 void *array; 3070 if (s->len >= 1000)
3053 struct page *page; 3071 s->len = 1000;
3054 int pages = 0;
3055 int ret = -ENOMEM;
3056 int i;
3057 3072
3058 /* TODO: make the number of buffers hot pluggable with CPUS */ 3073 /* should be zero ended, but we are paranoid. */
3059 tracing_nr_buffers = num_possible_cpus(); 3074 s->buffer[s->len] = 0;
3060 tracing_buffer_mask = cpu_possible_map;
3061 3075
3062 /* Allocate the first page for all buffers */ 3076 printk(KERN_TRACE "%s", s->buffer);
3063 for_each_tracing_cpu(i) {
3064 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3065 max_tr.data[i] = &per_cpu(max_data, i);
3066 3077
3067 array = (void *)__get_free_page(GFP_KERNEL); 3078 trace_seq_reset(s);
3068 if (array == NULL) { 3079}
3069 printk(KERN_ERR "tracer: failed to allocate page" 3080
3070 "for trace buffer!\n"); 3081
3071 goto free_buffers; 3082void ftrace_dump(void)
3072 } 3083{
3084 static DEFINE_SPINLOCK(ftrace_dump_lock);
3085 /* use static because iter can be a bit big for the stack */
3086 static struct trace_iterator iter;
3087 static cpumask_t mask;
3088 static int dump_ran;
3089 unsigned long flags;
3090 int cnt = 0, cpu;
3073 3091
3074 /* set the array to the list */ 3092 /* only one dump */
3075 INIT_LIST_HEAD(&data->trace_pages); 3093 spin_lock_irqsave(&ftrace_dump_lock, flags);
3076 page = virt_to_page(array); 3094 if (dump_ran)
3077 list_add(&page->lru, &data->trace_pages); 3095 goto out;
3078 /* use the LRU flag to differentiate the two buffers */
3079 ClearPageLRU(page);
3080 3096
3081 data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 3097 dump_ran = 1;
3082 max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3083 3098
3084/* Only allocate if we are actually using the max trace */ 3099 /* No turning back! */
3085#ifdef CONFIG_TRACER_MAX_TRACE 3100 ftrace_kill_atomic();
3086 array = (void *)__get_free_page(GFP_KERNEL);
3087 if (array == NULL) {
3088 printk(KERN_ERR "tracer: failed to allocate page"
3089 "for trace buffer!\n");
3090 goto free_buffers;
3091 }
3092 3101
3093 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); 3102 for_each_tracing_cpu(cpu) {
3094 page = virt_to_page(array); 3103 atomic_inc(&global_trace.data[cpu]->disabled);
3095 list_add(&page->lru, &max_tr.data[i]->trace_pages);
3096 SetPageLRU(page);
3097#endif
3098 } 3104 }
3099 3105
3106 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3107
3108 iter.tr = &global_trace;
3109 iter.trace = current_trace;
3110
3100 /* 3111 /*
 3101 * Since we allocate by orders of pages, we may be able to 3112 * We need to stop all tracing on all CPUs to read
3102 * round up a bit. 3113 * the next buffer. This is a bit expensive, but is
 3114 * not done often. We fill all that we can read,
3115 * and then release the locks again.
3103 */ 3116 */
3104 global_trace.entries = ENTRIES_PER_PAGE;
3105 pages++;
3106 3117
3107 while (global_trace.entries < trace_nr_entries) { 3118 cpus_clear(mask);
3108 if (trace_alloc_page()) 3119
3109 break; 3120 while (!trace_empty(&iter)) {
3110 pages++; 3121
3122 if (!cnt)
3123 printk(KERN_TRACE "---------------------------------\n");
3124
3125 cnt++;
3126
3127 /* reset all but tr, trace, and overruns */
3128 memset(&iter.seq, 0,
3129 sizeof(struct trace_iterator) -
3130 offsetof(struct trace_iterator, seq));
3131 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3132 iter.pos = -1;
3133
3134 if (find_next_entry_inc(&iter) != NULL) {
3135 print_trace_line(&iter);
3136 trace_consume(&iter);
3137 }
3138
3139 trace_printk_seq(&iter.seq);
3111 } 3140 }
3112 max_tr.entries = global_trace.entries;
3113 3141
3114 pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n", 3142 if (!cnt)
3115 pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE); 3143 printk(KERN_TRACE " (ftrace buffer empty)\n");
3116 pr_info(" actual entries %ld\n", global_trace.entries); 3144 else
3145 printk(KERN_TRACE "---------------------------------\n");
3146
3147 out:
3148 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3149}
3150
3151__init static int tracer_alloc_buffers(void)
3152{
3153 struct trace_array_cpu *data;
3154 int i;
3155
3156 /* TODO: make the number of buffers hot pluggable with CPUS */
3157 tracing_buffer_mask = cpu_possible_map;
3158
3159 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3160 TRACE_BUFFER_FLAGS);
3161 if (!global_trace.buffer) {
3162 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3163 WARN_ON(1);
3164 return 0;
3165 }
3166 global_trace.entries = ring_buffer_size(global_trace.buffer);
3117 3167
3118 tracer_init_debugfs(); 3168#ifdef CONFIG_TRACER_MAX_TRACE
3169 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3170 TRACE_BUFFER_FLAGS);
3171 if (!max_tr.buffer) {
3172 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3173 WARN_ON(1);
3174 ring_buffer_free(global_trace.buffer);
3175 return 0;
3176 }
3177 max_tr.entries = ring_buffer_size(max_tr.buffer);
3178 WARN_ON(max_tr.entries != global_trace.entries);
3179#endif
3180
3181 /* Allocate the first page for all buffers */
3182 for_each_tracing_cpu(i) {
3183 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3184 max_tr.data[i] = &per_cpu(max_data, i);
3185 }
3119 3186
3120 trace_init_cmdlines(); 3187 trace_init_cmdlines();
3121 3188
3122 register_tracer(&no_tracer); 3189 register_tracer(&nop_trace);
3123 current_trace = &no_tracer; 3190#ifdef CONFIG_BOOT_TRACER
3191 register_tracer(&boot_tracer);
3192 current_trace = &boot_tracer;
3193 current_trace->init(&global_trace);
3194#else
3195 current_trace = &nop_trace;
3196#endif
3124 3197
3125 /* All seems OK, enable tracing */ 3198 /* All seems OK, enable tracing */
3126 global_trace.ctrl = tracer_enabled; 3199 global_trace.ctrl = tracer_enabled;
3127 tracing_disabled = 0; 3200 tracing_disabled = 0;
3128 3201
3129 return 0; 3202 atomic_notifier_chain_register(&panic_notifier_list,
3203 &trace_panic_notifier);
3130 3204
3131 free_buffers: 3205 register_die_notifier(&trace_die_notifier);
3132 for (i-- ; i >= 0; i--) {
3133 struct page *page, *tmp;
3134 struct trace_array_cpu *data = global_trace.data[i];
3135 3206
3136 if (data) { 3207 return 0;
3137 list_for_each_entry_safe(page, tmp,
3138 &data->trace_pages, lru) {
3139 list_del_init(&page->lru);
3140 __free_page(page);
3141 }
3142 }
3143
3144#ifdef CONFIG_TRACER_MAX_TRACE
3145 data = max_tr.data[i];
3146 if (data) {
3147 list_for_each_entry_safe(page, tmp,
3148 &data->trace_pages, lru) {
3149 list_del_init(&page->lru);
3150 __free_page(page);
3151 }
3152 }
3153#endif
3154 }
3155 return ret;
3156} 3208}
3157fs_initcall(tracer_alloc_buffers); 3209early_initcall(tracer_alloc_buffers);
3210fs_initcall(tracer_init_debugfs);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f69f86788c2b..f1f99572cde7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -5,7 +5,9 @@
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/clocksource.h> 7#include <linux/clocksource.h>
8#include <linux/ring_buffer.h>
8#include <linux/mmiotrace.h> 9#include <linux/mmiotrace.h>
10#include <linux/ftrace.h>
9 11
10enum trace_type { 12enum trace_type {
11 __TRACE_FIRST_TYPE = 0, 13 __TRACE_FIRST_TYPE = 0,
@@ -13,38 +15,60 @@ enum trace_type {
13 TRACE_FN, 15 TRACE_FN,
14 TRACE_CTX, 16 TRACE_CTX,
15 TRACE_WAKE, 17 TRACE_WAKE,
18 TRACE_CONT,
16 TRACE_STACK, 19 TRACE_STACK,
20 TRACE_PRINT,
17 TRACE_SPECIAL, 21 TRACE_SPECIAL,
18 TRACE_MMIO_RW, 22 TRACE_MMIO_RW,
19 TRACE_MMIO_MAP, 23 TRACE_MMIO_MAP,
24 TRACE_BOOT,
20 25
21 __TRACE_LAST_TYPE 26 __TRACE_LAST_TYPE
22}; 27};
23 28
24/* 29/*
30 * The trace entry - the most basic unit of tracing. This is what
31 * is printed in the end as a single line in the trace output, such as:
32 *
33 * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
34 */
35struct trace_entry {
36 unsigned char type;
37 unsigned char cpu;
38 unsigned char flags;
39 unsigned char preempt_count;
40 int pid;
41};
42
43/*
 25 * Function trace entry - function address and parent function address: 44
26 */ 45 */
27struct ftrace_entry { 46struct ftrace_entry {
47 struct trace_entry ent;
28 unsigned long ip; 48 unsigned long ip;
29 unsigned long parent_ip; 49 unsigned long parent_ip;
30}; 50};
51extern struct tracer boot_tracer;
31 52
32/* 53/*
33 * Context switch trace entry - which task (and prio) we switched from/to: 54 * Context switch trace entry - which task (and prio) we switched from/to:
34 */ 55 */
35struct ctx_switch_entry { 56struct ctx_switch_entry {
57 struct trace_entry ent;
36 unsigned int prev_pid; 58 unsigned int prev_pid;
37 unsigned char prev_prio; 59 unsigned char prev_prio;
38 unsigned char prev_state; 60 unsigned char prev_state;
39 unsigned int next_pid; 61 unsigned int next_pid;
40 unsigned char next_prio; 62 unsigned char next_prio;
41 unsigned char next_state; 63 unsigned char next_state;
64 unsigned int next_cpu;
42}; 65};
43 66
44/* 67/*
45 * Special (free-form) trace entry: 68 * Special (free-form) trace entry:
46 */ 69 */
47struct special_entry { 70struct special_entry {
71 struct trace_entry ent;
48 unsigned long arg1; 72 unsigned long arg1;
49 unsigned long arg2; 73 unsigned long arg2;
50 unsigned long arg3; 74 unsigned long arg3;
@@ -57,33 +81,60 @@ struct special_entry {
57#define FTRACE_STACK_ENTRIES 8 81#define FTRACE_STACK_ENTRIES 8
58 82
59struct stack_entry { 83struct stack_entry {
84 struct trace_entry ent;
60 unsigned long caller[FTRACE_STACK_ENTRIES]; 85 unsigned long caller[FTRACE_STACK_ENTRIES];
61}; 86};
62 87
63/* 88/*
64 * The trace entry - the most basic unit of tracing. This is what 89 * ftrace_printk entry:
65 * is printed in the end as a single line in the trace output, such as:
66 *
67 * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
68 */ 90 */
69struct trace_entry { 91struct print_entry {
70 char type; 92 struct trace_entry ent;
71 char cpu; 93 unsigned long ip;
72 char flags; 94 char buf[];
73 char preempt_count; 95};
74 int pid; 96
75 cycle_t t; 97#define TRACE_OLD_SIZE 88
76 union { 98
77 struct ftrace_entry fn; 99struct trace_field_cont {
78 struct ctx_switch_entry ctx; 100 unsigned char type;
79 struct special_entry special; 101 /* Temporary till we get rid of this completely */
80 struct stack_entry stack; 102 char buf[TRACE_OLD_SIZE - 1];
81 struct mmiotrace_rw mmiorw; 103};
82 struct mmiotrace_map mmiomap; 104
83 }; 105struct trace_mmiotrace_rw {
106 struct trace_entry ent;
107 struct mmiotrace_rw rw;
84}; 108};
85 109
86#define TRACE_ENTRY_SIZE sizeof(struct trace_entry) 110struct trace_mmiotrace_map {
111 struct trace_entry ent;
112 struct mmiotrace_map map;
113};
114
115struct trace_boot {
116 struct trace_entry ent;
117 struct boot_trace initcall;
118};
119
120/*
121 * trace_flag_type is an enumeration that holds different
122 * states when a trace occurs. These are:
123 * IRQS_OFF - interrupts were disabled
 124 * NEED_RESCHED - reschedule is requested
125 * HARDIRQ - inside an interrupt handler
126 * SOFTIRQ - inside a softirq handler
127 * CONT - multiple entries hold the trace item
128 */
129enum trace_flag_type {
130 TRACE_FLAG_IRQS_OFF = 0x01,
131 TRACE_FLAG_NEED_RESCHED = 0x02,
132 TRACE_FLAG_HARDIRQ = 0x04,
133 TRACE_FLAG_SOFTIRQ = 0x08,
134 TRACE_FLAG_CONT = 0x10,
135};
136
137#define TRACE_BUF_SIZE 1024
87 138
88/* 139/*
89 * The CPU trace array - it consists of thousands of trace entries 140 * The CPU trace array - it consists of thousands of trace entries
@@ -91,16 +142,9 @@ struct trace_entry {
91 * the trace, etc.) 142 * the trace, etc.)
92 */ 143 */
93struct trace_array_cpu { 144struct trace_array_cpu {
94 struct list_head trace_pages;
95 atomic_t disabled; 145 atomic_t disabled;
96 raw_spinlock_t lock;
97 struct lock_class_key lock_key;
98 146
99 /* these fields get copied into max-trace: */ 147 /* these fields get copied into max-trace: */
100 unsigned trace_head_idx;
101 unsigned trace_tail_idx;
102 void *trace_head; /* producer */
103 void *trace_tail; /* consumer */
104 unsigned long trace_idx; 148 unsigned long trace_idx;
105 unsigned long overrun; 149 unsigned long overrun;
106 unsigned long saved_latency; 150 unsigned long saved_latency;
@@ -124,6 +168,7 @@ struct trace_iterator;
124 * They have on/off state as well: 168 * They have on/off state as well:
125 */ 169 */
126struct trace_array { 170struct trace_array {
171 struct ring_buffer *buffer;
127 unsigned long entries; 172 unsigned long entries;
128 long ctrl; 173 long ctrl;
129 int cpu; 174 int cpu;
@@ -132,6 +177,56 @@ struct trace_array {
132 struct trace_array_cpu *data[NR_CPUS]; 177 struct trace_array_cpu *data[NR_CPUS];
133}; 178};
134 179
180#define FTRACE_CMP_TYPE(var, type) \
181 __builtin_types_compatible_p(typeof(var), type *)
182
183#undef IF_ASSIGN
184#define IF_ASSIGN(var, entry, etype, id) \
185 if (FTRACE_CMP_TYPE(var, etype)) { \
186 var = (typeof(var))(entry); \
187 WARN_ON(id && (entry)->type != id); \
188 break; \
189 }
190
191/* Will cause compile errors if type is not found. */
192extern void __ftrace_bad_type(void);
193
194/*
195 * The trace_assign_type is a verifier that the entry type is
196 * the same as the type being assigned. To add new types simply
197 * add a line with the following format:
198 *
199 * IF_ASSIGN(var, ent, type, id);
200 *
201 * Where "type" is the trace type that includes the trace_entry
202 * as the "ent" item. And "id" is the trace identifier that is
203 * used in the trace_type enum.
204 *
205 * If the type can have more than one id, then use zero.
206 */
207#define trace_assign_type(var, ent) \
208 do { \
209 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
210 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
211 IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
212 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
213 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
214 IF_ASSIGN(var, ent, struct special_entry, 0); \
215 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
216 TRACE_MMIO_RW); \
217 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
218 TRACE_MMIO_MAP); \
219 IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \
220 __ftrace_bad_type(); \
221 } while (0)
222
223/* Return values for print_line callback */
224enum print_line_t {
225 TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
226 TRACE_TYPE_HANDLED = 1,
227 TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */
228};
229
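Taken together, trace_assign_type() and enum print_line_t define the contract for a tracer's print_line callback: verify and cast the entry, emit text into the iterator's trace_seq, and tell print_trace_line() whether the entry was handled, only partially printed, or should fall through to the default formatters. A hedged sketch for a hypothetical TRACE_FOO entry (struct foo_entry, TRACE_FOO and the ->value field are assumptions; a real type would also need its own IF_ASSIGN() line in trace_assign_type() above):

static enum print_line_t foo_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct foo_entry *field;

	if (entry->type != TRACE_FOO)
		return TRACE_TYPE_UNHANDLED;	/* let the core formatters handle it */

	trace_assign_type(field, entry);
	if (!trace_seq_printf(s, "foo: %lu\n", field->value))
		return TRACE_TYPE_PARTIAL_LINE;	/* seq buffer full; retried after a flush */

	return TRACE_TYPE_HANDLED;
}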
135/* 230/*
136 * A specific tracer, represented by methods that operate on a trace array: 231 * A specific tracer, represented by methods that operate on a trace array:
137 */ 232 */
@@ -152,7 +247,7 @@ struct tracer {
152 int (*selftest)(struct tracer *trace, 247 int (*selftest)(struct tracer *trace,
153 struct trace_array *tr); 248 struct trace_array *tr);
154#endif 249#endif
155 int (*print_line)(struct trace_iterator *iter); 250 enum print_line_t (*print_line)(struct trace_iterator *iter);
156 struct tracer *next; 251 struct tracer *next;
157 int print_max; 252 int print_max;
158}; 253};
@@ -171,57 +266,58 @@ struct trace_iterator {
171 struct trace_array *tr; 266 struct trace_array *tr;
172 struct tracer *trace; 267 struct tracer *trace;
173 void *private; 268 void *private;
174 long last_overrun[NR_CPUS]; 269 struct ring_buffer_iter *buffer_iter[NR_CPUS];
175 long overrun[NR_CPUS];
176 270
177 /* The below is zeroed out in pipe_read */ 271 /* The below is zeroed out in pipe_read */
178 struct trace_seq seq; 272 struct trace_seq seq;
179 struct trace_entry *ent; 273 struct trace_entry *ent;
180 int cpu; 274 int cpu;
181 275 u64 ts;
182 struct trace_entry *prev_ent;
183 int prev_cpu;
184 276
185 unsigned long iter_flags; 277 unsigned long iter_flags;
186 loff_t pos; 278 loff_t pos;
187 unsigned long next_idx[NR_CPUS];
188 struct list_head *next_page[NR_CPUS];
189 unsigned next_page_idx[NR_CPUS];
190 long idx; 279 long idx;
191}; 280};
192 281
193void tracing_reset(struct trace_array_cpu *data); 282void trace_wake_up(void);
283void tracing_reset(struct trace_array *tr, int cpu);
194int tracing_open_generic(struct inode *inode, struct file *filp); 284int tracing_open_generic(struct inode *inode, struct file *filp);
195struct dentry *tracing_init_dentry(void); 285struct dentry *tracing_init_dentry(void);
196void init_tracer_sysprof_debugfs(struct dentry *d_tracer); 286void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
197 287
288struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
289 struct trace_array_cpu *data);
290void tracing_generic_entry_update(struct trace_entry *entry,
291 unsigned long flags,
292 int pc);
293
198void ftrace(struct trace_array *tr, 294void ftrace(struct trace_array *tr,
199 struct trace_array_cpu *data, 295 struct trace_array_cpu *data,
200 unsigned long ip, 296 unsigned long ip,
201 unsigned long parent_ip, 297 unsigned long parent_ip,
202 unsigned long flags); 298 unsigned long flags, int pc);
203void tracing_sched_switch_trace(struct trace_array *tr, 299void tracing_sched_switch_trace(struct trace_array *tr,
204 struct trace_array_cpu *data, 300 struct trace_array_cpu *data,
205 struct task_struct *prev, 301 struct task_struct *prev,
206 struct task_struct *next, 302 struct task_struct *next,
207 unsigned long flags); 303 unsigned long flags, int pc);
208void tracing_record_cmdline(struct task_struct *tsk); 304void tracing_record_cmdline(struct task_struct *tsk);
209 305
210void tracing_sched_wakeup_trace(struct trace_array *tr, 306void tracing_sched_wakeup_trace(struct trace_array *tr,
211 struct trace_array_cpu *data, 307 struct trace_array_cpu *data,
212 struct task_struct *wakee, 308 struct task_struct *wakee,
213 struct task_struct *cur, 309 struct task_struct *cur,
214 unsigned long flags); 310 unsigned long flags, int pc);
215void trace_special(struct trace_array *tr, 311void trace_special(struct trace_array *tr,
216 struct trace_array_cpu *data, 312 struct trace_array_cpu *data,
217 unsigned long arg1, 313 unsigned long arg1,
218 unsigned long arg2, 314 unsigned long arg2,
219 unsigned long arg3); 315 unsigned long arg3, int pc);
220void trace_function(struct trace_array *tr, 316void trace_function(struct trace_array *tr,
221 struct trace_array_cpu *data, 317 struct trace_array_cpu *data,
222 unsigned long ip, 318 unsigned long ip,
223 unsigned long parent_ip, 319 unsigned long parent_ip,
224 unsigned long flags); 320 unsigned long flags, int pc);
225 321
226void tracing_start_cmdline_record(void); 322void tracing_start_cmdline_record(void);
227void tracing_stop_cmdline_record(void); 323void tracing_stop_cmdline_record(void);
@@ -268,51 +364,33 @@ extern unsigned long ftrace_update_tot_cnt;
268extern int DYN_FTRACE_TEST_NAME(void); 364extern int DYN_FTRACE_TEST_NAME(void);
269#endif 365#endif
270 366
271#ifdef CONFIG_MMIOTRACE
272extern void __trace_mmiotrace_rw(struct trace_array *tr,
273 struct trace_array_cpu *data,
274 struct mmiotrace_rw *rw);
275extern void __trace_mmiotrace_map(struct trace_array *tr,
276 struct trace_array_cpu *data,
277 struct mmiotrace_map *map);
278#endif
279
280#ifdef CONFIG_FTRACE_STARTUP_TEST 367#ifdef CONFIG_FTRACE_STARTUP_TEST
281#ifdef CONFIG_FTRACE
282extern int trace_selftest_startup_function(struct tracer *trace, 368extern int trace_selftest_startup_function(struct tracer *trace,
283 struct trace_array *tr); 369 struct trace_array *tr);
284#endif
285#ifdef CONFIG_IRQSOFF_TRACER
286extern int trace_selftest_startup_irqsoff(struct tracer *trace, 370extern int trace_selftest_startup_irqsoff(struct tracer *trace,
287 struct trace_array *tr); 371 struct trace_array *tr);
288#endif
289#ifdef CONFIG_PREEMPT_TRACER
290extern int trace_selftest_startup_preemptoff(struct tracer *trace, 372extern int trace_selftest_startup_preemptoff(struct tracer *trace,
291 struct trace_array *tr); 373 struct trace_array *tr);
292#endif
293#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
294extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, 374extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
295 struct trace_array *tr); 375 struct trace_array *tr);
296#endif
297#ifdef CONFIG_SCHED_TRACER
298extern int trace_selftest_startup_wakeup(struct tracer *trace, 376extern int trace_selftest_startup_wakeup(struct tracer *trace,
299 struct trace_array *tr); 377 struct trace_array *tr);
300#endif 378extern int trace_selftest_startup_nop(struct tracer *trace,
301#ifdef CONFIG_CONTEXT_SWITCH_TRACER 379 struct trace_array *tr);
302extern int trace_selftest_startup_sched_switch(struct tracer *trace, 380extern int trace_selftest_startup_sched_switch(struct tracer *trace,
303 struct trace_array *tr); 381 struct trace_array *tr);
304#endif
305#ifdef CONFIG_SYSPROF_TRACER
306extern int trace_selftest_startup_sysprof(struct tracer *trace, 382extern int trace_selftest_startup_sysprof(struct tracer *trace,
307 struct trace_array *tr); 383 struct trace_array *tr);
308#endif
309#endif /* CONFIG_FTRACE_STARTUP_TEST */ 384#endif /* CONFIG_FTRACE_STARTUP_TEST */
310 385
311extern void *head_page(struct trace_array_cpu *data); 386extern void *head_page(struct trace_array_cpu *data);
312extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); 387extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
388extern void trace_seq_print_cont(struct trace_seq *s,
389 struct trace_iterator *iter);
313extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 390extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
314 size_t cnt); 391 size_t cnt);
315extern long ns2usecs(cycle_t nsec); 392extern long ns2usecs(cycle_t nsec);
393extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
316 394
317extern unsigned long trace_flags; 395extern unsigned long trace_flags;
318 396
@@ -334,6 +412,9 @@ enum trace_iterator_flags {
334 TRACE_ITER_BLOCK = 0x80, 412 TRACE_ITER_BLOCK = 0x80,
335 TRACE_ITER_STACKTRACE = 0x100, 413 TRACE_ITER_STACKTRACE = 0x100,
336 TRACE_ITER_SCHED_TREE = 0x200, 414 TRACE_ITER_SCHED_TREE = 0x200,
415 TRACE_ITER_PRINTK = 0x400,
337}; 416};
338 417
418extern struct tracer nop_trace;
419
339#endif /* _LINUX_KERNEL_TRACE_H */ 420#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
new file mode 100644
index 000000000000..d0a5e50eeff2
--- /dev/null
+++ b/kernel/trace/trace_boot.c
@@ -0,0 +1,126 @@
1/*
2 * ring buffer based initcalls tracer
3 *
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 *
6 */
7
8#include <linux/init.h>
9#include <linux/debugfs.h>
10#include <linux/ftrace.h>
11#include <linux/kallsyms.h>
12
13#include "trace.h"
14
15static struct trace_array *boot_trace;
16static int trace_boot_enabled;
17
18
19/* Should be started after do_pre_smp_initcalls() in init/main.c */
20void start_boot_trace(void)
21{
22 trace_boot_enabled = 1;
23}
24
25void stop_boot_trace(void)
26{
27 trace_boot_enabled = 0;
28}
29
30void reset_boot_trace(struct trace_array *tr)
31{
32 stop_boot_trace();
33}
34
35static void boot_trace_init(struct trace_array *tr)
36{
37 int cpu;
38 boot_trace = tr;
39
40 trace_boot_enabled = 0;
41
42 for_each_cpu_mask(cpu, cpu_possible_map)
43 tracing_reset(tr, cpu);
44}
45
46static void boot_trace_ctrl_update(struct trace_array *tr)
47{
48 if (tr->ctrl)
49 start_boot_trace();
50 else
51 stop_boot_trace();
52}
53
54static enum print_line_t initcall_print_line(struct trace_iterator *iter)
55{
56 int ret;
57 struct trace_entry *entry = iter->ent;
58 struct trace_boot *field = (struct trace_boot *)entry;
59 struct boot_trace *it = &field->initcall;
60 struct trace_seq *s = &iter->seq;
61 struct timespec calltime = ktime_to_timespec(it->calltime);
62 struct timespec rettime = ktime_to_timespec(it->rettime);
63
64 if (entry->type == TRACE_BOOT) {
65 ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
66 calltime.tv_sec,
67 calltime.tv_nsec,
68 it->func, it->caller);
69 if (!ret)
70 return TRACE_TYPE_PARTIAL_LINE;
71
72 ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
73 "returned %d after %lld msecs\n",
74 rettime.tv_sec,
75 rettime.tv_nsec,
76 it->func, it->result, it->duration);
77
78 if (!ret)
79 return TRACE_TYPE_PARTIAL_LINE;
80 return TRACE_TYPE_HANDLED;
81 }
82 return TRACE_TYPE_UNHANDLED;
83}
84
85struct tracer boot_tracer __read_mostly =
86{
87 .name = "initcall",
88 .init = boot_trace_init,
89 .reset = reset_boot_trace,
90 .ctrl_update = boot_trace_ctrl_update,
91 .print_line = initcall_print_line,
92};
93
94void trace_boot(struct boot_trace *it, initcall_t fn)
95{
96 struct ring_buffer_event *event;
97 struct trace_boot *entry;
98 struct trace_array_cpu *data;
99 unsigned long irq_flags;
100 struct trace_array *tr = boot_trace;
101
102 if (!trace_boot_enabled)
103 return;
104
105 /* Get its name now since this function could
106 * disappear because it is in the .init section.
107 */
108 sprint_symbol(it->func, (unsigned long)fn);
109 preempt_disable();
110 data = tr->data[smp_processor_id()];
111
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
113 &irq_flags);
114 if (!event)
115 goto out;
116 entry = ring_buffer_event_data(event);
117 tracing_generic_entry_update(&entry->ent, 0, 0);
118 entry->ent.type = TRACE_BOOT;
119 entry->initcall = *it;
120 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
121
122 trace_wake_up();
123
124 out:
125 preempt_enable();
126}
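trace_boot() expects a boot_trace record that the caller has already timed; it resolves the symbol name itself (via sprint_symbol()) because the initcall lives in .init text that may be discarded later, and it only logs after start_boot_trace() has been called. A hedged sketch of such a caller on the initcall dispatch path (do_one_initcall_traced() is hypothetical; treating ->caller as the current pid and ->duration as milliseconds follows the format strings in initcall_print_line() above rather than a definition shown here):

static int do_one_initcall_traced(initcall_t fn)
{
	struct boot_trace it;

	it.caller = task_pid_nr(current);	/* printed after the '@' above */
	it.calltime = ktime_get();
	it.result = fn();
	it.rettime = ktime_get();
	it.duration = ktime_to_ns(ktime_sub(it.rettime, it.calltime))
			/ NSEC_PER_MSEC;

	trace_boot(&it, fn);			/* becomes a TRACE_BOOT entry */
	return it.result;
}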
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 312144897970..e90eb0c2c56c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -23,7 +23,7 @@ static void function_reset(struct trace_array *tr)
23 tr->time_start = ftrace_now(tr->cpu); 23 tr->time_start = ftrace_now(tr->cpu);
24 24
25 for_each_online_cpu(cpu) 25 for_each_online_cpu(cpu)
26 tracing_reset(tr->data[cpu]); 26 tracing_reset(tr, cpu);
27} 27}
28 28
29static void start_function_trace(struct trace_array *tr) 29static void start_function_trace(struct trace_array *tr)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index ece6cfb649fa..a7db7f040ae0 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
95 disabled = atomic_inc_return(&data->disabled); 95 disabled = atomic_inc_return(&data->disabled);
96 96
97 if (likely(disabled == 1)) 97 if (likely(disabled == 1))
98 trace_function(tr, data, ip, parent_ip, flags); 98 trace_function(tr, data, ip, parent_ip, flags, preempt_count());
99 99
100 atomic_dec(&data->disabled); 100 atomic_dec(&data->disabled);
101} 101}
@@ -130,6 +130,7 @@ check_critical_timing(struct trace_array *tr,
130 unsigned long latency, t0, t1; 130 unsigned long latency, t0, t1;
131 cycle_t T0, T1, delta; 131 cycle_t T0, T1, delta;
132 unsigned long flags; 132 unsigned long flags;
133 int pc;
133 134
134 /* 135 /*
135 * usecs conversion is slow so we try to delay the conversion 136 * usecs conversion is slow so we try to delay the conversion
@@ -141,6 +142,8 @@ check_critical_timing(struct trace_array *tr,
141 142
142 local_save_flags(flags); 143 local_save_flags(flags);
143 144
145 pc = preempt_count();
146
144 if (!report_latency(delta)) 147 if (!report_latency(delta))
145 goto out; 148 goto out;
146 149
@@ -150,7 +153,7 @@ check_critical_timing(struct trace_array *tr,
150 if (!report_latency(delta)) 153 if (!report_latency(delta))
151 goto out_unlock; 154 goto out_unlock;
152 155
153 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); 156 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
154 157
155 latency = nsecs_to_usecs(delta); 158 latency = nsecs_to_usecs(delta);
156 159
@@ -173,8 +176,8 @@ out_unlock:
173out: 176out:
174 data->critical_sequence = max_sequence; 177 data->critical_sequence = max_sequence;
175 data->preempt_timestamp = ftrace_now(cpu); 178 data->preempt_timestamp = ftrace_now(cpu);
176 tracing_reset(data); 179 tracing_reset(tr, cpu);
177 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); 180 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
178} 181}
179 182
180static inline void 183static inline void
@@ -203,11 +206,11 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
203 data->critical_sequence = max_sequence; 206 data->critical_sequence = max_sequence;
204 data->preempt_timestamp = ftrace_now(cpu); 207 data->preempt_timestamp = ftrace_now(cpu);
205 data->critical_start = parent_ip ? : ip; 208 data->critical_start = parent_ip ? : ip;
206 tracing_reset(data); 209 tracing_reset(tr, cpu);
207 210
208 local_save_flags(flags); 211 local_save_flags(flags);
209 212
210 trace_function(tr, data, ip, parent_ip, flags); 213 trace_function(tr, data, ip, parent_ip, flags, preempt_count());
211 214
212 per_cpu(tracing_cpu, cpu) = 1; 215 per_cpu(tracing_cpu, cpu) = 1;
213 216
@@ -234,14 +237,14 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
234 237
235 data = tr->data[cpu]; 238 data = tr->data[cpu];
236 239
237 if (unlikely(!data) || unlikely(!head_page(data)) || 240 if (unlikely(!data) ||
238 !data->critical_start || atomic_read(&data->disabled)) 241 !data->critical_start || atomic_read(&data->disabled))
239 return; 242 return;
240 243
241 atomic_inc(&data->disabled); 244 atomic_inc(&data->disabled);
242 245
243 local_save_flags(flags); 246 local_save_flags(flags);
244 trace_function(tr, data, ip, parent_ip, flags); 247 trace_function(tr, data, ip, parent_ip, flags, preempt_count());
245 check_critical_timing(tr, data, parent_ip ? : ip, cpu); 248 check_critical_timing(tr, data, parent_ip ? : ip, cpu);
246 data->critical_start = 0; 249 data->critical_start = 0;
247 atomic_dec(&data->disabled); 250 atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index b13dc19dcbb4..f28484618ff0 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -27,7 +27,7 @@ static void mmio_reset_data(struct trace_array *tr)
27 tr->time_start = ftrace_now(tr->cpu); 27 tr->time_start = ftrace_now(tr->cpu);
28 28
29 for_each_online_cpu(cpu) 29 for_each_online_cpu(cpu)
30 tracing_reset(tr->data[cpu]); 30 tracing_reset(tr, cpu);
31} 31}
32 32
33static void mmio_trace_init(struct trace_array *tr) 33static void mmio_trace_init(struct trace_array *tr)
@@ -130,10 +130,14 @@ static unsigned long count_overruns(struct trace_iterator *iter)
130{ 130{
131 int cpu; 131 int cpu;
132 unsigned long cnt = 0; 132 unsigned long cnt = 0;
133/* FIXME: */
134#if 0
133 for_each_online_cpu(cpu) { 135 for_each_online_cpu(cpu) {
134 cnt += iter->overrun[cpu]; 136 cnt += iter->overrun[cpu];
135 iter->overrun[cpu] = 0; 137 iter->overrun[cpu] = 0;
136 } 138 }
139#endif
140 (void)cpu;
137 return cnt; 141 return cnt;
138} 142}
139 143
@@ -171,17 +175,21 @@ print_out:
171 return (ret == -EBUSY) ? 0 : ret; 175 return (ret == -EBUSY) ? 0 : ret;
172} 176}
173 177
174static int mmio_print_rw(struct trace_iterator *iter) 178static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
175{ 179{
176 struct trace_entry *entry = iter->ent; 180 struct trace_entry *entry = iter->ent;
177 struct mmiotrace_rw *rw = &entry->mmiorw; 181 struct trace_mmiotrace_rw *field;
182 struct mmiotrace_rw *rw;
178 struct trace_seq *s = &iter->seq; 183 struct trace_seq *s = &iter->seq;
179 unsigned long long t = ns2usecs(entry->t); 184 unsigned long long t = ns2usecs(iter->ts);
180 unsigned long usec_rem = do_div(t, 1000000ULL); 185 unsigned long usec_rem = do_div(t, 1000000ULL);
181 unsigned secs = (unsigned long)t; 186 unsigned secs = (unsigned long)t;
182 int ret = 1; 187 int ret = 1;
183 188
184 switch (entry->mmiorw.opcode) { 189 trace_assign_type(field, entry);
190 rw = &field->rw;
191
192 switch (rw->opcode) {
185 case MMIO_READ: 193 case MMIO_READ:
186 ret = trace_seq_printf(s, 194 ret = trace_seq_printf(s,
187 "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", 195 "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
@@ -209,21 +217,25 @@ static int mmio_print_rw(struct trace_iterator *iter)
209 break; 217 break;
210 } 218 }
211 if (ret) 219 if (ret)
212 return 1; 220 return TRACE_TYPE_HANDLED;
213 return 0; 221 return TRACE_TYPE_PARTIAL_LINE;
214} 222}
215 223
216static int mmio_print_map(struct trace_iterator *iter) 224static enum print_line_t mmio_print_map(struct trace_iterator *iter)
217{ 225{
218 struct trace_entry *entry = iter->ent; 226 struct trace_entry *entry = iter->ent;
219 struct mmiotrace_map *m = &entry->mmiomap; 227 struct trace_mmiotrace_map *field;
228 struct mmiotrace_map *m;
220 struct trace_seq *s = &iter->seq; 229 struct trace_seq *s = &iter->seq;
221 unsigned long long t = ns2usecs(entry->t); 230 unsigned long long t = ns2usecs(iter->ts);
222 unsigned long usec_rem = do_div(t, 1000000ULL); 231 unsigned long usec_rem = do_div(t, 1000000ULL);
223 unsigned secs = (unsigned long)t; 232 unsigned secs = (unsigned long)t;
224 int ret = 1; 233 int ret;
225 234
226 switch (entry->mmiorw.opcode) { 235 trace_assign_type(field, entry);
236 m = &field->map;
237
238 switch (m->opcode) {
227 case MMIO_PROBE: 239 case MMIO_PROBE:
228 ret = trace_seq_printf(s, 240 ret = trace_seq_printf(s,
229 "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", 241 "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
@@ -241,20 +253,43 @@ static int mmio_print_map(struct trace_iterator *iter)
241 break; 253 break;
242 } 254 }
243 if (ret) 255 if (ret)
244 return 1; 256 return TRACE_TYPE_HANDLED;
245 return 0; 257 return TRACE_TYPE_PARTIAL_LINE;
258}
259
260static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
261{
262 struct trace_entry *entry = iter->ent;
263 struct print_entry *print = (struct print_entry *)entry;
264 const char *msg = print->buf;
265 struct trace_seq *s = &iter->seq;
266 unsigned long long t = ns2usecs(iter->ts);
267 unsigned long usec_rem = do_div(t, 1000000ULL);
268 unsigned secs = (unsigned long)t;
269 int ret;
270
271 /* The trailing newline must be in the message. */
272 ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
273 if (!ret)
274 return TRACE_TYPE_PARTIAL_LINE;
275
276 if (entry->flags & TRACE_FLAG_CONT)
277 trace_seq_print_cont(s, iter);
278
279 return TRACE_TYPE_HANDLED;
246} 280}
247 281
248/* return 0 to abort printing without consuming current entry in pipe mode */ 282static enum print_line_t mmio_print_line(struct trace_iterator *iter)
249static int mmio_print_line(struct trace_iterator *iter)
250{ 283{
251 switch (iter->ent->type) { 284 switch (iter->ent->type) {
252 case TRACE_MMIO_RW: 285 case TRACE_MMIO_RW:
253 return mmio_print_rw(iter); 286 return mmio_print_rw(iter);
254 case TRACE_MMIO_MAP: 287 case TRACE_MMIO_MAP:
255 return mmio_print_map(iter); 288 return mmio_print_map(iter);
289 case TRACE_PRINT:
290 return mmio_print_mark(iter);
256 default: 291 default:
257 return 1; /* ignore unknown entries */ 292 return TRACE_TYPE_HANDLED; /* ignore unknown entries */
258 } 293 }
259} 294}
260 295
@@ -276,6 +311,27 @@ __init static int init_mmio_trace(void)
276} 311}
277device_initcall(init_mmio_trace); 312device_initcall(init_mmio_trace);
278 313
314static void __trace_mmiotrace_rw(struct trace_array *tr,
315 struct trace_array_cpu *data,
316 struct mmiotrace_rw *rw)
317{
318 struct ring_buffer_event *event;
319 struct trace_mmiotrace_rw *entry;
320 unsigned long irq_flags;
321
322 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
323 &irq_flags);
324 if (!event)
325 return;
326 entry = ring_buffer_event_data(event);
327 tracing_generic_entry_update(&entry->ent, 0, preempt_count());
328 entry->ent.type = TRACE_MMIO_RW;
329 entry->rw = *rw;
330 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
331
332 trace_wake_up();
333}
334
279void mmio_trace_rw(struct mmiotrace_rw *rw) 335void mmio_trace_rw(struct mmiotrace_rw *rw)
280{ 336{
281 struct trace_array *tr = mmio_trace_array; 337 struct trace_array *tr = mmio_trace_array;
@@ -283,6 +339,27 @@ void mmio_trace_rw(struct mmiotrace_rw *rw)
283 __trace_mmiotrace_rw(tr, data, rw); 339 __trace_mmiotrace_rw(tr, data, rw);
284} 340}
285 341
342static void __trace_mmiotrace_map(struct trace_array *tr,
343 struct trace_array_cpu *data,
344 struct mmiotrace_map *map)
345{
346 struct ring_buffer_event *event;
347 struct trace_mmiotrace_map *entry;
348 unsigned long irq_flags;
349
350 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
351 &irq_flags);
352 if (!event)
353 return;
354 entry = ring_buffer_event_data(event);
355 tracing_generic_entry_update(&entry->ent, 0, preempt_count());
356 entry->ent.type = TRACE_MMIO_MAP;
357 entry->map = *map;
358 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
359
360 trace_wake_up();
361}
362
286void mmio_trace_mapping(struct mmiotrace_map *map) 363void mmio_trace_mapping(struct mmiotrace_map *map)
287{ 364{
288 struct trace_array *tr = mmio_trace_array; 365 struct trace_array *tr = mmio_trace_array;
@@ -293,3 +370,8 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
293 __trace_mmiotrace_map(tr, data, map); 370 __trace_mmiotrace_map(tr, data, map);
294 preempt_enable(); 371 preempt_enable();
295} 372}
373
374int mmio_trace_printk(const char *fmt, va_list args)
375{
376 return trace_vprintk(0, fmt, args);
377}
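
The mmio_trace_printk() hook added above routes free-form marker text into the trace buffer as TRACE_PRINT entries, which mmio_print_mark() then renders as "MARK <sec>.<usec> <message>" lines. A minimal caller-side sketch, assuming a varargs front end is wanted and that mmio_trace_printk() is declared in <linux/mmiotrace.h>; the wrapper name below is illustrative only:

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/mmiotrace.h>	/* assumed to declare mmio_trace_printk() */

/* Illustrative wrapper: the trailing newline must be part of the message. */
static int my_mmiotrace_mark(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = mmio_trace_printk(fmt, args);	/* recorded as a TRACE_PRINT entry */
	va_end(args);
	return ret;
}
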
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
new file mode 100644
index 000000000000..4592b4862515
--- /dev/null
+++ b/kernel/trace/trace_nop.c
@@ -0,0 +1,64 @@
1/*
2 * nop tracer
3 *
4 * Copyright (C) 2008 Steven Noonan <steven@uplinklabs.net>
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/debugfs.h>
11#include <linux/ftrace.h>
12
13#include "trace.h"
14
15static struct trace_array *ctx_trace;
16
17static void start_nop_trace(struct trace_array *tr)
18{
19 /* Nothing to do! */
20}
21
22static void stop_nop_trace(struct trace_array *tr)
23{
24 /* Nothing to do! */
25}
26
27static void nop_trace_init(struct trace_array *tr)
28{
29 int cpu;
30 ctx_trace = tr;
31
32 for_each_online_cpu(cpu)
33 tracing_reset(tr, cpu);
34
35 if (tr->ctrl)
36 start_nop_trace(tr);
37}
38
39static void nop_trace_reset(struct trace_array *tr)
40{
41 if (tr->ctrl)
42 stop_nop_trace(tr);
43}
44
45static void nop_trace_ctrl_update(struct trace_array *tr)
46{
47 /* When starting a new trace, reset the buffers */
48 if (tr->ctrl)
49 start_nop_trace(tr);
50 else
51 stop_nop_trace(tr);
52}
53
54struct tracer nop_trace __read_mostly =
55{
56 .name = "nop",
57 .init = nop_trace_init,
58 .reset = nop_trace_reset,
59 .ctrl_update = nop_trace_ctrl_update,
60#ifdef CONFIG_FTRACE_SELFTEST
61 .selftest = trace_selftest_startup_nop,
62#endif
63};
64
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index cb817a209aa0..b8f56beb1a62 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -9,8 +9,8 @@
9#include <linux/debugfs.h> 9#include <linux/debugfs.h>
10#include <linux/kallsyms.h> 10#include <linux/kallsyms.h>
11#include <linux/uaccess.h> 11#include <linux/uaccess.h>
12#include <linux/marker.h>
13#include <linux/ftrace.h> 12#include <linux/ftrace.h>
13#include <trace/sched.h>
14 14
15#include "trace.h" 15#include "trace.h"
16 16
@@ -19,15 +19,16 @@ static int __read_mostly tracer_enabled;
19static atomic_t sched_ref; 19static atomic_t sched_ref;
20 20
21static void 21static void
22sched_switch_func(void *private, void *__rq, struct task_struct *prev, 22probe_sched_switch(struct rq *__rq, struct task_struct *prev,
23 struct task_struct *next) 23 struct task_struct *next)
24{ 24{
25 struct trace_array **ptr = private;
26 struct trace_array *tr = *ptr;
27 struct trace_array_cpu *data; 25 struct trace_array_cpu *data;
28 unsigned long flags; 26 unsigned long flags;
29 long disabled;
30 int cpu; 27 int cpu;
28 int pc;
29
30 if (!atomic_read(&sched_ref))
31 return;
31 32
32 tracing_record_cmdline(prev); 33 tracing_record_cmdline(prev);
33 tracing_record_cmdline(next); 34 tracing_record_cmdline(next);
@@ -35,97 +36,41 @@ sched_switch_func(void *private, void *__rq, struct task_struct *prev,
35 if (!tracer_enabled) 36 if (!tracer_enabled)
36 return; 37 return;
37 38
39 pc = preempt_count();
38 local_irq_save(flags); 40 local_irq_save(flags);
39 cpu = raw_smp_processor_id(); 41 cpu = raw_smp_processor_id();
40 data = tr->data[cpu]; 42 data = ctx_trace->data[cpu];
41 disabled = atomic_inc_return(&data->disabled);
42 43
43 if (likely(disabled == 1)) 44 if (likely(!atomic_read(&data->disabled)))
44 tracing_sched_switch_trace(tr, data, prev, next, flags); 45 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
45 46
46 atomic_dec(&data->disabled);
47 local_irq_restore(flags); 47 local_irq_restore(flags);
48} 48}
49 49
50static notrace void
51sched_switch_callback(void *probe_data, void *call_data,
52 const char *format, va_list *args)
53{
54 struct task_struct *prev;
55 struct task_struct *next;
56 struct rq *__rq;
57
58 if (!atomic_read(&sched_ref))
59 return;
60
61 /* skip prev_pid %d next_pid %d prev_state %ld */
62 (void)va_arg(*args, int);
63 (void)va_arg(*args, int);
64 (void)va_arg(*args, long);
65 __rq = va_arg(*args, typeof(__rq));
66 prev = va_arg(*args, typeof(prev));
67 next = va_arg(*args, typeof(next));
68
69 /*
70 * If tracer_switch_func only points to the local
71 * switch func, it still needs the ptr passed to it.
72 */
73 sched_switch_func(probe_data, __rq, prev, next);
74}
75
76static void 50static void
77wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct 51probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
78 task_struct *curr)
79{ 52{
80 struct trace_array **ptr = private;
81 struct trace_array *tr = *ptr;
82 struct trace_array_cpu *data; 53 struct trace_array_cpu *data;
83 unsigned long flags; 54 unsigned long flags;
84 long disabled; 55 int cpu, pc;
85 int cpu;
86 56
87 if (!tracer_enabled) 57 if (!likely(tracer_enabled))
88 return; 58 return;
89 59
90 tracing_record_cmdline(curr); 60 pc = preempt_count();
61 tracing_record_cmdline(current);
91 62
92 local_irq_save(flags); 63 local_irq_save(flags);
93 cpu = raw_smp_processor_id(); 64 cpu = raw_smp_processor_id();
94 data = tr->data[cpu]; 65 data = ctx_trace->data[cpu];
95 disabled = atomic_inc_return(&data->disabled);
96 66
97 if (likely(disabled == 1)) 67 if (likely(!atomic_read(&data->disabled)))
98 tracing_sched_wakeup_trace(tr, data, wakee, curr, flags); 68 tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
69 flags, pc);
99 70
100 atomic_dec(&data->disabled);
101 local_irq_restore(flags); 71 local_irq_restore(flags);
102} 72}
103 73
104static notrace void
105wake_up_callback(void *probe_data, void *call_data,
106 const char *format, va_list *args)
107{
108 struct task_struct *curr;
109 struct task_struct *task;
110 struct rq *__rq;
111
112 if (likely(!tracer_enabled))
113 return;
114
115 /* Skip pid %d state %ld */
116 (void)va_arg(*args, int);
117 (void)va_arg(*args, long);
118 /* now get the meat: "rq %p task %p rq->curr %p" */
119 __rq = va_arg(*args, typeof(__rq));
120 task = va_arg(*args, typeof(task));
121 curr = va_arg(*args, typeof(curr));
122
123 tracing_record_cmdline(task);
124 tracing_record_cmdline(curr);
125
126 wakeup_func(probe_data, __rq, task, curr);
127}
128
129static void sched_switch_reset(struct trace_array *tr) 74static void sched_switch_reset(struct trace_array *tr)
130{ 75{
131 int cpu; 76 int cpu;
@@ -133,67 +78,47 @@ static void sched_switch_reset(struct trace_array *tr)
133 tr->time_start = ftrace_now(tr->cpu); 78 tr->time_start = ftrace_now(tr->cpu);
134 79
135 for_each_online_cpu(cpu) 80 for_each_online_cpu(cpu)
136 tracing_reset(tr->data[cpu]); 81 tracing_reset(tr, cpu);
137} 82}
138 83
139static int tracing_sched_register(void) 84static int tracing_sched_register(void)
140{ 85{
141 int ret; 86 int ret;
142 87
143 ret = marker_probe_register("kernel_sched_wakeup", 88 ret = register_trace_sched_wakeup(probe_sched_wakeup);
144 "pid %d state %ld ## rq %p task %p rq->curr %p",
145 wake_up_callback,
146 &ctx_trace);
147 if (ret) { 89 if (ret) {
148 pr_info("wakeup trace: Couldn't add marker" 90 pr_info("wakeup trace: Couldn't activate tracepoint"
149 " probe to kernel_sched_wakeup\n"); 91 " probe to kernel_sched_wakeup\n");
150 return ret; 92 return ret;
151 } 93 }
152 94
153 ret = marker_probe_register("kernel_sched_wakeup_new", 95 ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
154 "pid %d state %ld ## rq %p task %p rq->curr %p",
155 wake_up_callback,
156 &ctx_trace);
157 if (ret) { 96 if (ret) {
158 pr_info("wakeup trace: Couldn't add marker" 97 pr_info("wakeup trace: Couldn't activate tracepoint"
159 " probe to kernel_sched_wakeup_new\n"); 98 " probe to kernel_sched_wakeup_new\n");
160 goto fail_deprobe; 99 goto fail_deprobe;
161 } 100 }
162 101
163 ret = marker_probe_register("kernel_sched_schedule", 102 ret = register_trace_sched_switch(probe_sched_switch);
164 "prev_pid %d next_pid %d prev_state %ld "
165 "## rq %p prev %p next %p",
166 sched_switch_callback,
167 &ctx_trace);
168 if (ret) { 103 if (ret) {
169 pr_info("sched trace: Couldn't add marker" 104 pr_info("sched trace: Couldn't activate tracepoint"
170 " probe to kernel_sched_schedule\n"); 105 " probe to kernel_sched_schedule\n");
171 goto fail_deprobe_wake_new; 106 goto fail_deprobe_wake_new;
172 } 107 }
173 108
174 return ret; 109 return ret;
175fail_deprobe_wake_new: 110fail_deprobe_wake_new:
176 marker_probe_unregister("kernel_sched_wakeup_new", 111 unregister_trace_sched_wakeup_new(probe_sched_wakeup);
177 wake_up_callback,
178 &ctx_trace);
179fail_deprobe: 112fail_deprobe:
180 marker_probe_unregister("kernel_sched_wakeup", 113 unregister_trace_sched_wakeup(probe_sched_wakeup);
181 wake_up_callback,
182 &ctx_trace);
183 return ret; 114 return ret;
184} 115}
185 116
186static void tracing_sched_unregister(void) 117static void tracing_sched_unregister(void)
187{ 118{
188 marker_probe_unregister("kernel_sched_schedule", 119 unregister_trace_sched_switch(probe_sched_switch);
189 sched_switch_callback, 120 unregister_trace_sched_wakeup_new(probe_sched_wakeup);
190 &ctx_trace); 121 unregister_trace_sched_wakeup(probe_sched_wakeup);
191 marker_probe_unregister("kernel_sched_wakeup_new",
192 wake_up_callback,
193 &ctx_trace);
194 marker_probe_unregister("kernel_sched_wakeup",
195 wake_up_callback,
196 &ctx_trace);
197} 122}
198 123
199static void tracing_start_sched_switch(void) 124static void tracing_start_sched_switch(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e303ccb62cdf..fe4a252c2363 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,7 +15,7 @@
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/ftrace.h> 17#include <linux/ftrace.h>
18#include <linux/marker.h> 18#include <trace/sched.h>
19 19
20#include "trace.h" 20#include "trace.h"
21 21
@@ -44,10 +44,12 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
44 long disabled; 44 long disabled;
45 int resched; 45 int resched;
46 int cpu; 46 int cpu;
47 int pc;
47 48
48 if (likely(!wakeup_task)) 49 if (likely(!wakeup_task))
49 return; 50 return;
50 51
52 pc = preempt_count();
51 resched = need_resched(); 53 resched = need_resched();
52 preempt_disable_notrace(); 54 preempt_disable_notrace();
53 55
@@ -70,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
70 if (task_cpu(wakeup_task) != cpu) 72 if (task_cpu(wakeup_task) != cpu)
71 goto unlock; 73 goto unlock;
72 74
73 trace_function(tr, data, ip, parent_ip, flags); 75 trace_function(tr, data, ip, parent_ip, flags, pc);
74 76
75 unlock: 77 unlock:
76 __raw_spin_unlock(&wakeup_lock); 78 __raw_spin_unlock(&wakeup_lock);
@@ -112,17 +114,18 @@ static int report_latency(cycle_t delta)
112} 114}
113 115
114static void notrace 116static void notrace
115wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, 117probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
116 struct task_struct *next) 118 struct task_struct *next)
117{ 119{
118 unsigned long latency = 0, t0 = 0, t1 = 0; 120 unsigned long latency = 0, t0 = 0, t1 = 0;
119 struct trace_array **ptr = private;
120 struct trace_array *tr = *ptr;
121 struct trace_array_cpu *data; 121 struct trace_array_cpu *data;
122 cycle_t T0, T1, delta; 122 cycle_t T0, T1, delta;
123 unsigned long flags; 123 unsigned long flags;
124 long disabled; 124 long disabled;
125 int cpu; 125 int cpu;
126 int pc;
127
128 tracing_record_cmdline(prev);
126 129
127 if (unlikely(!tracer_enabled)) 130 if (unlikely(!tracer_enabled))
128 return; 131 return;
@@ -139,12 +142,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
139 if (next != wakeup_task) 142 if (next != wakeup_task)
140 return; 143 return;
141 144
145 pc = preempt_count();
146
142 /* The task we are waiting for is waking up */ 147 /* The task we are waiting for is waking up */
143 data = tr->data[wakeup_cpu]; 148 data = wakeup_trace->data[wakeup_cpu];
144 149
145 /* disable local data, not wakeup_cpu data */ 150 /* disable local data, not wakeup_cpu data */
146 cpu = raw_smp_processor_id(); 151 cpu = raw_smp_processor_id();
147 disabled = atomic_inc_return(&tr->data[cpu]->disabled); 152 disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
148 if (likely(disabled != 1)) 153 if (likely(disabled != 1))
149 goto out; 154 goto out;
150 155
@@ -155,7 +160,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
155 if (unlikely(!tracer_enabled || next != wakeup_task)) 160 if (unlikely(!tracer_enabled || next != wakeup_task))
156 goto out_unlock; 161 goto out_unlock;
157 162
158 trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags); 163 trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
159 164
160 /* 165 /*
161 * usecs conversion is slow so we try to delay the conversion 166 * usecs conversion is slow so we try to delay the conversion
@@ -174,39 +179,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
174 t0 = nsecs_to_usecs(T0); 179 t0 = nsecs_to_usecs(T0);
175 t1 = nsecs_to_usecs(T1); 180 t1 = nsecs_to_usecs(T1);
176 181
177 update_max_tr(tr, wakeup_task, wakeup_cpu); 182 update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
178 183
179out_unlock: 184out_unlock:
180 __wakeup_reset(tr); 185 __wakeup_reset(wakeup_trace);
181 __raw_spin_unlock(&wakeup_lock); 186 __raw_spin_unlock(&wakeup_lock);
182 local_irq_restore(flags); 187 local_irq_restore(flags);
183out: 188out:
184 atomic_dec(&tr->data[cpu]->disabled); 189 atomic_dec(&wakeup_trace->data[cpu]->disabled);
185}
186
187static notrace void
188sched_switch_callback(void *probe_data, void *call_data,
189 const char *format, va_list *args)
190{
191 struct task_struct *prev;
192 struct task_struct *next;
193 struct rq *__rq;
194
195 /* skip prev_pid %d next_pid %d prev_state %ld */
196 (void)va_arg(*args, int);
197 (void)va_arg(*args, int);
198 (void)va_arg(*args, long);
199 __rq = va_arg(*args, typeof(__rq));
200 prev = va_arg(*args, typeof(prev));
201 next = va_arg(*args, typeof(next));
202
203 tracing_record_cmdline(prev);
204
205 /*
206 * If tracer_switch_func only points to the local
207 * switch func, it still needs the ptr passed to it.
208 */
209 wakeup_sched_switch(probe_data, __rq, prev, next);
210} 190}
211 191
212static void __wakeup_reset(struct trace_array *tr) 192static void __wakeup_reset(struct trace_array *tr)
@@ -216,7 +196,7 @@ static void __wakeup_reset(struct trace_array *tr)
216 196
217 for_each_possible_cpu(cpu) { 197 for_each_possible_cpu(cpu) {
218 data = tr->data[cpu]; 198 data = tr->data[cpu];
219 tracing_reset(data); 199 tracing_reset(tr, cpu);
220 } 200 }
221 201
222 wakeup_cpu = -1; 202 wakeup_cpu = -1;
@@ -240,19 +220,26 @@ static void wakeup_reset(struct trace_array *tr)
240} 220}
241 221
242static void 222static void
243wakeup_check_start(struct trace_array *tr, struct task_struct *p, 223probe_wakeup(struct rq *rq, struct task_struct *p)
244 struct task_struct *curr)
245{ 224{
246 int cpu = smp_processor_id(); 225 int cpu = smp_processor_id();
247 unsigned long flags; 226 unsigned long flags;
248 long disabled; 227 long disabled;
228 int pc;
229
230 if (likely(!tracer_enabled))
231 return;
232
233 tracing_record_cmdline(p);
234 tracing_record_cmdline(current);
249 235
250 if (likely(!rt_task(p)) || 236 if (likely(!rt_task(p)) ||
251 p->prio >= wakeup_prio || 237 p->prio >= wakeup_prio ||
252 p->prio >= curr->prio) 238 p->prio >= current->prio)
253 return; 239 return;
254 240
255 disabled = atomic_inc_return(&tr->data[cpu]->disabled); 241 pc = preempt_count();
242 disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
256 if (unlikely(disabled != 1)) 243 if (unlikely(disabled != 1))
257 goto out; 244 goto out;
258 245
@@ -264,7 +251,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
264 goto out_locked; 251 goto out_locked;
265 252
266 /* reset the trace */ 253 /* reset the trace */
267 __wakeup_reset(tr); 254 __wakeup_reset(wakeup_trace);
268 255
269 wakeup_cpu = task_cpu(p); 256 wakeup_cpu = task_cpu(p);
270 wakeup_prio = p->prio; 257 wakeup_prio = p->prio;
@@ -274,74 +261,37 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
274 261
275 local_save_flags(flags); 262 local_save_flags(flags);
276 263
277 tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); 264 wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
278 trace_function(tr, tr->data[wakeup_cpu], 265 trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
279 CALLER_ADDR1, CALLER_ADDR2, flags); 266 CALLER_ADDR1, CALLER_ADDR2, flags, pc);
280 267
281out_locked: 268out_locked:
282 __raw_spin_unlock(&wakeup_lock); 269 __raw_spin_unlock(&wakeup_lock);
283out: 270out:
284 atomic_dec(&tr->data[cpu]->disabled); 271 atomic_dec(&wakeup_trace->data[cpu]->disabled);
285}
286
287static notrace void
288wake_up_callback(void *probe_data, void *call_data,
289 const char *format, va_list *args)
290{
291 struct trace_array **ptr = probe_data;
292 struct trace_array *tr = *ptr;
293 struct task_struct *curr;
294 struct task_struct *task;
295 struct rq *__rq;
296
297 if (likely(!tracer_enabled))
298 return;
299
300 /* Skip pid %d state %ld */
301 (void)va_arg(*args, int);
302 (void)va_arg(*args, long);
303 /* now get the meat: "rq %p task %p rq->curr %p" */
304 __rq = va_arg(*args, typeof(__rq));
305 task = va_arg(*args, typeof(task));
306 curr = va_arg(*args, typeof(curr));
307
308 tracing_record_cmdline(task);
309 tracing_record_cmdline(curr);
310
311 wakeup_check_start(tr, task, curr);
312} 272}
313 273
314static void start_wakeup_tracer(struct trace_array *tr) 274static void start_wakeup_tracer(struct trace_array *tr)
315{ 275{
316 int ret; 276 int ret;
317 277
318 ret = marker_probe_register("kernel_sched_wakeup", 278 ret = register_trace_sched_wakeup(probe_wakeup);
319 "pid %d state %ld ## rq %p task %p rq->curr %p",
320 wake_up_callback,
321 &wakeup_trace);
322 if (ret) { 279 if (ret) {
323 pr_info("wakeup trace: Couldn't add marker" 280 pr_info("wakeup trace: Couldn't activate tracepoint"
324 " probe to kernel_sched_wakeup\n"); 281 " probe to kernel_sched_wakeup\n");
325 return; 282 return;
326 } 283 }
327 284
328 ret = marker_probe_register("kernel_sched_wakeup_new", 285 ret = register_trace_sched_wakeup_new(probe_wakeup);
329 "pid %d state %ld ## rq %p task %p rq->curr %p",
330 wake_up_callback,
331 &wakeup_trace);
332 if (ret) { 286 if (ret) {
333 pr_info("wakeup trace: Couldn't add marker" 287 pr_info("wakeup trace: Couldn't activate tracepoint"
334 " probe to kernel_sched_wakeup_new\n"); 288 " probe to kernel_sched_wakeup_new\n");
335 goto fail_deprobe; 289 goto fail_deprobe;
336 } 290 }
337 291
338 ret = marker_probe_register("kernel_sched_schedule", 292 ret = register_trace_sched_switch(probe_wakeup_sched_switch);
339 "prev_pid %d next_pid %d prev_state %ld "
340 "## rq %p prev %p next %p",
341 sched_switch_callback,
342 &wakeup_trace);
343 if (ret) { 293 if (ret) {
344 pr_info("sched trace: Couldn't add marker" 294 pr_info("sched trace: Couldn't activate tracepoint"
345 " probe to kernel_sched_schedule\n"); 295 " probe to kernel_sched_schedule\n");
346 goto fail_deprobe_wake_new; 296 goto fail_deprobe_wake_new;
347 } 297 }
@@ -363,28 +313,18 @@ static void start_wakeup_tracer(struct trace_array *tr)
363 313
364 return; 314 return;
365fail_deprobe_wake_new: 315fail_deprobe_wake_new:
366 marker_probe_unregister("kernel_sched_wakeup_new", 316 unregister_trace_sched_wakeup_new(probe_wakeup);
367 wake_up_callback,
368 &wakeup_trace);
369fail_deprobe: 317fail_deprobe:
370 marker_probe_unregister("kernel_sched_wakeup", 318 unregister_trace_sched_wakeup(probe_wakeup);
371 wake_up_callback,
372 &wakeup_trace);
373} 319}
374 320
375static void stop_wakeup_tracer(struct trace_array *tr) 321static void stop_wakeup_tracer(struct trace_array *tr)
376{ 322{
377 tracer_enabled = 0; 323 tracer_enabled = 0;
378 unregister_ftrace_function(&trace_ops); 324 unregister_ftrace_function(&trace_ops);
379 marker_probe_unregister("kernel_sched_schedule", 325 unregister_trace_sched_switch(probe_wakeup_sched_switch);
380 sched_switch_callback, 326 unregister_trace_sched_wakeup_new(probe_wakeup);
381 &wakeup_trace); 327 unregister_trace_sched_wakeup(probe_wakeup);
382 marker_probe_unregister("kernel_sched_wakeup_new",
383 wake_up_callback,
384 &wakeup_trace);
385 marker_probe_unregister("kernel_sched_wakeup",
386 wake_up_callback,
387 &wakeup_trace);
388} 328}
389 329
390static void wakeup_tracer_init(struct trace_array *tr) 330static void wakeup_tracer_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 0911b7e073bf..09cf230d7eca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -9,65 +9,29 @@ static inline int trace_valid_entry(struct trace_entry *entry)
9 case TRACE_FN: 9 case TRACE_FN:
10 case TRACE_CTX: 10 case TRACE_CTX:
11 case TRACE_WAKE: 11 case TRACE_WAKE:
12 case TRACE_CONT:
12 case TRACE_STACK: 13 case TRACE_STACK:
14 case TRACE_PRINT:
13 case TRACE_SPECIAL: 15 case TRACE_SPECIAL:
14 return 1; 16 return 1;
15 } 17 }
16 return 0; 18 return 0;
17} 19}
18 20
19static int 21static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
20trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
21{ 22{
22 struct trace_entry *entries; 23 struct ring_buffer_event *event;
23 struct page *page; 24 struct trace_entry *entry;
24 int idx = 0;
25 int i;
26 25
27 BUG_ON(list_empty(&data->trace_pages)); 26 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
28 page = list_entry(data->trace_pages.next, struct page, lru); 27 entry = ring_buffer_event_data(event);
29 entries = page_address(page);
30 28
31 check_pages(data); 29 if (!trace_valid_entry(entry)) {
32 if (head_page(data) != entries)
33 goto failed;
34
35 /*
36 * The starting trace buffer always has valid elements,
37 * if any element exists.
38 */
39 entries = head_page(data);
40
41 for (i = 0; i < tr->entries; i++) {
42
43 if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
44 printk(KERN_CONT ".. invalid entry %d ", 30 printk(KERN_CONT ".. invalid entry %d ",
45 entries[idx].type); 31 entry->type);
46 goto failed; 32 goto failed;
47 } 33 }
48
49 idx++;
50 if (idx >= ENTRIES_PER_PAGE) {
51 page = virt_to_page(entries);
52 if (page->lru.next == &data->trace_pages) {
53 if (i != tr->entries - 1) {
54 printk(KERN_CONT ".. entries buffer mismatch");
55 goto failed;
56 }
57 } else {
58 page = list_entry(page->lru.next, struct page, lru);
59 entries = page_address(page);
60 }
61 idx = 0;
62 }
63 } 34 }
64
65 page = virt_to_page(entries);
66 if (page->lru.next != &data->trace_pages) {
67 printk(KERN_CONT ".. too many entries");
68 goto failed;
69 }
70
71 return 0; 35 return 0;
72 36
73 failed: 37 failed:
@@ -89,13 +53,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
89 /* Don't allow flipping of max traces now */ 53 /* Don't allow flipping of max traces now */
90 raw_local_irq_save(flags); 54 raw_local_irq_save(flags);
91 __raw_spin_lock(&ftrace_max_lock); 55 __raw_spin_lock(&ftrace_max_lock);
92 for_each_possible_cpu(cpu) {
93 if (!head_page(tr->data[cpu]))
94 continue;
95 56
96 cnt += tr->data[cpu]->trace_idx; 57 cnt = ring_buffer_entries(tr->buffer);
97 58
98 ret = trace_test_buffer_cpu(tr, tr->data[cpu]); 59 for_each_possible_cpu(cpu) {
60 ret = trace_test_buffer_cpu(tr, cpu);
99 if (ret) 61 if (ret)
100 break; 62 break;
101 } 63 }
@@ -120,11 +82,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
120 struct trace_array *tr, 82 struct trace_array *tr,
121 int (*func)(void)) 83 int (*func)(void))
122{ 84{
123 unsigned long count;
124 int ret;
125 int save_ftrace_enabled = ftrace_enabled; 85 int save_ftrace_enabled = ftrace_enabled;
126 int save_tracer_enabled = tracer_enabled; 86 int save_tracer_enabled = tracer_enabled;
87 unsigned long count;
127 char *func_name; 88 char *func_name;
89 int ret;
128 90
129 /* The ftrace test PASSED */ 91 /* The ftrace test PASSED */
130 printk(KERN_CONT "PASSED\n"); 92 printk(KERN_CONT "PASSED\n");
@@ -157,6 +119,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
157 /* enable tracing */ 119 /* enable tracing */
158 tr->ctrl = 1; 120 tr->ctrl = 1;
159 trace->init(tr); 121 trace->init(tr);
122
160 /* Sleep for a 1/10 of a second */ 123 /* Sleep for a 1/10 of a second */
161 msleep(100); 124 msleep(100);
162 125
@@ -212,10 +175,10 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
212int 175int
213trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) 176trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
214{ 177{
215 unsigned long count;
216 int ret;
217 int save_ftrace_enabled = ftrace_enabled; 178 int save_ftrace_enabled = ftrace_enabled;
218 int save_tracer_enabled = tracer_enabled; 179 int save_tracer_enabled = tracer_enabled;
180 unsigned long count;
181 int ret;
219 182
220 /* make sure msleep has been recorded */ 183 /* make sure msleep has been recorded */
221 msleep(1); 184 msleep(1);
@@ -415,6 +378,15 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
415} 378}
416#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */ 379#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
417 380
381#ifdef CONFIG_NOP_TRACER
382int
383trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
384{
385 /* What could possibly go wrong? */
386 return 0;
387}
388#endif
389
418#ifdef CONFIG_SCHED_TRACER 390#ifdef CONFIG_SCHED_TRACER
419static int trace_wakeup_test_thread(void *data) 391static int trace_wakeup_test_thread(void *data)
420{ 392{
@@ -486,6 +458,9 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
486 458
487 wake_up_process(p); 459 wake_up_process(p);
488 460
461 /* give a little time to let the thread wake up */
462 msleep(100);
463
489 /* stop the tracing. */ 464 /* stop the tracing. */
490 tr->ctrl = 0; 465 tr->ctrl = 0;
491 trace->ctrl_update(tr); 466 trace->ctrl_update(tr);
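
The reworked trace_test_buffer_cpu() above validates entries by draining the ring buffer with ring_buffer_consume() instead of walking trace pages. A minimal sketch of that per-CPU consumption pattern, under the same assumptions as the selftest code (the trace.h types, a NULL timestamp pointer, and trace_valid_entry() as defined above):

/* Sketch mirroring trace_test_buffer_cpu(): drain one CPU's buffer and count entries. */
static int count_valid_entries(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	int count = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);
		if (!trace_valid_entry(entry))
			return -1;	/* unexpected entry type */
		count++;
	}
	return count;
}
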
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
new file mode 100644
index 000000000000..74c5d9a3afae
--- /dev/null
+++ b/kernel/trace/trace_stack.c
@@ -0,0 +1,310 @@
1/*
2 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
3 *
4 */
5#include <linux/stacktrace.h>
6#include <linux/kallsyms.h>
7#include <linux/seq_file.h>
8#include <linux/spinlock.h>
9#include <linux/uaccess.h>
10#include <linux/debugfs.h>
11#include <linux/ftrace.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/fs.h>
15#include "trace.h"
16
17#define STACK_TRACE_ENTRIES 500
18
19static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
20 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
21static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
22
23static struct stack_trace max_stack_trace = {
24 .max_entries = STACK_TRACE_ENTRIES,
25 .entries = stack_dump_trace,
26};
27
28static unsigned long max_stack_size;
29static raw_spinlock_t max_stack_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
31
32static int stack_trace_disabled __read_mostly;
33static DEFINE_PER_CPU(int, trace_active);
34
35static inline void check_stack(void)
36{
37 unsigned long this_size, flags;
38 unsigned long *p, *top, *start;
39 int i;
40
41 this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
42 this_size = THREAD_SIZE - this_size;
43
44 if (this_size <= max_stack_size)
45 return;
46
47 raw_local_irq_save(flags);
48 __raw_spin_lock(&max_stack_lock);
49
50 /* a race could have already updated it */
51 if (this_size <= max_stack_size)
52 goto out;
53
54 max_stack_size = this_size;
55
56 max_stack_trace.nr_entries = 0;
57 max_stack_trace.skip = 3;
58
59 save_stack_trace(&max_stack_trace);
60
61 /*
62 * Now find where in the stack these are.
63 */
64 i = 0;
65 start = &this_size;
66 top = (unsigned long *)
67 (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
68
69 /*
70 * Loop through all the entries. One of the entries may
71 * for some reason be missed on the stack, so we may
72 * have to account for them. If they are all there, this
73 * loop will only happen once. This code only takes place
74 * on a new max, so it is far from a fast path.
75 */
76 while (i < max_stack_trace.nr_entries) {
77
78 stack_dump_index[i] = this_size;
79 p = start;
80
81 for (; p < top && i < max_stack_trace.nr_entries; p++) {
82 if (*p == stack_dump_trace[i]) {
83 this_size = stack_dump_index[i++] =
84 (top - p) * sizeof(unsigned long);
85 /* Start the search from here */
86 start = p + 1;
87 }
88 }
89
90 i++;
91 }
92
93 out:
94 __raw_spin_unlock(&max_stack_lock);
95 raw_local_irq_restore(flags);
96}
97
98static void
99stack_trace_call(unsigned long ip, unsigned long parent_ip)
100{
101 int cpu, resched;
102
103 if (unlikely(!ftrace_enabled || stack_trace_disabled))
104 return;
105
106 resched = need_resched();
107 preempt_disable_notrace();
108
109 cpu = raw_smp_processor_id();
110 /* no atomic needed, we only modify this variable by this cpu */
111 if (per_cpu(trace_active, cpu)++ != 0)
112 goto out;
113
114 check_stack();
115
116 out:
117 per_cpu(trace_active, cpu)--;
118 /* prevent recursion in schedule */
119 if (resched)
120 preempt_enable_no_resched_notrace();
121 else
122 preempt_enable_notrace();
123}
124
125static struct ftrace_ops trace_ops __read_mostly =
126{
127 .func = stack_trace_call,
128};
129
130static ssize_t
131stack_max_size_read(struct file *filp, char __user *ubuf,
132 size_t count, loff_t *ppos)
133{
134 unsigned long *ptr = filp->private_data;
135 char buf[64];
136 int r;
137
138 r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
139 if (r > sizeof(buf))
140 r = sizeof(buf);
141 return simple_read_from_buffer(ubuf, count, ppos, buf, r);
142}
143
144static ssize_t
145stack_max_size_write(struct file *filp, const char __user *ubuf,
146 size_t count, loff_t *ppos)
147{
148 long *ptr = filp->private_data;
149 unsigned long val, flags;
150 char buf[64];
151 int ret;
152
153 if (count >= sizeof(buf))
154 return -EINVAL;
155
156 if (copy_from_user(&buf, ubuf, count))
157 return -EFAULT;
158
159 buf[count] = 0;
160
161 ret = strict_strtoul(buf, 10, &val);
162 if (ret < 0)
163 return ret;
164
165 raw_local_irq_save(flags);
166 __raw_spin_lock(&max_stack_lock);
167 *ptr = val;
168 __raw_spin_unlock(&max_stack_lock);
169 raw_local_irq_restore(flags);
170
171 return count;
172}
173
174static struct file_operations stack_max_size_fops = {
175 .open = tracing_open_generic,
176 .read = stack_max_size_read,
177 .write = stack_max_size_write,
178};
179
180static void *
181t_next(struct seq_file *m, void *v, loff_t *pos)
182{
183 long i = (long)m->private;
184
185 (*pos)++;
186
187 i++;
188
189 if (i >= max_stack_trace.nr_entries ||
190 stack_dump_trace[i] == ULONG_MAX)
191 return NULL;
192
193 m->private = (void *)i;
194
195 return &m->private;
196}
197
198static void *t_start(struct seq_file *m, loff_t *pos)
199{
200 void *t = &m->private;
201 loff_t l = 0;
202
203 local_irq_disable();
204 __raw_spin_lock(&max_stack_lock);
205
206 for (; t && l < *pos; t = t_next(m, t, &l))
207 ;
208
209 return t;
210}
211
212static void t_stop(struct seq_file *m, void *p)
213{
214 __raw_spin_unlock(&max_stack_lock);
215 local_irq_enable();
216}
217
218static int trace_lookup_stack(struct seq_file *m, long i)
219{
220 unsigned long addr = stack_dump_trace[i];
221#ifdef CONFIG_KALLSYMS
222 char str[KSYM_SYMBOL_LEN];
223
224 sprint_symbol(str, addr);
225
226 return seq_printf(m, "%s\n", str);
227#else
228 return seq_printf(m, "%p\n", (void*)addr);
229#endif
230}
231
232static int t_show(struct seq_file *m, void *v)
233{
234 long i = *(long *)v;
235 int size;
236
237 if (i < 0) {
238 seq_printf(m, " Depth Size Location"
239 " (%d entries)\n"
240 " ----- ---- --------\n",
241 max_stack_trace.nr_entries);
242 return 0;
243 }
244
245 if (i >= max_stack_trace.nr_entries ||
246 stack_dump_trace[i] == ULONG_MAX)
247 return 0;
248
249 if (i+1 == max_stack_trace.nr_entries ||
250 stack_dump_trace[i+1] == ULONG_MAX)
251 size = stack_dump_index[i];
252 else
253 size = stack_dump_index[i] - stack_dump_index[i+1];
254
255 seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);
256
257 trace_lookup_stack(m, i);
258
259 return 0;
260}
261
262static struct seq_operations stack_trace_seq_ops = {
263 .start = t_start,
264 .next = t_next,
265 .stop = t_stop,
266 .show = t_show,
267};
268
269static int stack_trace_open(struct inode *inode, struct file *file)
270{
271 int ret;
272
273 ret = seq_open(file, &stack_trace_seq_ops);
274 if (!ret) {
275 struct seq_file *m = file->private_data;
276 m->private = (void *)-1;
277 }
278
279 return ret;
280}
281
282static struct file_operations stack_trace_fops = {
283 .open = stack_trace_open,
284 .read = seq_read,
285 .llseek = seq_lseek,
286};
287
288static __init int stack_trace_init(void)
289{
290 struct dentry *d_tracer;
291 struct dentry *entry;
292
293 d_tracer = tracing_init_dentry();
294
295 entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
296 &max_stack_size, &stack_max_size_fops);
297 if (!entry)
298 pr_warning("Could not create debugfs 'stack_max_size' entry\n");
299
300 entry = debugfs_create_file("stack_trace", 0444, d_tracer,
301 NULL, &stack_trace_fops);
302 if (!entry)
303 pr_warning("Could not create debugfs 'stack_trace' entry\n");
304
305 register_ftrace_function(&trace_ops);
306
307 return 0;
308}
309
310device_initcall(stack_trace_init);
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index db58fb66a135..9587d3bcba55 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -241,7 +241,7 @@ static void stack_reset(struct trace_array *tr)
241 tr->time_start = ftrace_now(tr->cpu); 241 tr->time_start = ftrace_now(tr->cpu);
242 242
243 for_each_online_cpu(cpu) 243 for_each_online_cpu(cpu)
244 tracing_reset(tr->data[cpu]); 244 tracing_reset(tr, cpu);
245} 245}
246 246
247static void start_stack_trace(struct trace_array *tr) 247static void start_stack_trace(struct trace_array *tr)
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
new file mode 100644
index 000000000000..f2b7c28a4708
--- /dev/null
+++ b/kernel/tracepoint.c
@@ -0,0 +1,477 @@
1/*
2 * Copyright (C) 2008 Mathieu Desnoyers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/types.h>
21#include <linux/jhash.h>
22#include <linux/list.h>
23#include <linux/rcupdate.h>
24#include <linux/tracepoint.h>
25#include <linux/err.h>
26#include <linux/slab.h>
27
28extern struct tracepoint __start___tracepoints[];
29extern struct tracepoint __stop___tracepoints[];
30
31/* Set to 1 to enable tracepoint debug output */
32static const int tracepoint_debug;
33
34/*
35 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
36 * builtin and module tracepoints and the hash table.
37 */
38static DEFINE_MUTEX(tracepoints_mutex);
39
40/*
41 * Tracepoint hash table, containing the active tracepoints.
42 * Protected by tracepoints_mutex.
43 */
44#define TRACEPOINT_HASH_BITS 6
45#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
46
47/*
48 * Note about RCU :
49 * It is used to delay the freeing of multiple probes arrays until a quiescent
50 * state is reached.
51 * Tracepoint entries modifications are protected by the tracepoints_mutex.
52 */
53struct tracepoint_entry {
54 struct hlist_node hlist;
55 void **funcs;
56 int refcount; /* Number of times armed. 0 if disarmed. */
57 struct rcu_head rcu;
58 void *oldptr;
59 unsigned char rcu_pending:1;
60 char name[0];
61};
62
63static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
64
65static void free_old_closure(struct rcu_head *head)
66{
67 struct tracepoint_entry *entry = container_of(head,
68 struct tracepoint_entry, rcu);
69 kfree(entry->oldptr);
70 /* Make sure we free the data before setting the pending flag to 0 */
71 smp_wmb();
72 entry->rcu_pending = 0;
73}
74
75static void tracepoint_entry_free_old(struct tracepoint_entry *entry, void *old)
76{
77 if (!old)
78 return;
79 entry->oldptr = old;
80 entry->rcu_pending = 1;
81 /* write rcu_pending before calling the RCU callback */
82 smp_wmb();
83 call_rcu_sched(&entry->rcu, free_old_closure);
84}
85
86static void debug_print_probes(struct tracepoint_entry *entry)
87{
88 int i;
89
90 if (!tracepoint_debug)
91 return;
92
93 for (i = 0; entry->funcs[i]; i++)
94 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
95}
96
97static void *
98tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
99{
100 int nr_probes = 0;
101 void **old, **new;
102
103 WARN_ON(!probe);
104
105 debug_print_probes(entry);
106 old = entry->funcs;
107 if (old) {
108 /* (N -> N+1), (N != 0, 1) probes */
109 for (nr_probes = 0; old[nr_probes]; nr_probes++)
110 if (old[nr_probes] == probe)
111 return ERR_PTR(-EEXIST);
112 }
113 /* + 2 : one for new probe, one for NULL func */
114 new = kzalloc((nr_probes + 2) * sizeof(void *), GFP_KERNEL);
115 if (new == NULL)
116 return ERR_PTR(-ENOMEM);
117 if (old)
118 memcpy(new, old, nr_probes * sizeof(void *));
119 new[nr_probes] = probe;
120 entry->refcount = nr_probes + 1;
121 entry->funcs = new;
122 debug_print_probes(entry);
123 return old;
124}
125
126static void *
127tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
128{
129 int nr_probes = 0, nr_del = 0, i;
130 void **old, **new;
131
132 old = entry->funcs;
133
134 debug_print_probes(entry);
135 /* (N -> M), (N > 1, M >= 0) probes */
136 for (nr_probes = 0; old[nr_probes]; nr_probes++) {
137 if ((!probe || old[nr_probes] == probe))
138 nr_del++;
139 }
140
141 if (nr_probes - nr_del == 0) {
142 /* N -> 0, (N > 1) */
143 entry->funcs = NULL;
144 entry->refcount = 0;
145 debug_print_probes(entry);
146 return old;
147 } else {
148 int j = 0;
149 /* N -> M, (N > 1, M > 0) */
150 /* + 1 for NULL */
151 new = kzalloc((nr_probes - nr_del + 1)
152 * sizeof(void *), GFP_KERNEL);
153 if (new == NULL)
154 return ERR_PTR(-ENOMEM);
155 for (i = 0; old[i]; i++)
156 if ((probe && old[i] != probe))
157 new[j++] = old[i];
158 entry->refcount = nr_probes - nr_del;
159 entry->funcs = new;
160 }
161 debug_print_probes(entry);
162 return old;
163}
164
165/*
166 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
167 * Must be called with tracepoints_mutex held.
168 * Returns NULL if not present.
169 */
170static struct tracepoint_entry *get_tracepoint(const char *name)
171{
172 struct hlist_head *head;
173 struct hlist_node *node;
174 struct tracepoint_entry *e;
175 u32 hash = jhash(name, strlen(name), 0);
176
177 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
178 hlist_for_each_entry(e, node, head, hlist) {
179 if (!strcmp(name, e->name))
180 return e;
181 }
182 return NULL;
183}
184
185/*
186 * Add the tracepoint to the tracepoint hash table. Must be called with
187 * tracepoints_mutex held.
188 */
189static struct tracepoint_entry *add_tracepoint(const char *name)
190{
191 struct hlist_head *head;
192 struct hlist_node *node;
193 struct tracepoint_entry *e;
194 size_t name_len = strlen(name) + 1;
195 u32 hash = jhash(name, name_len-1, 0);
196
197 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
198 hlist_for_each_entry(e, node, head, hlist) {
199 if (!strcmp(name, e->name)) {
200 printk(KERN_NOTICE
201 "tracepoint %s busy\n", name);
202 return ERR_PTR(-EEXIST); /* Already there */
203 }
204 }
205 /*
206 * Using kmalloc here to allocate a variable length element. Could
207 * cause some memory fragmentation if overused.
208 */
209 e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
210 if (!e)
211 return ERR_PTR(-ENOMEM);
212 memcpy(&e->name[0], name, name_len);
213 e->funcs = NULL;
214 e->refcount = 0;
215 e->rcu_pending = 0;
216 hlist_add_head(&e->hlist, head);
217 return e;
218}
219
220/*
221 * Remove the tracepoint from the tracepoint hash table. Must be called with
222 * mutex_lock held.
223 */
224static int remove_tracepoint(const char *name)
225{
226 struct hlist_head *head;
227 struct hlist_node *node;
228 struct tracepoint_entry *e;
229 int found = 0;
230 size_t len = strlen(name) + 1;
231 u32 hash = jhash(name, len-1, 0);
232
233 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
234 hlist_for_each_entry(e, node, head, hlist) {
235 if (!strcmp(name, e->name)) {
236 found = 1;
237 break;
238 }
239 }
240 if (!found)
241 return -ENOENT;
242 if (e->refcount)
243 return -EBUSY;
244 hlist_del(&e->hlist);
245 /* Make sure the call_rcu_sched has been executed */
246 if (e->rcu_pending)
247 rcu_barrier_sched();
248 kfree(e);
249 return 0;
250}
251
252/*
253 * Sets the probe callback corresponding to one tracepoint.
254 */
255static void set_tracepoint(struct tracepoint_entry **entry,
256 struct tracepoint *elem, int active)
257{
258 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
259
260 /*
261 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
262 * probe callbacks array is consistent before setting a pointer to it.
263 * This array is referenced by __DO_TRACE from
264 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
265 * is used.
266 */
267 rcu_assign_pointer(elem->funcs, (*entry)->funcs);
268 elem->state = active;
269}
270
271/*
272 * Disable a tracepoint and its probe callback.
273 * Note: only waiting an RCU period after setting elem->call to the empty
274 * function ensures that the original callback is not used anymore. This is
275 * ensured by preempt_disable around the call site.
276 */
277static void disable_tracepoint(struct tracepoint *elem)
278{
279 elem->state = 0;
280}
281
282/**
283 * tracepoint_update_probe_range - Update a probe range
284 * @begin: beginning of the range
285 * @end: end of the range
286 *
287 * Updates the probe callback corresponding to a range of tracepoints.
288 */
289void tracepoint_update_probe_range(struct tracepoint *begin,
290 struct tracepoint *end)
291{
292 struct tracepoint *iter;
293 struct tracepoint_entry *mark_entry;
294
295 mutex_lock(&tracepoints_mutex);
296 for (iter = begin; iter < end; iter++) {
297 mark_entry = get_tracepoint(iter->name);
298 if (mark_entry) {
299 set_tracepoint(&mark_entry, iter,
300 !!mark_entry->refcount);
301 } else {
302 disable_tracepoint(iter);
303 }
304 }
305 mutex_unlock(&tracepoints_mutex);
306}
307
308/*
309 * Update probes, removing the faulty probes.
310 */
311static void tracepoint_update_probes(void)
312{
313 /* Core kernel tracepoints */
314 tracepoint_update_probe_range(__start___tracepoints,
315 __stop___tracepoints);
316 /* tracepoints in modules. */
317 module_update_tracepoints();
318}
319
320/**
321 * tracepoint_probe_register - Connect a probe to a tracepoint
322 * @name: tracepoint name
323 * @probe: probe handler
324 *
325 * Returns 0 if ok, error value on error.
326 * The probe address must at least be aligned on the architecture pointer size.
327 */
328int tracepoint_probe_register(const char *name, void *probe)
329{
330 struct tracepoint_entry *entry;
331 int ret = 0;
332 void *old;
333
334 mutex_lock(&tracepoints_mutex);
335 entry = get_tracepoint(name);
336 if (!entry) {
337 entry = add_tracepoint(name);
338 if (IS_ERR(entry)) {
339 ret = PTR_ERR(entry);
340 goto end;
341 }
342 }
343 /*
344 * If we detect that a call_rcu_sched is pending for this tracepoint,
345 * make sure it's executed now.
346 */
347 if (entry->rcu_pending)
348 rcu_barrier_sched();
349 old = tracepoint_entry_add_probe(entry, probe);
350 if (IS_ERR(old)) {
351 ret = PTR_ERR(old);
352 goto end;
353 }
354 mutex_unlock(&tracepoints_mutex);
355 tracepoint_update_probes(); /* may update entry */
356 mutex_lock(&tracepoints_mutex);
357 entry = get_tracepoint(name);
358 WARN_ON(!entry);
359 if (entry->rcu_pending)
360 rcu_barrier_sched();
361 tracepoint_entry_free_old(entry, old);
362end:
363 mutex_unlock(&tracepoints_mutex);
364 return ret;
365}
366EXPORT_SYMBOL_GPL(tracepoint_probe_register);
367
368/**
369 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
370 * @name: tracepoint name
371 * @probe: probe function pointer
372 *
373 * We do not need to call a synchronize_sched to make sure the probes have
374 * finished running before doing a module unload, because the module unload
375 * itself uses stop_machine(), which ensures that every preempt-disabled section
376 * has finished.
377 */
378int tracepoint_probe_unregister(const char *name, void *probe)
379{
380 struct tracepoint_entry *entry;
381 void *old;
382 int ret = -ENOENT;
383
384 mutex_lock(&tracepoints_mutex);
385 entry = get_tracepoint(name);
386 if (!entry)
387 goto end;
388 if (entry->rcu_pending)
389 rcu_barrier_sched();
390 old = tracepoint_entry_remove_probe(entry, probe);
391 mutex_unlock(&tracepoints_mutex);
392 tracepoint_update_probes(); /* may update entry */
393 mutex_lock(&tracepoints_mutex);
394 entry = get_tracepoint(name);
395 if (!entry)
396 goto end;
397 if (entry->rcu_pending)
398 rcu_barrier_sched();
399 tracepoint_entry_free_old(entry, old);
400 remove_tracepoint(name); /* Ignore busy error message */
401 ret = 0;
402end:
403 mutex_unlock(&tracepoints_mutex);
404 return ret;
405}
406EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
407
408/**
409 * tracepoint_get_iter_range - Get the next tracepoint in a given range.
410 * @tracepoint: current tracepoint (in), next tracepoint (out)
411 * @begin: beginning of the range
412 * @end: end of the range
413 *
414 * Returns whether a next tracepoint has been found (1) or not (0).
415 * Will return the first tracepoint in the range if the input tracepoint is
416 * NULL.
417 */
418int tracepoint_get_iter_range(struct tracepoint **tracepoint,
419 struct tracepoint *begin, struct tracepoint *end)
420{
421 if (!*tracepoint && begin != end) {
422 *tracepoint = begin;
423 return 1;
424 }
425 if (*tracepoint >= begin && *tracepoint < end)
426 return 1;
427 return 0;
428}
429EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
430
431static void tracepoint_get_iter(struct tracepoint_iter *iter)
432{
433 int found = 0;
434
435 /* Core kernel tracepoints */
436 if (!iter->module) {
437 found = tracepoint_get_iter_range(&iter->tracepoint,
438 __start___tracepoints, __stop___tracepoints);
439 if (found)
440 goto end;
441 }
442 /* tracepoints in modules. */
443 found = module_get_iter_tracepoints(iter);
444end:
445 if (!found)
446 tracepoint_iter_reset(iter);
447}
448
449void tracepoint_iter_start(struct tracepoint_iter *iter)
450{
451 tracepoint_get_iter(iter);
452}
453EXPORT_SYMBOL_GPL(tracepoint_iter_start);
454
455void tracepoint_iter_next(struct tracepoint_iter *iter)
456{
457 iter->tracepoint++;
458 /*
459 * iter->tracepoint may be invalid because we blindly incremented it.
460 * Make sure it is valid by marshalling on the tracepoints, getting the
461 * tracepoints from following modules if necessary.
462 */
463 tracepoint_get_iter(iter);
464}
465EXPORT_SYMBOL_GPL(tracepoint_iter_next);
466
467void tracepoint_iter_stop(struct tracepoint_iter *iter)
468{
469}
470EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
471
472void tracepoint_iter_reset(struct tracepoint_iter *iter)
473{
474 iter->module = NULL;
475 iter->tracepoint = NULL;
476}
477EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
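
tracepoint_probe_register()/tracepoint_probe_unregister() above take the tracepoint name plus a probe whose signature must match the tracepoint's TPPROTO; the register_trace_*() wrappers used elsewhere in this merge (for example register_trace_sched_switch()) are thin static inlines around these calls. A minimal sketch, assuming the sched_switch tracepoint declared in <trace/sched.h>:

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>

struct rq;	/* the probe only forwards the pointer, so an opaque declaration suffices */

/* Probe signature must match the tracepoint's TPPROTO. */
static void my_probe_sched_switch(struct rq *rq, struct task_struct *prev,
				  struct task_struct *next)
{
	/* runs at the trace_sched_switch() call site, with preemption disabled */
}

static int __init attach_probe(void)
{
	/* roughly what register_trace_sched_switch(my_probe_sched_switch) expands to */
	return tracepoint_probe_register("sched_switch", (void *)my_probe_sched_switch);
}
module_init(attach_probe);

static void __exit detach_probe(void)
{
	tracepoint_probe_unregister("sched_switch", (void *)my_probe_sched_switch);
}
module_exit(detach_probe);

MODULE_LICENSE("GPL");
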
diff --git a/samples/Kconfig b/samples/Kconfig
index e1fb471cc501..4b02f5a0e656 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -13,6 +13,12 @@ config SAMPLE_MARKERS
13 help 13 help
14 This build markers example modules. 14 This build markers example modules.
15 15
16config SAMPLE_TRACEPOINTS
17 tristate "Build tracepoints examples -- loadable modules only"
18 depends on TRACEPOINTS && m
19 help
20 This builds the tracepoint example modules.
21
16config SAMPLE_KOBJECT 22config SAMPLE_KOBJECT
17 tristate "Build kobject examples" 23 tristate "Build kobject examples"
18 help 24 help
diff --git a/samples/Makefile b/samples/Makefile
index 2e02575f7794..10eaca89fe17 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -1,3 +1,3 @@
1# Makefile for Linux samples code 1# Makefile for Linux samples code
2 2
3obj-$(CONFIG_SAMPLES) += markers/ kobject/ kprobes/ 3obj-$(CONFIG_SAMPLES) += markers/ kobject/ kprobes/ tracepoints/
diff --git a/samples/markers/probe-example.c b/samples/markers/probe-example.c
index c8e099d4d1fd..2dfb3b32937e 100644
--- a/samples/markers/probe-example.c
+++ b/samples/markers/probe-example.c
@@ -81,6 +81,7 @@ static void __exit probe_fini(void)
81 probe_array[i].probe_func, &probe_array[i]); 81 probe_array[i].probe_func, &probe_array[i]);
82 printk(KERN_INFO "Number of event b : %u\n", 82 printk(KERN_INFO "Number of event b : %u\n",
83 atomic_read(&eventb_count)); 83 atomic_read(&eventb_count));
84 marker_synchronize_unregister();
84} 85}
85 86
86module_init(probe_init); 87module_init(probe_init);
diff --git a/samples/tracepoints/Makefile b/samples/tracepoints/Makefile
new file mode 100644
index 000000000000..36479ad9ae14
--- /dev/null
+++ b/samples/tracepoints/Makefile
@@ -0,0 +1,6 @@
1# builds the tracepoint example kernel modules;
2# then to use one (as root): insmod <module_name.ko>
3
4obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o
5obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o
6obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h
new file mode 100644
index 000000000000..0216b55bd640
--- /dev/null
+++ b/samples/tracepoints/tp-samples-trace.h
@@ -0,0 +1,13 @@
1#ifndef _TP_SAMPLES_TRACE_H
2#define _TP_SAMPLES_TRACE_H
3
4#include <linux/proc_fs.h> /* for struct inode and struct file */
5#include <linux/tracepoint.h>
6
7DEFINE_TRACE(subsys_event,
8 TPPROTO(struct inode *inode, struct file *file),
9 TPARGS(inode, file));
10DEFINE_TRACE(subsys_eventb,
11 TPPROTO(void),
12 TPARGS());
13#endif
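
DEFINE_TRACE(subsys_event, ...) above is what generates trace_subsys_event() for instrumentation sites and the register_trace_subsys_event()/unregister_trace_subsys_event() helpers used by the probe modules that follow. Conceptually a tracepoint is an on/off guard in front of a list of probe callbacks; the sketch below is a stand-alone user-space model of that idea only, with invented names, and is not the kernel's actual macro expansion:

#include <stdio.h>

#define MAX_PROBES 4

/* Conceptual model: an on/off flag plus a list of probe callbacks. */
struct fake_tracepoint {
	int enabled;
	void (*probes[MAX_PROBES])(int event_data);
	int nr_probes;
};

static struct fake_tracepoint subsys_event_tp;

/* Roughly what a trace_subsys_event() call amounts to: nothing unless a probe is attached. */
static void trace_subsys_event(int event_data)
{
	if (!subsys_event_tp.enabled)
		return;
	for (int i = 0; i < subsys_event_tp.nr_probes; i++)
		subsys_event_tp.probes[i](event_data);
}

/* Roughly what register_trace_subsys_event() amounts to. */
static int register_probe(void (*probe)(int))
{
	if (subsys_event_tp.nr_probes >= MAX_PROBES)
		return -1;
	subsys_event_tp.probes[subsys_event_tp.nr_probes++] = probe;
	subsys_event_tp.enabled = 1;
	return 0;
}

static void my_probe(int event_data)
{
	printf("probe fired, data=%d\n", event_data);
}

int main(void)
{
	trace_subsys_event(1);	/* no probe attached: no output, near-zero cost */
	register_probe(my_probe);
	trace_subsys_event(2);	/* now dispatched to every registered probe */
	return 0;
}
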
diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c
new file mode 100644
index 000000000000..55abfdda4bd4
--- /dev/null
+++ b/samples/tracepoints/tracepoint-probe-sample.c
@@ -0,0 +1,55 @@
1/*
2 * tracepoint-probe-sample.c
3 *
4 * sample tracepoint probes.
5 */
6
7#include <linux/module.h>
8#include <linux/file.h>
9#include <linux/dcache.h>
10#include "tp-samples-trace.h"
11
12/*
13 * Here the caller only guarantees locking for struct file and struct inode.
14 * Locking must therefore be done in the probe to use the dentry.
15 */
16static void probe_subsys_event(struct inode *inode, struct file *file)
17{
18 path_get(&file->f_path);
19 dget(file->f_path.dentry);
20 printk(KERN_INFO "Event is encountered with filename %s\n",
21 file->f_path.dentry->d_name.name);
22 dput(file->f_path.dentry);
23 path_put(&file->f_path);
24}
25
26static void probe_subsys_eventb(void)
27{
28 printk(KERN_INFO "Event B is encountered\n");
29}
30
31int __init tp_sample_trace_init(void)
32{
33 int ret;
34
35 ret = register_trace_subsys_event(probe_subsys_event);
36 WARN_ON(ret);
37 ret = register_trace_subsys_eventb(probe_subsys_eventb);
38 WARN_ON(ret);
39
40 return 0;
41}
42
43module_init(tp_sample_trace_init);
44
45void __exit tp_sample_trace_exit(void)
46{
47 unregister_trace_subsys_eventb(probe_subsys_eventb);
48 unregister_trace_subsys_event(probe_subsys_event);
49}
50
51module_exit(tp_sample_trace_exit);
52
53MODULE_LICENSE("GPL");
54MODULE_AUTHOR("Mathieu Desnoyers");
55MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c
new file mode 100644
index 000000000000..5e9fcf4afffe
--- /dev/null
+++ b/samples/tracepoints/tracepoint-probe-sample2.c
@@ -0,0 +1,42 @@
1/*
2 * tracepoint-probe-sample2.c
3 *
4 * 2nd sample tracepoint probes.
5 */
6
7#include <linux/module.h>
8#include <linux/fs.h>
9#include "tp-samples-trace.h"
10
11/*
12 * Here the caller only guarantees locking for struct file and struct inode.
13 * Locking must therefore be done in the probe to use the dentry.
14 */
15static void probe_subsys_event(struct inode *inode, struct file *file)
16{
17 printk(KERN_INFO "Event is encountered with inode number %lu\n",
18 inode->i_ino);
19}
20
21int __init tp_sample_trace_init(void)
22{
23 int ret;
24
25 ret = register_trace_subsys_event(probe_subsys_event);
26 WARN_ON(ret);
27
28 return 0;
29}
30
31module_init(tp_sample_trace_init);
32
33void __exit tp_sample_trace_exit(void)
34{
35 unregister_trace_subsys_event(probe_subsys_event);
36}
37
38module_exit(tp_sample_trace_exit);
39
40MODULE_LICENSE("GPL");
41MODULE_AUTHOR("Mathieu Desnoyers");
42MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c
new file mode 100644
index 000000000000..4ae4b7fcc043
--- /dev/null
+++ b/samples/tracepoints/tracepoint-sample.c
@@ -0,0 +1,53 @@
1/* tracepoint-sample.c
2 *
3 * Executes a tracepoint when /proc/tracepoint-example is opened.
4 *
5 * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
6 *
7 * This file is released under the GPLv2.
8 * See the file COPYING for more details.
9 */
10
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/proc_fs.h>
14#include "tp-samples-trace.h"
15
16struct proc_dir_entry *pentry_example;
17
18static int my_open(struct inode *inode, struct file *file)
19{
20 int i;
21
22 trace_subsys_event(inode, file);
23 for (i = 0; i < 10; i++)
24 trace_subsys_eventb();
25 return -EPERM;
26}
27
28static struct file_operations mark_ops = {
29 .open = my_open,
30};
31
32static int example_init(void)
33{
34 printk(KERN_ALERT "example init\n");
35 pentry_example = proc_create("tracepoint-example", 0444, NULL,
36 &mark_ops);
37 if (!pentry_example)
38 return -EPERM;
39 return 0;
40}
41
42static void example_exit(void)
43{
44 printk(KERN_ALERT "example exit\n");
45 remove_proc_entry("tracepoint-example", NULL);
46}
47
48module_init(example_init)
49module_exit(example_exit)
50
51MODULE_LICENSE("GPL");
52MODULE_AUTHOR("Mathieu Desnoyers");
53MODULE_DESCRIPTION("Tracepoint example");
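
With tracepoint-sample.ko and a probe module loaded, every open of /proc/tracepoint-example runs trace_subsys_event() once and trace_subsys_eventb() ten times, then my_open() returns -EPERM, so the open itself is expected to fail while the probes print to the kernel log. A small user-space trigger, assuming the sample's proc file is present (look in dmesg for the probe output):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Each open attempt runs the tracepoints in my_open() above. */
	int fd = open("/proc/tracepoint-example", O_RDONLY);

	if (fd < 0)
		printf("open failed as expected: %s\n", strerror(errno));
	else
		close(fd);	/* would only happen if the sample were changed */
	return 0;
}
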
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 277cfe0b7100..5ed4cbf1e0e1 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -198,10 +198,17 @@ cmd_modversions = \
198 fi; 198 fi;
199endif 199endif
200 200
201ifdef CONFIG_FTRACE_MCOUNT_RECORD
202cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \
203 "$(ARCH)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" \
204 "$(MV)" "$(@)";
205endif
206
201define rule_cc_o_c 207define rule_cc_o_c
202 $(call echo-cmd,checksrc) $(cmd_checksrc) \ 208 $(call echo-cmd,checksrc) $(cmd_checksrc) \
203 $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \ 209 $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \
204 $(cmd_modversions) \ 210 $(cmd_modversions) \
211 $(cmd_record_mcount) \
205 scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \ 212 scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \
206 $(dot-target).tmp; \ 213 $(dot-target).tmp; \
207 rm -f $(depfile); \ 214 rm -f $(depfile); \
diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
index 2243353fe55d..5e7316e5aa39 100644
--- a/scripts/bootgraph.pl
+++ b/scripts/bootgraph.pl
@@ -37,13 +37,13 @@
37# dmesg | perl scripts/bootgraph.pl > output.svg 37# dmesg | perl scripts/bootgraph.pl > output.svg
38# 38#
39 39
40my @rows; 40my %start, %end;
41my %start, %end, %row;
42my $done = 0; 41my $done = 0;
43my $rowcount = 0;
44my $maxtime = 0; 42my $maxtime = 0;
45my $firsttime = 100; 43my $firsttime = 100;
46my $count = 0; 44my $count = 0;
45my %pids;
46
47while (<>) { 47while (<>) {
48 my $line = $_; 48 my $line = $_;
49 if ($line =~ /([0-9\.]+)\] calling ([a-zA-Z0-9\_]+)\+/) { 49 if ($line =~ /([0-9\.]+)\] calling ([a-zA-Z0-9\_]+)\+/) {
@@ -54,14 +54,8 @@ while (<>) {
54 $firsttime = $1; 54 $firsttime = $1;
55 } 55 }
56 } 56 }
57 $row{$func} = 1;
58 if ($line =~ /\@ ([0-9]+)/) { 57 if ($line =~ /\@ ([0-9]+)/) {
59 my $pid = $1; 58 $pids{$func} = $1;
60 if (!defined($rows[$pid])) {
61 $rowcount = $rowcount + 1;
62 $rows[$pid] = $rowcount;
63 }
64 $row{$func} = $rows[$pid];
65 } 59 }
66 $count = $count + 1; 60 $count = $count + 1;
67 } 61 }
@@ -109,17 +103,25 @@ $styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(
109my $mult = 950.0 / ($maxtime - $firsttime); 103my $mult = 950.0 / ($maxtime - $firsttime);
110my $threshold = ($maxtime - $firsttime) / 60.0; 104my $threshold = ($maxtime - $firsttime) / 60.0;
111my $stylecounter = 0; 105my $stylecounter = 0;
106my %rows;
107my $rowscount = 1;
112while (($key,$value) = each %start) { 108while (($key,$value) = each %start) {
113 my $duration = $end{$key} - $start{$key}; 109 my $duration = $end{$key} - $start{$key};
114 110
115 if ($duration >= $threshold) { 111 if ($duration >= $threshold) {
116 my $s, $s2, $e, $y; 112 my $s, $s2, $e, $y;
113 $pid = $pids{$key};
114
115 if (!defined($rows{$pid})) {
116 $rows{$pid} = $rowscount;
117 $rowscount = $rowscount + 1;
118 }
117 $s = ($value - $firsttime) * $mult; 119 $s = ($value - $firsttime) * $mult;
118 $s2 = $s + 6; 120 $s2 = $s + 6;
119 $e = ($end{$key} - $firsttime) * $mult; 121 $e = ($end{$key} - $firsttime) * $mult;
120 $w = $e - $s; 122 $w = $e - $s;
121 123
122 $y = $row{$key} * 150; 124 $y = $rows{$pid} * 150;
123 $y2 = $y + 4; 125 $y2 = $y + 4;
124 126
125 $style = $styles[$stylecounter]; 127 $style = $styles[$stylecounter];
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
new file mode 100755
index 000000000000..f56d760bd589
--- /dev/null
+++ b/scripts/recordmcount.pl
@@ -0,0 +1,395 @@
1#!/usr/bin/perl -w
2# (c) 2008, Steven Rostedt <srostedt@redhat.com>
3# Licensed under the terms of the GNU GPL License version 2
4#
5# recordmcount.pl - makes a section called __mcount_loc that holds
6# all the offsets to the calls to mcount.
7#
8#
9# What we want to end up with is a section in vmlinux called
10# __mcount_loc that contains a list of pointers to all the
11# call sites in the kernel that call mcount. Later on boot up, the kernel
12# will read this list, save the locations and turn them into nops.
13# When tracing or profiling is later enabled, these locations will then
14# be converted back to pointers to some function.
15#
16# This is no easy feat. This script is called just after the original
17# object is compiled and before it is linked.
18#
19# The references to the call sites are offsets from the section of text
20# that the call site is in. Hence, all functions in a section that
21# has a call site to mcount, will have the offset from the beginning of
22# the section and not the beginning of the function.
23#
24# The trick is to find a way to record the beginning of the section.
25# The way we do this is to look at the first function in the section
26# which will also be the location of that section after final link.
27# e.g.
28#
29# .section ".text.sched"
30# .globl my_func
31# my_func:
32# [...]
33# call mcount (offset: 0x5)
34# [...]
35# ret
36# other_func:
37# [...]
38# call mcount (offset: 0x1b)
39# [...]
40#
41# Both relocation offsets for the mcounts in the above example will be
42# offset from .text.sched. If we make another file called tmp.s with:
43#
44# .section __mcount_loc
45# .quad my_func + 0x5
46# .quad my_func + 0x1b
47#
48# We can then compile this tmp.s into tmp.o, and link it to the original
49# object.
50#
51# But this gets hard if my_func is not globl (a static function).
52# In such a case we have:
53#
54# .section ".text.sched"
55# my_func:
56# [...]
57# call mcount (offset: 0x5)
58# [...]
59# ret
60# .globl my_func
61# other_func:
62# [...]
63# call mcount (offset: 0x1b)
64# [...]
65#
66# If we make the tmp.s the same as above, when we link together with
67# the original object, we will end up with two symbols for my_func:
68# one local, one global. After final compile, we will end up with
69# an undefined reference to my_func.
70#
71# Since local objects can reference local variables, we need to find
72# a way to make tmp.o reference the local objects of the original object
73# file after it is linked together. To do this, we convert the my_func
74# into a global symbol before linking tmp.o. Then after we link tmp.o
75# we will only have a single symbol for my_func that is global.
76# We can convert my_func back into a local symbol and we are done.
77#
78# Here are the steps we take:
79#
80# 1) Record all the local symbols by using 'nm'
81# 2) Use objdump to find all the call site offsets and sections for
82# mcount.
83# 3) Compile the list into its own object.
84# 4) Do we have to deal with local functions? If not, go to step 8.
85# 5) Make an object that converts these local functions to global symbols
86# with objcopy.
87# 6) Link together this new object with the list object.
88# 7) Convert the local functions back to local symbols and rename
89# the result as the original object.
90# End.
91# 8) Link the object with the list object.
92# 9) Move the result back to the original object.
93# End.
94#
95
96use strict;
97
98my $P = $0;
99$P =~ s@.*/@@g;
100
101my $V = '0.1';
102
103if ($#ARGV < 6) {
104 print "usage: $P arch objdump objcopy cc ld nm rm mv inputfile\n";
105 print "version: $V\n";
106 exit(1);
107}
108
109my ($arch, $objdump, $objcopy, $cc, $ld, $nm, $rm, $mv, $inputfile) = @ARGV;
110
111$objdump = "objdump" if ((length $objdump) == 0);
112$objcopy = "objcopy" if ((length $objcopy) == 0);
113$cc = "gcc" if ((length $cc) == 0);
114$ld = "ld" if ((length $ld) == 0);
115$nm = "nm" if ((length $nm) == 0);
116$rm = "rm" if ((length $rm) == 0);
117$mv = "mv" if ((length $mv) == 0);
118
119#print STDERR "running: $P '$arch' '$objdump' '$objcopy' '$cc' '$ld' " .
120# "'$nm' '$rm' '$mv' '$inputfile'\n";
121
122my %locals; # List of local (static) functions
123my %weak; # List of weak functions
124my %convert; # List of local functions used that needs conversion
125
126my $type;
127my $section_regex; # Find the start of a section
128my $function_regex; # Find the name of a function
129 # (return offset and func name)
130my $mcount_regex; # Find the call site to mcount (return offset)
131
132if ($arch eq "x86_64") {
133 $section_regex = "Disassembly of section";
134 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
135 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
136 $type = ".quad";
137
138 # force flags for this arch
139 $ld .= " -m elf_x86_64";
140 $objdump .= " -M x86-64";
141 $objcopy .= " -O elf64-x86-64";
142 $cc .= " -m64";
143
144} elsif ($arch eq "i386") {
145 $section_regex = "Disassembly of section";
146 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
147 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
148 $type = ".long";
149
150 # force flags for this arch
151 $ld .= " -m elf_i386";
152 $objdump .= " -M i386";
153 $objcopy .= " -O elf32-i386";
154 $cc .= " -m32";
155
156} else {
157 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
158}
159
160my $text_found = 0;
161my $read_function = 0;
162my $opened = 0;
163my $mcount_section = "__mcount_loc";
164
165my $dirname;
166my $filename;
167my $prefix;
168my $ext;
169
170if ($inputfile =~ m,^(.*)/([^/]*)$,) {
171 $dirname = $1;
172 $filename = $2;
173} else {
174 $dirname = ".";
175 $filename = $inputfile;
176}
177
178if ($filename =~ m,^(.*)(\.\S),) {
179 $prefix = $1;
180 $ext = $2;
181} else {
182 $prefix = $filename;
183 $ext = "";
184}
185
186my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
187my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
188
189#
190# --globalize-symbols came out in 2.17, so we must test the version
191# of objcopy; if it is older than 2.17, we cannot record
192# local functions.
193my $use_locals = 01;
194my $local_warn_once = 0;
195my $found_version = 0;
196
197open (IN, "$objcopy --version |") || die "error running $objcopy";
198while (<IN>) {
199 if (/objcopy.*\s(\d+)\.(\d+)/) {
200 my $major = $1;
201 my $minor = $2;
202
203 $found_version = 1;
204 if ($major < 2 ||
205 ($major == 2 && $minor < 17)) {
206 $use_locals = 0;
207 }
208 last;
209 }
210}
211close (IN);
212
213if (!$found_version) {
214 print STDERR "WARNING: could not find objcopy version.\n" .
215 "\tDisabling local function references.\n";
216}
217
218
219#
220# Step 1: find all the local (static functions) and weak symbols.
221# 't' is local, 'w/W' is weak (we never use a weak function)
222#
223open (IN, "$nm $inputfile|") || die "error running $nm";
224while (<IN>) {
225 if (/^[0-9a-fA-F]+\s+t\s+(\S+)/) {
226 $locals{$1} = 1;
227 } elsif (/^[0-9a-fA-F]+\s+([wW])\s+(\S+)/) {
228 $weak{$2} = $1;
229 }
230}
231close(IN);
232
233my @offsets; # Array of offsets of mcount callers
234my $ref_func; # reference function to use for offsets
235my $offset = 0; # offset of ref_func to section beginning
236
237##
238# update_funcs - print out the current mcount callers
239#
240# Go through the list of offsets to callers and write them to
241# the output file in a format that can be read by an assembler.
242#
243sub update_funcs
244{
245 return if ($#offsets < 0);
246
247 defined($ref_func) || die "No function to reference";
248
249	# A section only had a weak function to represent it.
250 # Unfortunately, a weak function may be overwritten by another
251 # function of the same name, making all these offsets incorrect.
252 # To be safe, we simply print a warning and bail.
253 if (defined $weak{$ref_func}) {
254 print STDERR
255 "$inputfile: WARNING: referencing weak function" .
256 " $ref_func for mcount\n";
257 return;
258 }
259
260 # is this function static? If so, note this fact.
261 if (defined $locals{$ref_func}) {
262
263 # only use locals if objcopy supports globalize-symbols
264 if (!$use_locals) {
265 return;
266 }
267 $convert{$ref_func} = 1;
268 }
269
270 # Loop through all the mcount caller offsets and print a reference
271 # to the caller based from the ref_func.
272 for (my $i=0; $i <= $#offsets; $i++) {
273 if (!$opened) {
274 open(FILE, ">$mcount_s") || die "can't create $mcount_s\n";
275 $opened = 1;
276 print FILE "\t.section $mcount_section,\"a\",\@progbits\n";
277 }
278 printf FILE "\t%s %s + %d\n", $type, $ref_func, $offsets[$i] - $offset;
279 }
280}
281
282#
283# Step 2: find the sections and mcount call sites
284#
285open(IN, "$objdump -dr $inputfile|") || die "error running $objdump";
286
287my $text;
288
289while (<IN>) {
290 # is it a section?
291 if (/$section_regex/) {
292 $read_function = 1;
293 # print out any recorded offsets
294 update_funcs() if ($text_found);
295
296 # reset all markers and arrays
297 $text_found = 0;
298 undef($ref_func);
299 undef(@offsets);
300
301 # section found, now is this a start of a function?
302 } elsif ($read_function && /$function_regex/) {
303 $text_found = 1;
304 $offset = hex $1;
305 $text = $2;
306
307 # if this is either a local function or a weak function
308 # keep looking for functions that are global that
309 # we can use safely.
310 if (!defined($locals{$text}) && !defined($weak{$text})) {
311 $ref_func = $text;
312 $read_function = 0;
313 } else {
314 # if we already have a function, and this is weak, skip it
315 if (!defined($ref_func) || !defined($weak{$text})) {
316 $ref_func = $text;
317 }
318 }
319 }
320
321 # is this a call site to mcount? If so, record it to print later
322 if ($text_found && /$mcount_regex/) {
323 $offsets[$#offsets + 1] = hex $1;
324 }
325}
326
327# dump out any more offsets that may have been found
328update_funcs() if ($text_found);
329
330# If we did not find any mcount callers, we are done (do nothing).
331if (!$opened) {
332 exit(0);
333}
334
335close(FILE);
336
337#
338# Step 3: Compile the file that holds the list of call sites to mcount.
339#
340`$cc -o $mcount_o -c $mcount_s`;
341
342my @converts = keys %convert;
343
344#
345# Step 4: Do we have sections that started with local functions?
346#
347if ($#converts >= 0) {
348 my $globallist = "";
349 my $locallist = "";
350
351 foreach my $con (@converts) {
352 $globallist .= " --globalize-symbol $con";
353 $locallist .= " --localize-symbol $con";
354 }
355
356 my $globalobj = $dirname . "/.tmp_gl_" . $filename;
357 my $globalmix = $dirname . "/.tmp_mx_" . $filename;
358
359 #
360 # Step 5: set up each local function as a global
361 #
362 `$objcopy $globallist $inputfile $globalobj`;
363
364 #
365 # Step 6: Link the global version to our list.
366 #
367 `$ld -r $globalobj $mcount_o -o $globalmix`;
368
369 #
370 # Step 7: Convert the local functions back into local symbols
371 #
372 `$objcopy $locallist $globalmix $inputfile`;
373
374 # Remove the temp files
375 `$rm $globalobj $globalmix`;
376
377} else {
378
379 my $mix = $dirname . "/.tmp_mx_" . $filename;
380
381 #
382 # Step 8: Link the object with our list of call sites object.
383 #
384 `$ld -r $inputfile $mcount_o -o $mix`;
385
386 #
387 # Step 9: Move the result back to the original object.
388 #
389 `$mv $mix $inputfile`;
390}
391
392# Clean up the temp files
393`$rm $mcount_o $mcount_s`;
394
395exit(0);
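
The mechanism recordmcount.pl builds on -- recording addresses into a dedicated ELF section at compile time and walking that section later through linker-provided boundary symbols -- can be demonstrated entirely in user space. The sketch below uses an invented section name (my_mcount_loc) together with GNU ld's automatic __start_/__stop_ symbols; in the kernel the boundaries are the __start_mcount_loc/__stop_mcount_loc pair laid down by the vmlinux linker script, which ftrace walks at boot to turn every recorded call site into a nop:

#include <stdio.h>

static void foo(void) { }
static void bar(void) { }

/*
 * Stand-ins for the ".quad <func> + <offset>" entries recordmcount.pl emits:
 * pointers dropped into a dedicated section at compile time.
 */
typedef void (*callsite_t)(void);
static callsite_t rec1 __attribute__((section("my_mcount_loc"), used)) = foo;
static callsite_t rec2 __attribute__((section("my_mcount_loc"), used)) = bar;

/* GNU ld provides these for any section whose name is a valid C identifier. */
extern callsite_t __start_my_mcount_loc[];
extern callsite_t __stop_my_mcount_loc[];

int main(void)
{
	foo();
	bar();
	/* Walk the recorded "call sites", as the kernel does with __mcount_loc. */
	for (callsite_t *p = __start_my_mcount_loc; p < __stop_my_mcount_loc; p++)
		printf("recorded address: %p\n", (void *)*p);
	return 0;
}

Note that recordmcount.pl only needs the globalize/localize dance of steps 5-7 because its generated list is a separate object file; in this toy the pointers live in the same translation unit, so referencing static functions is not a problem.
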