author     Ingo Molnar <mingo@elte.hu>    2008-12-12 06:43:05 -0500
committer  Ingo Molnar <mingo@elte.hu>    2008-12-12 06:43:05 -0500
commit     81444a799550214f549caf579cf65a0ca55e70b7 (patch)
tree       3288dac0740be2e1e7d1af4ee51d792a6e91edf3 /kernel
parent     a64d31baed104be25305e9c71585d3ea4ee9a418 (diff)
parent     da485e0cb16726797e99a595a399b9fc721b91bc (diff)

Merge branch 'tracing/fastboot' into cpus4096
Diffstat (limited to 'kernel')

-rw-r--r--  kernel/Makefile                        |    4
-rw-r--r--  kernel/extable.c                       |    5
-rw-r--r--  kernel/fork.c                          |   11
-rw-r--r--  kernel/lockdep.c                       |    1
-rw-r--r--  kernel/module.c                        |    2
-rw-r--r--  kernel/sched.c                         |    2
-rw-r--r--  kernel/trace/Kconfig                   |   42
-rw-r--r--  kernel/trace/Makefile                  |    4
-rw-r--r--  kernel/trace/ftrace.c                  |  663
-rw-r--r--  kernel/trace/ring_buffer.c             |  311
-rw-r--r--  kernel/trace/trace.c                   |  148
-rw-r--r--  kernel/trace/trace.h                   |  103
-rw-r--r--  kernel/trace/trace_branch.c            |    5
-rw-r--r--  kernel/trace/trace_bts.c               |  276
-rw-r--r--  kernel/trace/trace_functions_graph.c   |  611
-rw-r--r--  kernel/trace/trace_functions_return.c  |   98
-rw-r--r--  kernel/trace/trace_mmiotrace.c         |    2
-rw-r--r--  kernel/trace/trace_power.c             |  179
-rw-r--r--  kernel/trace/trace_stack.c             |   13

19 files changed, 2132 insertions, 348 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 010ccb311166..6a212b842d86 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -20,10 +20,6 @@ CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 endif
-ifdef CONFIG_FUNCTION_RET_TRACER
-CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
-CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
-endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
diff --git a/kernel/extable.c b/kernel/extable.c
index adf0cc9c02d6..e136ed8d82ba 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -17,6 +17,7 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/ftrace.h>
 #include <asm/uaccess.h>
 #include <asm/sections.h>
 
@@ -40,7 +41,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 	return e;
 }
 
-int core_kernel_text(unsigned long addr)
+__notrace_funcgraph int core_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext &&
 	    addr <= (unsigned long)_etext)
@@ -53,7 +54,7 @@ int core_kernel_text(unsigned long addr)
 	return 0;
 }
 
-int __kernel_text_address(unsigned long addr)
+__notrace_funcgraph int __kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
 		return 1;
diff --git a/kernel/fork.c b/kernel/fork.c
index d6e1a3205f62..7407ab319875 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -140,7 +140,7 @@ void free_task(struct task_struct *tsk)
 	prop_local_destroy_single(&tsk->dirties);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
-	ftrace_retfunc_exit_task(tsk);
+	ftrace_graph_exit_task(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -1137,6 +1137,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
+	ftrace_graph_init_task(p);
+
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1145,7 +1147,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_pid;
+			goto bad_fork_free_graph;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1238,7 +1240,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_pid;
+		goto bad_fork_free_graph;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1271,11 +1273,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
-	ftrace_retfunc_init_task(p);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	return p;
 
+bad_fork_free_graph:
+	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e4bdda8dcf04..c4c7df23f8c7 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -25,6 +25,7 @@
  * Thanks to Arjan van de Ven for coming up with the initial idea of
  * mapping lock dependencies runtime.
  */
+#define DISABLE_BRANCH_PROFILING
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
diff --git a/kernel/module.c b/kernel/module.c
index 89bcf7c1327d..dd2a54155b54 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2704,7 +2704,7 @@ int is_module_address(unsigned long addr)
 
 
 /* Is this a valid kernel address? */
-struct module *__module_text_address(unsigned long addr)
+__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
 {
 	struct module *mod;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 8050a61a7adb..4ed9f588faa6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5893,7 +5893,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
-	ftrace_retfunc_init_task(idle);
+	ftrace_graph_init_task(idle);
 }
 
 /*
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9cbf7761f498..bde6f03512d5 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,7 +12,7 @@ config NOP_TRACER
 config HAVE_FUNCTION_TRACER
 	bool
 
-config HAVE_FUNCTION_RET_TRACER
+config HAVE_FUNCTION_GRAPH_TRACER
 	bool
 
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -28,6 +28,9 @@ config HAVE_DYNAMIC_FTRACE
 config HAVE_FTRACE_MCOUNT_RECORD
 	bool
 
+config HAVE_HW_BRANCH_TRACER
+	bool
+
 config TRACER_MAX_TRACE
 	bool
 
@@ -60,15 +63,19 @@ config FUNCTION_TRACER
 	  (the bootup default), then the overhead of the instructions is very
 	  small and not measurable even in micro-benchmarks.
 
-config FUNCTION_RET_TRACER
-	bool "Kernel Function return Tracer"
-	depends on HAVE_FUNCTION_RET_TRACER
+config FUNCTION_GRAPH_TRACER
+	bool "Kernel Function Graph Tracer"
+	depends on HAVE_FUNCTION_GRAPH_TRACER
 	depends on FUNCTION_TRACER
+	default y
 	help
-	  Enable the kernel to trace a function at its return.
-	  It's first purpose is to trace the duration of functions.
-	  This is done by setting the current return address on the thread
-	  info structure of the current task.
+	  Enable the kernel to trace a function at both its return
+	  and its entry.
+	  It's first purpose is to trace the duration of functions and
+	  draw a call graph for each thread with some informations like
+	  the return value.
+	  This is done by setting the current return address on the current
+	  task structure into a stack of calls.
 
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
@@ -214,6 +221,17 @@ config BRANCH_TRACER
 
 	  Say N if unsure.
 
+config POWER_TRACER
+	bool "Trace power consumption behavior"
+	depends on DEBUG_KERNEL
+	depends on X86
+	select TRACING
+	help
+	  This tracer helps developers to analyze and optimize the kernels
+	  power management decisions, specifically the C-state and P-state
+	  behavior.
+
+
 config STACK_TRACER
 	bool "Trace max stack"
 	depends on HAVE_FUNCTION_TRACER
@@ -233,6 +251,14 @@ config STACK_TRACER
 
 	  Say N if unsure.
 
+config BTS_TRACER
+	depends on HAVE_HW_BRANCH_TRACER
+	bool "Trace branches"
+	select TRACING
+	help
+	  This tracer records all branches on the system in a circular
+	  buffer giving access to the last N branches for each cpu.
+
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
 	depends on FUNCTION_TRACER
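
Aside: the FUNCTION_GRAPH_TRACER help text above only hints at how the entry/return hook works. Below is a minimal user-space sketch of the same idea — at function entry the real return address (plus a timestamp) is pushed onto a small per-task stack, and at return it is popped again, which is when the duration can be computed. This is an illustration only, not the kernel's implementation; the type names, the depth limit and the clock source here are invented for the example.

	/* Illustrative sketch only -- NOT the kernel's code. */
	#include <stdio.h>
	#include <time.h>

	#define RET_STACK_DEPTH 50		/* hypothetical depth limit */

	struct ret_entry {
		unsigned long ret_addr;		/* saved original return address */
		unsigned long long entry_ns;	/* timestamp taken at entry */
	};

	struct task_ret_stack {
		int curr;			/* -1 means the stack is empty */
		struct ret_entry stack[RET_STACK_DEPTH];
	};

	static unsigned long long now_ns(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/* "entry hook": remember where the traced function must really return to */
	static int push_return(struct task_ret_stack *t, unsigned long ret_addr)
	{
		if (t->curr + 1 >= RET_STACK_DEPTH)
			return -1;		/* overrun: give up on this call */
		t->curr++;
		t->stack[t->curr].ret_addr = ret_addr;
		t->stack[t->curr].entry_ns = now_ns();
		return 0;
	}

	/* "return hook": report the duration and hand back the saved address */
	static unsigned long pop_return(struct task_ret_stack *t)
	{
		struct ret_entry *e = &t->stack[t->curr--];

		printf("duration: %llu ns, return to %#lx\n",
		       now_ns() - e->entry_ns, e->ret_addr);
		return e->ret_addr;
	}

	int main(void)
	{
		struct task_ret_stack task = { .curr = -1 };

		/* pretend a traced function was entered and then returned */
		push_return(&task, 0xdeadbeefUL);
		pop_return(&task);
		return 0;
	}

In the kernel the equivalent push happens from the mcount entry hook, and the FTRACE_START_FUNC_RET / FTRACE_STOP_FUNC_RET commands added in the ftrace.c hunks further down switch that hook on and off.
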
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 1a8c9259dc69..62dc561b6676 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -29,7 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
+obj-$(CONFIG_BTS_TRACER) += trace_bts.o
+obj-$(CONFIG_POWER_TRACER) += trace_power.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 53042f118f23..a12f80efceaa 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -47,12 +47,13 @@ | |||
47 | int ftrace_enabled __read_mostly; | 47 | int ftrace_enabled __read_mostly; |
48 | static int last_ftrace_enabled; | 48 | static int last_ftrace_enabled; |
49 | 49 | ||
50 | /* set when tracing only a pid */ | ||
51 | struct pid *ftrace_pid_trace; | ||
52 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
53 | |||
50 | /* Quick disabling of function tracer. */ | 54 | /* Quick disabling of function tracer. */ |
51 | int function_trace_stop; | 55 | int function_trace_stop; |
52 | 56 | ||
53 | /* By default, current tracing type is normal tracing. */ | ||
54 | enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER; | ||
55 | |||
56 | /* | 57 | /* |
57 | * ftrace_disabled is set when an anomaly is discovered. | 58 | * ftrace_disabled is set when an anomaly is discovered. |
58 | * ftrace_disabled is much stronger than ftrace_enabled. | 59 | * ftrace_disabled is much stronger than ftrace_enabled. |
@@ -61,6 +62,7 @@ static int ftrace_disabled __read_mostly; | |||
61 | 62 | ||
62 | static DEFINE_SPINLOCK(ftrace_lock); | 63 | static DEFINE_SPINLOCK(ftrace_lock); |
63 | static DEFINE_MUTEX(ftrace_sysctl_lock); | 64 | static DEFINE_MUTEX(ftrace_sysctl_lock); |
65 | static DEFINE_MUTEX(ftrace_start_lock); | ||
64 | 66 | ||
65 | static struct ftrace_ops ftrace_list_end __read_mostly = | 67 | static struct ftrace_ops ftrace_list_end __read_mostly = |
66 | { | 68 | { |
@@ -70,6 +72,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = | |||
70 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 72 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; |
71 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 73 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
72 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | 74 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; |
75 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | ||
73 | 76 | ||
74 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 77 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) |
75 | { | 78 | { |
@@ -86,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | |||
86 | }; | 89 | }; |
87 | } | 90 | } |
88 | 91 | ||
92 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) | ||
93 | { | ||
94 | if (!test_tsk_trace_trace(current)) | ||
95 | return; | ||
96 | |||
97 | ftrace_pid_function(ip, parent_ip); | ||
98 | } | ||
99 | |||
100 | static void set_ftrace_pid_function(ftrace_func_t func) | ||
101 | { | ||
102 | /* do not set ftrace_pid_function to itself! */ | ||
103 | if (func != ftrace_pid_func) | ||
104 | ftrace_pid_function = func; | ||
105 | } | ||
106 | |||
89 | /** | 107 | /** |
90 | * clear_ftrace_function - reset the ftrace function | 108 | * clear_ftrace_function - reset the ftrace function |
91 | * | 109 | * |
@@ -96,6 +114,7 @@ void clear_ftrace_function(void) | |||
96 | { | 114 | { |
97 | ftrace_trace_function = ftrace_stub; | 115 | ftrace_trace_function = ftrace_stub; |
98 | __ftrace_trace_function = ftrace_stub; | 116 | __ftrace_trace_function = ftrace_stub; |
117 | ftrace_pid_function = ftrace_stub; | ||
99 | } | 118 | } |
100 | 119 | ||
101 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 120 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
@@ -128,20 +147,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
128 | ftrace_list = ops; | 147 | ftrace_list = ops; |
129 | 148 | ||
130 | if (ftrace_enabled) { | 149 | if (ftrace_enabled) { |
150 | ftrace_func_t func; | ||
151 | |||
152 | if (ops->next == &ftrace_list_end) | ||
153 | func = ops->func; | ||
154 | else | ||
155 | func = ftrace_list_func; | ||
156 | |||
157 | if (ftrace_pid_trace) { | ||
158 | set_ftrace_pid_function(func); | ||
159 | func = ftrace_pid_func; | ||
160 | } | ||
161 | |||
131 | /* | 162 | /* |
132 | * For one func, simply call it directly. | 163 | * For one func, simply call it directly. |
133 | * For more than one func, call the chain. | 164 | * For more than one func, call the chain. |
134 | */ | 165 | */ |
135 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 166 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
136 | if (ops->next == &ftrace_list_end) | 167 | ftrace_trace_function = func; |
137 | ftrace_trace_function = ops->func; | ||
138 | else | ||
139 | ftrace_trace_function = ftrace_list_func; | ||
140 | #else | 168 | #else |
141 | if (ops->next == &ftrace_list_end) | 169 | __ftrace_trace_function = func; |
142 | __ftrace_trace_function = ops->func; | ||
143 | else | ||
144 | __ftrace_trace_function = ftrace_list_func; | ||
145 | ftrace_trace_function = ftrace_test_stop_func; | 170 | ftrace_trace_function = ftrace_test_stop_func; |
146 | #endif | 171 | #endif |
147 | } | 172 | } |
@@ -182,8 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
182 | 207 | ||
183 | if (ftrace_enabled) { | 208 | if (ftrace_enabled) { |
184 | /* If we only have one func left, then call that directly */ | 209 | /* If we only have one func left, then call that directly */ |
185 | if (ftrace_list->next == &ftrace_list_end) | 210 | if (ftrace_list->next == &ftrace_list_end) { |
186 | ftrace_trace_function = ftrace_list->func; | 211 | ftrace_func_t func = ftrace_list->func; |
212 | |||
213 | if (ftrace_pid_trace) { | ||
214 | set_ftrace_pid_function(func); | ||
215 | func = ftrace_pid_func; | ||
216 | } | ||
217 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
218 | ftrace_trace_function = func; | ||
219 | #else | ||
220 | __ftrace_trace_function = func; | ||
221 | #endif | ||
222 | } | ||
187 | } | 223 | } |
188 | 224 | ||
189 | out: | 225 | out: |
@@ -192,6 +228,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
192 | return ret; | 228 | return ret; |
193 | } | 229 | } |
194 | 230 | ||
231 | static void ftrace_update_pid_func(void) | ||
232 | { | ||
233 | ftrace_func_t func; | ||
234 | |||
235 | /* should not be called from interrupt context */ | ||
236 | spin_lock(&ftrace_lock); | ||
237 | |||
238 | if (ftrace_trace_function == ftrace_stub) | ||
239 | goto out; | ||
240 | |||
241 | func = ftrace_trace_function; | ||
242 | |||
243 | if (ftrace_pid_trace) { | ||
244 | set_ftrace_pid_function(func); | ||
245 | func = ftrace_pid_func; | ||
246 | } else { | ||
247 | if (func == ftrace_pid_func) | ||
248 | func = ftrace_pid_function; | ||
249 | } | ||
250 | |||
251 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
252 | ftrace_trace_function = func; | ||
253 | #else | ||
254 | __ftrace_trace_function = func; | ||
255 | #endif | ||
256 | |||
257 | out: | ||
258 | spin_unlock(&ftrace_lock); | ||
259 | } | ||
260 | |||
195 | #ifdef CONFIG_DYNAMIC_FTRACE | 261 | #ifdef CONFIG_DYNAMIC_FTRACE |
196 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 262 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
197 | # error Dynamic ftrace depends on MCOUNT_RECORD | 263 | # error Dynamic ftrace depends on MCOUNT_RECORD |
@@ -211,6 +277,8 @@ enum { | |||
211 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | 277 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), |
212 | FTRACE_ENABLE_MCOUNT = (1 << 3), | 278 | FTRACE_ENABLE_MCOUNT = (1 << 3), |
213 | FTRACE_DISABLE_MCOUNT = (1 << 4), | 279 | FTRACE_DISABLE_MCOUNT = (1 << 4), |
280 | FTRACE_START_FUNC_RET = (1 << 5), | ||
281 | FTRACE_STOP_FUNC_RET = (1 << 6), | ||
214 | }; | 282 | }; |
215 | 283 | ||
216 | static int ftrace_filtered; | 284 | static int ftrace_filtered; |
@@ -395,14 +463,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
395 | unsigned long ip, fl; | 463 | unsigned long ip, fl; |
396 | unsigned long ftrace_addr; | 464 | unsigned long ftrace_addr; |
397 | 465 | ||
398 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
399 | if (ftrace_tracing_type == FTRACE_TYPE_ENTER) | ||
400 | ftrace_addr = (unsigned long)ftrace_caller; | ||
401 | else | ||
402 | ftrace_addr = (unsigned long)ftrace_return_caller; | ||
403 | #else | ||
404 | ftrace_addr = (unsigned long)ftrace_caller; | 466 | ftrace_addr = (unsigned long)ftrace_caller; |
405 | #endif | ||
406 | 467 | ||
407 | ip = rec->ip; | 468 | ip = rec->ip; |
408 | 469 | ||
@@ -535,6 +596,11 @@ static int __ftrace_modify_code(void *data) | |||
535 | if (*command & FTRACE_UPDATE_TRACE_FUNC) | 596 | if (*command & FTRACE_UPDATE_TRACE_FUNC) |
536 | ftrace_update_ftrace_func(ftrace_trace_function); | 597 | ftrace_update_ftrace_func(ftrace_trace_function); |
537 | 598 | ||
599 | if (*command & FTRACE_START_FUNC_RET) | ||
600 | ftrace_enable_ftrace_graph_caller(); | ||
601 | else if (*command & FTRACE_STOP_FUNC_RET) | ||
602 | ftrace_disable_ftrace_graph_caller(); | ||
603 | |||
538 | return 0; | 604 | return 0; |
539 | } | 605 | } |
540 | 606 | ||
@@ -545,12 +611,22 @@ static void ftrace_run_update_code(int command) | |||
545 | 611 | ||
546 | static ftrace_func_t saved_ftrace_func; | 612 | static ftrace_func_t saved_ftrace_func; |
547 | static int ftrace_start_up; | 613 | static int ftrace_start_up; |
548 | static DEFINE_MUTEX(ftrace_start_lock); | ||
549 | 614 | ||
550 | static void ftrace_startup(void) | 615 | static void ftrace_startup_enable(int command) |
551 | { | 616 | { |
552 | int command = 0; | 617 | if (saved_ftrace_func != ftrace_trace_function) { |
618 | saved_ftrace_func = ftrace_trace_function; | ||
619 | command |= FTRACE_UPDATE_TRACE_FUNC; | ||
620 | } | ||
553 | 621 | ||
622 | if (!command || !ftrace_enabled) | ||
623 | return; | ||
624 | |||
625 | ftrace_run_update_code(command); | ||
626 | } | ||
627 | |||
628 | static void ftrace_startup(int command) | ||
629 | { | ||
554 | if (unlikely(ftrace_disabled)) | 630 | if (unlikely(ftrace_disabled)) |
555 | return; | 631 | return; |
556 | 632 | ||
@@ -558,23 +634,13 @@ static void ftrace_startup(void) | |||
558 | ftrace_start_up++; | 634 | ftrace_start_up++; |
559 | command |= FTRACE_ENABLE_CALLS; | 635 | command |= FTRACE_ENABLE_CALLS; |
560 | 636 | ||
561 | if (saved_ftrace_func != ftrace_trace_function) { | 637 | ftrace_startup_enable(command); |
562 | saved_ftrace_func = ftrace_trace_function; | ||
563 | command |= FTRACE_UPDATE_TRACE_FUNC; | ||
564 | } | ||
565 | 638 | ||
566 | if (!command || !ftrace_enabled) | ||
567 | goto out; | ||
568 | |||
569 | ftrace_run_update_code(command); | ||
570 | out: | ||
571 | mutex_unlock(&ftrace_start_lock); | 639 | mutex_unlock(&ftrace_start_lock); |
572 | } | 640 | } |
573 | 641 | ||
574 | static void ftrace_shutdown(void) | 642 | static void ftrace_shutdown(int command) |
575 | { | 643 | { |
576 | int command = 0; | ||
577 | |||
578 | if (unlikely(ftrace_disabled)) | 644 | if (unlikely(ftrace_disabled)) |
579 | return; | 645 | return; |
580 | 646 | ||
@@ -719,7 +785,6 @@ enum { | |||
719 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 785 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
720 | 786 | ||
721 | struct ftrace_iterator { | 787 | struct ftrace_iterator { |
722 | loff_t pos; | ||
723 | struct ftrace_page *pg; | 788 | struct ftrace_page *pg; |
724 | unsigned idx; | 789 | unsigned idx; |
725 | unsigned flags; | 790 | unsigned flags; |
@@ -744,6 +809,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
744 | iter->pg = iter->pg->next; | 809 | iter->pg = iter->pg->next; |
745 | iter->idx = 0; | 810 | iter->idx = 0; |
746 | goto retry; | 811 | goto retry; |
812 | } else { | ||
813 | iter->idx = -1; | ||
747 | } | 814 | } |
748 | } else { | 815 | } else { |
749 | rec = &iter->pg->records[iter->idx++]; | 816 | rec = &iter->pg->records[iter->idx++]; |
@@ -766,8 +833,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
766 | } | 833 | } |
767 | spin_unlock(&ftrace_lock); | 834 | spin_unlock(&ftrace_lock); |
768 | 835 | ||
769 | iter->pos = *pos; | ||
770 | |||
771 | return rec; | 836 | return rec; |
772 | } | 837 | } |
773 | 838 | ||
@@ -775,13 +840,15 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
775 | { | 840 | { |
776 | struct ftrace_iterator *iter = m->private; | 841 | struct ftrace_iterator *iter = m->private; |
777 | void *p = NULL; | 842 | void *p = NULL; |
778 | loff_t l = -1; | ||
779 | 843 | ||
780 | if (*pos > iter->pos) | 844 | if (*pos > 0) { |
781 | *pos = iter->pos; | 845 | if (iter->idx < 0) |
846 | return p; | ||
847 | (*pos)--; | ||
848 | iter->idx--; | ||
849 | } | ||
782 | 850 | ||
783 | l = *pos; | 851 | p = t_next(m, p, pos); |
784 | p = t_next(m, p, &l); | ||
785 | 852 | ||
786 | return p; | 853 | return p; |
787 | } | 854 | } |
@@ -792,21 +859,15 @@ static void t_stop(struct seq_file *m, void *p) | |||
792 | 859 | ||
793 | static int t_show(struct seq_file *m, void *v) | 860 | static int t_show(struct seq_file *m, void *v) |
794 | { | 861 | { |
795 | struct ftrace_iterator *iter = m->private; | ||
796 | struct dyn_ftrace *rec = v; | 862 | struct dyn_ftrace *rec = v; |
797 | char str[KSYM_SYMBOL_LEN]; | 863 | char str[KSYM_SYMBOL_LEN]; |
798 | int ret = 0; | ||
799 | 864 | ||
800 | if (!rec) | 865 | if (!rec) |
801 | return 0; | 866 | return 0; |
802 | 867 | ||
803 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 868 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
804 | 869 | ||
805 | ret = seq_printf(m, "%s\n", str); | 870 | seq_printf(m, "%s\n", str); |
806 | if (ret < 0) { | ||
807 | iter->pos--; | ||
808 | iter->idx--; | ||
809 | } | ||
810 | 871 | ||
811 | return 0; | 872 | return 0; |
812 | } | 873 | } |
@@ -832,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
832 | return -ENOMEM; | 893 | return -ENOMEM; |
833 | 894 | ||
834 | iter->pg = ftrace_pages_start; | 895 | iter->pg = ftrace_pages_start; |
835 | iter->pos = 0; | ||
836 | 896 | ||
837 | ret = seq_open(file, &show_ftrace_seq_ops); | 897 | ret = seq_open(file, &show_ftrace_seq_ops); |
838 | if (!ret) { | 898 | if (!ret) { |
@@ -919,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
919 | 979 | ||
920 | if (file->f_mode & FMODE_READ) { | 980 | if (file->f_mode & FMODE_READ) { |
921 | iter->pg = ftrace_pages_start; | 981 | iter->pg = ftrace_pages_start; |
922 | iter->pos = 0; | ||
923 | iter->flags = enable ? FTRACE_ITER_FILTER : | 982 | iter->flags = enable ? FTRACE_ITER_FILTER : |
924 | FTRACE_ITER_NOTRACE; | 983 | FTRACE_ITER_NOTRACE; |
925 | 984 | ||
@@ -1262,12 +1321,233 @@ static struct file_operations ftrace_notrace_fops = { | |||
1262 | .release = ftrace_notrace_release, | 1321 | .release = ftrace_notrace_release, |
1263 | }; | 1322 | }; |
1264 | 1323 | ||
1265 | static __init int ftrace_init_debugfs(void) | 1324 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1325 | |||
1326 | static DEFINE_MUTEX(graph_lock); | ||
1327 | |||
1328 | int ftrace_graph_count; | ||
1329 | unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; | ||
1330 | |||
1331 | static void * | ||
1332 | g_next(struct seq_file *m, void *v, loff_t *pos) | ||
1266 | { | 1333 | { |
1267 | struct dentry *d_tracer; | 1334 | unsigned long *array = m->private; |
1268 | struct dentry *entry; | 1335 | int index = *pos; |
1269 | 1336 | ||
1270 | d_tracer = tracing_init_dentry(); | 1337 | (*pos)++; |
1338 | |||
1339 | if (index >= ftrace_graph_count) | ||
1340 | return NULL; | ||
1341 | |||
1342 | return &array[index]; | ||
1343 | } | ||
1344 | |||
1345 | static void *g_start(struct seq_file *m, loff_t *pos) | ||
1346 | { | ||
1347 | void *p = NULL; | ||
1348 | |||
1349 | mutex_lock(&graph_lock); | ||
1350 | |||
1351 | p = g_next(m, p, pos); | ||
1352 | |||
1353 | return p; | ||
1354 | } | ||
1355 | |||
1356 | static void g_stop(struct seq_file *m, void *p) | ||
1357 | { | ||
1358 | mutex_unlock(&graph_lock); | ||
1359 | } | ||
1360 | |||
1361 | static int g_show(struct seq_file *m, void *v) | ||
1362 | { | ||
1363 | unsigned long *ptr = v; | ||
1364 | char str[KSYM_SYMBOL_LEN]; | ||
1365 | |||
1366 | if (!ptr) | ||
1367 | return 0; | ||
1368 | |||
1369 | kallsyms_lookup(*ptr, NULL, NULL, NULL, str); | ||
1370 | |||
1371 | seq_printf(m, "%s\n", str); | ||
1372 | |||
1373 | return 0; | ||
1374 | } | ||
1375 | |||
1376 | static struct seq_operations ftrace_graph_seq_ops = { | ||
1377 | .start = g_start, | ||
1378 | .next = g_next, | ||
1379 | .stop = g_stop, | ||
1380 | .show = g_show, | ||
1381 | }; | ||
1382 | |||
1383 | static int | ||
1384 | ftrace_graph_open(struct inode *inode, struct file *file) | ||
1385 | { | ||
1386 | int ret = 0; | ||
1387 | |||
1388 | if (unlikely(ftrace_disabled)) | ||
1389 | return -ENODEV; | ||
1390 | |||
1391 | mutex_lock(&graph_lock); | ||
1392 | if ((file->f_mode & FMODE_WRITE) && | ||
1393 | !(file->f_flags & O_APPEND)) { | ||
1394 | ftrace_graph_count = 0; | ||
1395 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); | ||
1396 | } | ||
1397 | |||
1398 | if (file->f_mode & FMODE_READ) { | ||
1399 | ret = seq_open(file, &ftrace_graph_seq_ops); | ||
1400 | if (!ret) { | ||
1401 | struct seq_file *m = file->private_data; | ||
1402 | m->private = ftrace_graph_funcs; | ||
1403 | } | ||
1404 | } else | ||
1405 | file->private_data = ftrace_graph_funcs; | ||
1406 | mutex_unlock(&graph_lock); | ||
1407 | |||
1408 | return ret; | ||
1409 | } | ||
1410 | |||
1411 | static ssize_t | ||
1412 | ftrace_graph_read(struct file *file, char __user *ubuf, | ||
1413 | size_t cnt, loff_t *ppos) | ||
1414 | { | ||
1415 | if (file->f_mode & FMODE_READ) | ||
1416 | return seq_read(file, ubuf, cnt, ppos); | ||
1417 | else | ||
1418 | return -EPERM; | ||
1419 | } | ||
1420 | |||
1421 | static int | ||
1422 | ftrace_set_func(unsigned long *array, int idx, char *buffer) | ||
1423 | { | ||
1424 | char str[KSYM_SYMBOL_LEN]; | ||
1425 | struct dyn_ftrace *rec; | ||
1426 | struct ftrace_page *pg; | ||
1427 | int found = 0; | ||
1428 | int i, j; | ||
1429 | |||
1430 | if (ftrace_disabled) | ||
1431 | return -ENODEV; | ||
1432 | |||
1433 | /* should not be called from interrupt context */ | ||
1434 | spin_lock(&ftrace_lock); | ||
1435 | |||
1436 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | ||
1437 | for (i = 0; i < pg->index; i++) { | ||
1438 | rec = &pg->records[i]; | ||
1439 | |||
1440 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | ||
1441 | continue; | ||
1442 | |||
1443 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1444 | if (strcmp(str, buffer) == 0) { | ||
1445 | found = 1; | ||
1446 | for (j = 0; j < idx; j++) | ||
1447 | if (array[j] == rec->ip) { | ||
1448 | found = 0; | ||
1449 | break; | ||
1450 | } | ||
1451 | if (found) | ||
1452 | array[idx] = rec->ip; | ||
1453 | break; | ||
1454 | } | ||
1455 | } | ||
1456 | } | ||
1457 | spin_unlock(&ftrace_lock); | ||
1458 | |||
1459 | return found ? 0 : -EINVAL; | ||
1460 | } | ||
1461 | |||
1462 | static ssize_t | ||
1463 | ftrace_graph_write(struct file *file, const char __user *ubuf, | ||
1464 | size_t cnt, loff_t *ppos) | ||
1465 | { | ||
1466 | unsigned char buffer[FTRACE_BUFF_MAX+1]; | ||
1467 | unsigned long *array; | ||
1468 | size_t read = 0; | ||
1469 | ssize_t ret; | ||
1470 | int index = 0; | ||
1471 | char ch; | ||
1472 | |||
1473 | if (!cnt || cnt < 0) | ||
1474 | return 0; | ||
1475 | |||
1476 | mutex_lock(&graph_lock); | ||
1477 | |||
1478 | if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { | ||
1479 | ret = -EBUSY; | ||
1480 | goto out; | ||
1481 | } | ||
1482 | |||
1483 | if (file->f_mode & FMODE_READ) { | ||
1484 | struct seq_file *m = file->private_data; | ||
1485 | array = m->private; | ||
1486 | } else | ||
1487 | array = file->private_data; | ||
1488 | |||
1489 | ret = get_user(ch, ubuf++); | ||
1490 | if (ret) | ||
1491 | goto out; | ||
1492 | read++; | ||
1493 | cnt--; | ||
1494 | |||
1495 | /* skip white space */ | ||
1496 | while (cnt && isspace(ch)) { | ||
1497 | ret = get_user(ch, ubuf++); | ||
1498 | if (ret) | ||
1499 | goto out; | ||
1500 | read++; | ||
1501 | cnt--; | ||
1502 | } | ||
1503 | |||
1504 | if (isspace(ch)) { | ||
1505 | *ppos += read; | ||
1506 | ret = read; | ||
1507 | goto out; | ||
1508 | } | ||
1509 | |||
1510 | while (cnt && !isspace(ch)) { | ||
1511 | if (index < FTRACE_BUFF_MAX) | ||
1512 | buffer[index++] = ch; | ||
1513 | else { | ||
1514 | ret = -EINVAL; | ||
1515 | goto out; | ||
1516 | } | ||
1517 | ret = get_user(ch, ubuf++); | ||
1518 | if (ret) | ||
1519 | goto out; | ||
1520 | read++; | ||
1521 | cnt--; | ||
1522 | } | ||
1523 | buffer[index] = 0; | ||
1524 | |||
1525 | /* we allow only one at a time */ | ||
1526 | ret = ftrace_set_func(array, ftrace_graph_count, buffer); | ||
1527 | if (ret) | ||
1528 | goto out; | ||
1529 | |||
1530 | ftrace_graph_count++; | ||
1531 | |||
1532 | file->f_pos += read; | ||
1533 | |||
1534 | ret = read; | ||
1535 | out: | ||
1536 | mutex_unlock(&graph_lock); | ||
1537 | |||
1538 | return ret; | ||
1539 | } | ||
1540 | |||
1541 | static const struct file_operations ftrace_graph_fops = { | ||
1542 | .open = ftrace_graph_open, | ||
1543 | .read = ftrace_graph_read, | ||
1544 | .write = ftrace_graph_write, | ||
1545 | }; | ||
1546 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
1547 | |||
1548 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | ||
1549 | { | ||
1550 | struct dentry *entry; | ||
1271 | 1551 | ||
1272 | entry = debugfs_create_file("available_filter_functions", 0444, | 1552 | entry = debugfs_create_file("available_filter_functions", 0444, |
1273 | d_tracer, NULL, &ftrace_avail_fops); | 1553 | d_tracer, NULL, &ftrace_avail_fops); |
@@ -1292,11 +1572,18 @@ static __init int ftrace_init_debugfs(void) | |||
1292 | pr_warning("Could not create debugfs " | 1572 | pr_warning("Could not create debugfs " |
1293 | "'set_ftrace_notrace' entry\n"); | 1573 | "'set_ftrace_notrace' entry\n"); |
1294 | 1574 | ||
1575 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1576 | entry = debugfs_create_file("set_graph_function", 0444, d_tracer, | ||
1577 | NULL, | ||
1578 | &ftrace_graph_fops); | ||
1579 | if (!entry) | ||
1580 | pr_warning("Could not create debugfs " | ||
1581 | "'set_graph_function' entry\n"); | ||
1582 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
1583 | |||
1295 | return 0; | 1584 | return 0; |
1296 | } | 1585 | } |
1297 | 1586 | ||
1298 | fs_initcall(ftrace_init_debugfs); | ||
1299 | |||
1300 | static int ftrace_convert_nops(struct module *mod, | 1587 | static int ftrace_convert_nops(struct module *mod, |
1301 | unsigned long *start, | 1588 | unsigned long *start, |
1302 | unsigned long *end) | 1589 | unsigned long *end) |
@@ -1382,12 +1669,186 @@ static int __init ftrace_nodyn_init(void) | |||
1382 | } | 1669 | } |
1383 | device_initcall(ftrace_nodyn_init); | 1670 | device_initcall(ftrace_nodyn_init); |
1384 | 1671 | ||
1385 | # define ftrace_startup() do { } while (0) | 1672 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
1386 | # define ftrace_shutdown() do { } while (0) | 1673 | static inline void ftrace_startup_enable(int command) { } |
1674 | /* Keep as macros so we do not need to define the commands */ | ||
1675 | # define ftrace_startup(command) do { } while (0) | ||
1676 | # define ftrace_shutdown(command) do { } while (0) | ||
1387 | # define ftrace_startup_sysctl() do { } while (0) | 1677 | # define ftrace_startup_sysctl() do { } while (0) |
1388 | # define ftrace_shutdown_sysctl() do { } while (0) | 1678 | # define ftrace_shutdown_sysctl() do { } while (0) |
1389 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1679 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1390 | 1680 | ||
1681 | static ssize_t | ||
1682 | ftrace_pid_read(struct file *file, char __user *ubuf, | ||
1683 | size_t cnt, loff_t *ppos) | ||
1684 | { | ||
1685 | char buf[64]; | ||
1686 | int r; | ||
1687 | |||
1688 | if (ftrace_pid_trace == ftrace_swapper_pid) | ||
1689 | r = sprintf(buf, "swapper tasks\n"); | ||
1690 | else if (ftrace_pid_trace) | ||
1691 | r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace)); | ||
1692 | else | ||
1693 | r = sprintf(buf, "no pid\n"); | ||
1694 | |||
1695 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
1696 | } | ||
1697 | |||
1698 | static void clear_ftrace_swapper(void) | ||
1699 | { | ||
1700 | struct task_struct *p; | ||
1701 | int cpu; | ||
1702 | |||
1703 | get_online_cpus(); | ||
1704 | for_each_online_cpu(cpu) { | ||
1705 | p = idle_task(cpu); | ||
1706 | clear_tsk_trace_trace(p); | ||
1707 | } | ||
1708 | put_online_cpus(); | ||
1709 | } | ||
1710 | |||
1711 | static void set_ftrace_swapper(void) | ||
1712 | { | ||
1713 | struct task_struct *p; | ||
1714 | int cpu; | ||
1715 | |||
1716 | get_online_cpus(); | ||
1717 | for_each_online_cpu(cpu) { | ||
1718 | p = idle_task(cpu); | ||
1719 | set_tsk_trace_trace(p); | ||
1720 | } | ||
1721 | put_online_cpus(); | ||
1722 | } | ||
1723 | |||
1724 | static void clear_ftrace_pid(struct pid *pid) | ||
1725 | { | ||
1726 | struct task_struct *p; | ||
1727 | |||
1728 | do_each_pid_task(pid, PIDTYPE_PID, p) { | ||
1729 | clear_tsk_trace_trace(p); | ||
1730 | } while_each_pid_task(pid, PIDTYPE_PID, p); | ||
1731 | put_pid(pid); | ||
1732 | } | ||
1733 | |||
1734 | static void set_ftrace_pid(struct pid *pid) | ||
1735 | { | ||
1736 | struct task_struct *p; | ||
1737 | |||
1738 | do_each_pid_task(pid, PIDTYPE_PID, p) { | ||
1739 | set_tsk_trace_trace(p); | ||
1740 | } while_each_pid_task(pid, PIDTYPE_PID, p); | ||
1741 | } | ||
1742 | |||
1743 | static void clear_ftrace_pid_task(struct pid **pid) | ||
1744 | { | ||
1745 | if (*pid == ftrace_swapper_pid) | ||
1746 | clear_ftrace_swapper(); | ||
1747 | else | ||
1748 | clear_ftrace_pid(*pid); | ||
1749 | |||
1750 | *pid = NULL; | ||
1751 | } | ||
1752 | |||
1753 | static void set_ftrace_pid_task(struct pid *pid) | ||
1754 | { | ||
1755 | if (pid == ftrace_swapper_pid) | ||
1756 | set_ftrace_swapper(); | ||
1757 | else | ||
1758 | set_ftrace_pid(pid); | ||
1759 | } | ||
1760 | |||
1761 | static ssize_t | ||
1762 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | ||
1763 | size_t cnt, loff_t *ppos) | ||
1764 | { | ||
1765 | struct pid *pid; | ||
1766 | char buf[64]; | ||
1767 | long val; | ||
1768 | int ret; | ||
1769 | |||
1770 | if (cnt >= sizeof(buf)) | ||
1771 | return -EINVAL; | ||
1772 | |||
1773 | if (copy_from_user(&buf, ubuf, cnt)) | ||
1774 | return -EFAULT; | ||
1775 | |||
1776 | buf[cnt] = 0; | ||
1777 | |||
1778 | ret = strict_strtol(buf, 10, &val); | ||
1779 | if (ret < 0) | ||
1780 | return ret; | ||
1781 | |||
1782 | mutex_lock(&ftrace_start_lock); | ||
1783 | if (val < 0) { | ||
1784 | /* disable pid tracing */ | ||
1785 | if (!ftrace_pid_trace) | ||
1786 | goto out; | ||
1787 | |||
1788 | clear_ftrace_pid_task(&ftrace_pid_trace); | ||
1789 | |||
1790 | } else { | ||
1791 | /* swapper task is special */ | ||
1792 | if (!val) { | ||
1793 | pid = ftrace_swapper_pid; | ||
1794 | if (pid == ftrace_pid_trace) | ||
1795 | goto out; | ||
1796 | } else { | ||
1797 | pid = find_get_pid(val); | ||
1798 | |||
1799 | if (pid == ftrace_pid_trace) { | ||
1800 | put_pid(pid); | ||
1801 | goto out; | ||
1802 | } | ||
1803 | } | ||
1804 | |||
1805 | if (ftrace_pid_trace) | ||
1806 | clear_ftrace_pid_task(&ftrace_pid_trace); | ||
1807 | |||
1808 | if (!pid) | ||
1809 | goto out; | ||
1810 | |||
1811 | ftrace_pid_trace = pid; | ||
1812 | |||
1813 | set_ftrace_pid_task(ftrace_pid_trace); | ||
1814 | } | ||
1815 | |||
1816 | /* update the function call */ | ||
1817 | ftrace_update_pid_func(); | ||
1818 | ftrace_startup_enable(0); | ||
1819 | |||
1820 | out: | ||
1821 | mutex_unlock(&ftrace_start_lock); | ||
1822 | |||
1823 | return cnt; | ||
1824 | } | ||
1825 | |||
1826 | static struct file_operations ftrace_pid_fops = { | ||
1827 | .read = ftrace_pid_read, | ||
1828 | .write = ftrace_pid_write, | ||
1829 | }; | ||
1830 | |||
1831 | static __init int ftrace_init_debugfs(void) | ||
1832 | { | ||
1833 | struct dentry *d_tracer; | ||
1834 | struct dentry *entry; | ||
1835 | |||
1836 | d_tracer = tracing_init_dentry(); | ||
1837 | if (!d_tracer) | ||
1838 | return 0; | ||
1839 | |||
1840 | ftrace_init_dyn_debugfs(d_tracer); | ||
1841 | |||
1842 | entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer, | ||
1843 | NULL, &ftrace_pid_fops); | ||
1844 | if (!entry) | ||
1845 | pr_warning("Could not create debugfs " | ||
1846 | "'set_ftrace_pid' entry\n"); | ||
1847 | return 0; | ||
1848 | } | ||
1849 | |||
1850 | fs_initcall(ftrace_init_debugfs); | ||
1851 | |||
1391 | /** | 1852 | /** |
1392 | * ftrace_kill - kill ftrace | 1853 | * ftrace_kill - kill ftrace |
1393 | * | 1854 | * |
@@ -1422,15 +1883,9 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
1422 | 1883 | ||
1423 | mutex_lock(&ftrace_sysctl_lock); | 1884 | mutex_lock(&ftrace_sysctl_lock); |
1424 | 1885 | ||
1425 | if (ftrace_tracing_type == FTRACE_TYPE_RETURN) { | ||
1426 | ret = -EBUSY; | ||
1427 | goto out; | ||
1428 | } | ||
1429 | |||
1430 | ret = __register_ftrace_function(ops); | 1886 | ret = __register_ftrace_function(ops); |
1431 | ftrace_startup(); | 1887 | ftrace_startup(0); |
1432 | 1888 | ||
1433 | out: | ||
1434 | mutex_unlock(&ftrace_sysctl_lock); | 1889 | mutex_unlock(&ftrace_sysctl_lock); |
1435 | return ret; | 1890 | return ret; |
1436 | } | 1891 | } |
@@ -1447,7 +1902,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
1447 | 1902 | ||
1448 | mutex_lock(&ftrace_sysctl_lock); | 1903 | mutex_lock(&ftrace_sysctl_lock); |
1449 | ret = __unregister_ftrace_function(ops); | 1904 | ret = __unregister_ftrace_function(ops); |
1450 | ftrace_shutdown(); | 1905 | ftrace_shutdown(0); |
1451 | mutex_unlock(&ftrace_sysctl_lock); | 1906 | mutex_unlock(&ftrace_sysctl_lock); |
1452 | 1907 | ||
1453 | return ret; | 1908 | return ret; |
@@ -1496,14 +1951,19 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1496 | return ret; | 1951 | return ret; |
1497 | } | 1952 | } |
1498 | 1953 | ||
1499 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1954 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1500 | 1955 | ||
1501 | static atomic_t ftrace_retfunc_active; | 1956 | static atomic_t ftrace_graph_active; |
1502 | 1957 | ||
1503 | /* The callback that hooks the return of a function */ | 1958 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
1504 | trace_function_return_t ftrace_function_return = | 1959 | { |
1505 | (trace_function_return_t)ftrace_stub; | 1960 | return 0; |
1961 | } | ||
1506 | 1962 | ||
1963 | /* The callbacks that hook a function */ | ||
1964 | trace_func_graph_ret_t ftrace_graph_return = | ||
1965 | (trace_func_graph_ret_t)ftrace_stub; | ||
1966 | trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; | ||
1507 | 1967 | ||
1508 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ | 1968 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ |
1509 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | 1969 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) |
@@ -1534,8 +1994,11 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | |||
1534 | } | 1994 | } |
1535 | 1995 | ||
1536 | if (t->ret_stack == NULL) { | 1996 | if (t->ret_stack == NULL) { |
1537 | t->ret_stack = ret_stack_list[start++]; | ||
1538 | t->curr_ret_stack = -1; | 1997 | t->curr_ret_stack = -1; |
1998 | /* Make sure IRQs see the -1 first: */ | ||
1999 | barrier(); | ||
2000 | t->ret_stack = ret_stack_list[start++]; | ||
2001 | atomic_set(&t->tracing_graph_pause, 0); | ||
1539 | atomic_set(&t->trace_overrun, 0); | 2002 | atomic_set(&t->trace_overrun, 0); |
1540 | } | 2003 | } |
1541 | } while_each_thread(g, t); | 2004 | } while_each_thread(g, t); |
@@ -1549,7 +2012,7 @@ free: | |||
1549 | } | 2012 | } |
1550 | 2013 | ||
1551 | /* Allocate a return stack for each task */ | 2014 | /* Allocate a return stack for each task */ |
1552 | static int start_return_tracing(void) | 2015 | static int start_graph_tracing(void) |
1553 | { | 2016 | { |
1554 | struct ftrace_ret_stack **ret_stack_list; | 2017 | struct ftrace_ret_stack **ret_stack_list; |
1555 | int ret; | 2018 | int ret; |
@@ -1569,64 +2032,59 @@ static int start_return_tracing(void) | |||
1569 | return ret; | 2032 | return ret; |
1570 | } | 2033 | } |
1571 | 2034 | ||
1572 | int register_ftrace_return(trace_function_return_t func) | 2035 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
2036 | trace_func_graph_ent_t entryfunc) | ||
1573 | { | 2037 | { |
1574 | int ret = 0; | 2038 | int ret = 0; |
1575 | 2039 | ||
1576 | mutex_lock(&ftrace_sysctl_lock); | 2040 | mutex_lock(&ftrace_sysctl_lock); |
1577 | 2041 | ||
1578 | /* | 2042 | atomic_inc(&ftrace_graph_active); |
1579 | * Don't launch return tracing if normal function | 2043 | ret = start_graph_tracing(); |
1580 | * tracing is already running. | ||
1581 | */ | ||
1582 | if (ftrace_trace_function != ftrace_stub) { | ||
1583 | ret = -EBUSY; | ||
1584 | goto out; | ||
1585 | } | ||
1586 | atomic_inc(&ftrace_retfunc_active); | ||
1587 | ret = start_return_tracing(); | ||
1588 | if (ret) { | 2044 | if (ret) { |
1589 | atomic_dec(&ftrace_retfunc_active); | 2045 | atomic_dec(&ftrace_graph_active); |
1590 | goto out; | 2046 | goto out; |
1591 | } | 2047 | } |
1592 | ftrace_tracing_type = FTRACE_TYPE_RETURN; | 2048 | |
1593 | ftrace_function_return = func; | 2049 | ftrace_graph_return = retfunc; |
1594 | ftrace_startup(); | 2050 | ftrace_graph_entry = entryfunc; |
2051 | |||
2052 | ftrace_startup(FTRACE_START_FUNC_RET); | ||
1595 | 2053 | ||
1596 | out: | 2054 | out: |
1597 | mutex_unlock(&ftrace_sysctl_lock); | 2055 | mutex_unlock(&ftrace_sysctl_lock); |
1598 | return ret; | 2056 | return ret; |
1599 | } | 2057 | } |
1600 | 2058 | ||
1601 | void unregister_ftrace_return(void) | 2059 | void unregister_ftrace_graph(void) |
1602 | { | 2060 | { |
1603 | mutex_lock(&ftrace_sysctl_lock); | 2061 | mutex_lock(&ftrace_sysctl_lock); |
1604 | 2062 | ||
1605 | atomic_dec(&ftrace_retfunc_active); | 2063 | atomic_dec(&ftrace_graph_active); |
1606 | ftrace_function_return = (trace_function_return_t)ftrace_stub; | 2064 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
1607 | ftrace_shutdown(); | 2065 | ftrace_graph_entry = ftrace_graph_entry_stub; |
1608 | /* Restore normal tracing type */ | 2066 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
1609 | ftrace_tracing_type = FTRACE_TYPE_ENTER; | ||
1610 | 2067 | ||
1611 | mutex_unlock(&ftrace_sysctl_lock); | 2068 | mutex_unlock(&ftrace_sysctl_lock); |
1612 | } | 2069 | } |
1613 | 2070 | ||
1614 | /* Allocate a return stack for newly created task */ | 2071 | /* Allocate a return stack for newly created task */ |
1615 | void ftrace_retfunc_init_task(struct task_struct *t) | 2072 | void ftrace_graph_init_task(struct task_struct *t) |
1616 | { | 2073 | { |
1617 | if (atomic_read(&ftrace_retfunc_active)) { | 2074 | if (atomic_read(&ftrace_graph_active)) { |
1618 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | 2075 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH |
1619 | * sizeof(struct ftrace_ret_stack), | 2076 | * sizeof(struct ftrace_ret_stack), |
1620 | GFP_KERNEL); | 2077 | GFP_KERNEL); |
1621 | if (!t->ret_stack) | 2078 | if (!t->ret_stack) |
1622 | return; | 2079 | return; |
1623 | t->curr_ret_stack = -1; | 2080 | t->curr_ret_stack = -1; |
2081 | atomic_set(&t->tracing_graph_pause, 0); | ||
1624 | atomic_set(&t->trace_overrun, 0); | 2082 | atomic_set(&t->trace_overrun, 0); |
1625 | } else | 2083 | } else |
1626 | t->ret_stack = NULL; | 2084 | t->ret_stack = NULL; |
1627 | } | 2085 | } |
1628 | 2086 | ||
1629 | void ftrace_retfunc_exit_task(struct task_struct *t) | 2087 | void ftrace_graph_exit_task(struct task_struct *t) |
1630 | { | 2088 | { |
1631 | struct ftrace_ret_stack *ret_stack = t->ret_stack; | 2089 | struct ftrace_ret_stack *ret_stack = t->ret_stack; |
1632 | 2090 | ||
@@ -1636,7 +2094,10 @@ void ftrace_retfunc_exit_task(struct task_struct *t) | |||
1636 | 2094 | ||
1637 | kfree(ret_stack); | 2095 | kfree(ret_stack); |
1638 | } | 2096 | } |
1639 | #endif | ||
1640 | |||
1641 | 2097 | ||
2098 | void ftrace_graph_stop(void) | ||
2099 | { | ||
2100 | ftrace_stop(); | ||
2101 | } | ||
2102 | #endif | ||
1642 | 2103 | ||
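
A short usage note on the set_ftrace_pid file created in the ftrace.c hunks above, going by ftrace_pid_read() and ftrace_pid_write() (the path below assumes debugfs is mounted in the usual place, e.g. /sys/kernel/debug/tracing/, which depends on the system): writing a positive number, for example "echo 42 > set_ftrace_pid", restricts function tracing to that task; writing "0" selects the per-cpu idle ("swapper") tasks; writing a negative value clears the filter again. The filtering itself is done by ftrace_pid_func(), which returns early whenever the current task does not carry the trace flag that set_ftrace_pid_task() sets.
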
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index e206951603c1..7f69cfeaadf7 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -195,20 +195,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) | |||
195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
196 | #define TS_DELTA_TEST (~TS_MASK) | 196 | #define TS_DELTA_TEST (~TS_MASK) |
197 | 197 | ||
198 | /* | 198 | struct buffer_data_page { |
199 | * This hack stolen from mm/slob.c. | ||
200 | * We can store per page timing information in the page frame of the page. | ||
201 | * Thanks to Peter Zijlstra for suggesting this idea. | ||
202 | */ | ||
203 | struct buffer_page { | ||
204 | u64 time_stamp; /* page time stamp */ | 199 | u64 time_stamp; /* page time stamp */ |
205 | local_t write; /* index for next write */ | ||
206 | local_t commit; /* write commited index */ | 200 | local_t commit; /* write commited index */ |
201 | unsigned char data[]; /* data of buffer page */ | ||
202 | }; | ||
203 | |||
204 | struct buffer_page { | ||
205 | local_t write; /* index for next write */ | ||
207 | unsigned read; /* index for next read */ | 206 | unsigned read; /* index for next read */ |
208 | struct list_head list; /* list of free pages */ | 207 | struct list_head list; /* list of free pages */ |
209 | void *page; /* Actual data page */ | 208 | struct buffer_data_page *page; /* Actual data page */ |
210 | }; | 209 | }; |
211 | 210 | ||
211 | static void rb_init_page(struct buffer_data_page *bpage) | ||
212 | { | ||
213 | local_set(&bpage->commit, 0); | ||
214 | } | ||
215 | |||
212 | /* | 216 | /* |
213 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 217 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing |
214 | * this issue out. | 218 | * this issue out. |
@@ -230,7 +234,7 @@ static inline int test_time_stamp(u64 delta) | |||
230 | return 0; | 234 | return 0; |
231 | } | 235 | } |
232 | 236 | ||
233 | #define BUF_PAGE_SIZE PAGE_SIZE | 237 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) |
234 | 238 | ||
235 | /* | 239 | /* |
236 | * head_page == tail_page && head == tail then buffer is empty. | 240 | * head_page == tail_page && head == tail then buffer is empty. |
@@ -294,19 +298,19 @@ struct ring_buffer_iter { | |||
294 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) | 298 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) |
295 | { | 299 | { |
296 | struct list_head *head = &cpu_buffer->pages; | 300 | struct list_head *head = &cpu_buffer->pages; |
297 | struct buffer_page *page, *tmp; | 301 | struct buffer_page *bpage, *tmp; |
298 | 302 | ||
299 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) | 303 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) |
300 | return -1; | 304 | return -1; |
301 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) | 305 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) |
302 | return -1; | 306 | return -1; |
303 | 307 | ||
304 | list_for_each_entry_safe(page, tmp, head, list) { | 308 | list_for_each_entry_safe(bpage, tmp, head, list) { |
305 | if (RB_WARN_ON(cpu_buffer, | 309 | if (RB_WARN_ON(cpu_buffer, |
306 | page->list.next->prev != &page->list)) | 310 | bpage->list.next->prev != &bpage->list)) |
307 | return -1; | 311 | return -1; |
308 | if (RB_WARN_ON(cpu_buffer, | 312 | if (RB_WARN_ON(cpu_buffer, |
309 | page->list.prev->next != &page->list)) | 313 | bpage->list.prev->next != &bpage->list)) |
310 | return -1; | 314 | return -1; |
311 | } | 315 | } |
312 | 316 | ||
@@ -317,22 +321,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
317 | unsigned nr_pages) | 321 | unsigned nr_pages) |
318 | { | 322 | { |
319 | struct list_head *head = &cpu_buffer->pages; | 323 | struct list_head *head = &cpu_buffer->pages; |
320 | struct buffer_page *page, *tmp; | 324 | struct buffer_page *bpage, *tmp; |
321 | unsigned long addr; | 325 | unsigned long addr; |
322 | LIST_HEAD(pages); | 326 | LIST_HEAD(pages); |
323 | unsigned i; | 327 | unsigned i; |
324 | 328 | ||
325 | for (i = 0; i < nr_pages; i++) { | 329 | for (i = 0; i < nr_pages; i++) { |
326 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 330 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
327 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); | 331 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); |
328 | if (!page) | 332 | if (!bpage) |
329 | goto free_pages; | 333 | goto free_pages; |
330 | list_add(&page->list, &pages); | 334 | list_add(&bpage->list, &pages); |
331 | 335 | ||
332 | addr = __get_free_page(GFP_KERNEL); | 336 | addr = __get_free_page(GFP_KERNEL); |
333 | if (!addr) | 337 | if (!addr) |
334 | goto free_pages; | 338 | goto free_pages; |
335 | page->page = (void *)addr; | 339 | bpage->page = (void *)addr; |
340 | rb_init_page(bpage->page); | ||
336 | } | 341 | } |
337 | 342 | ||
338 | list_splice(&pages, head); | 343 | list_splice(&pages, head); |
@@ -342,9 +347,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
342 | return 0; | 347 | return 0; |
343 | 348 | ||
344 | free_pages: | 349 | free_pages: |
345 | list_for_each_entry_safe(page, tmp, &pages, list) { | 350 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
346 | list_del_init(&page->list); | 351 | list_del_init(&bpage->list); |
347 | free_buffer_page(page); | 352 | free_buffer_page(bpage); |
348 | } | 353 | } |
349 | return -ENOMEM; | 354 | return -ENOMEM; |
350 | } | 355 | } |
@@ -353,7 +358,7 @@ static struct ring_buffer_per_cpu * | |||
353 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | 358 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) |
354 | { | 359 | { |
355 | struct ring_buffer_per_cpu *cpu_buffer; | 360 | struct ring_buffer_per_cpu *cpu_buffer; |
356 | struct buffer_page *page; | 361 | struct buffer_page *bpage; |
357 | unsigned long addr; | 362 | unsigned long addr; |
358 | int ret; | 363 | int ret; |
359 | 364 | ||
@@ -368,16 +373,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
368 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 373 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
369 | INIT_LIST_HEAD(&cpu_buffer->pages); | 374 | INIT_LIST_HEAD(&cpu_buffer->pages); |
370 | 375 | ||
371 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 376 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
372 | GFP_KERNEL, cpu_to_node(cpu)); | 377 | GFP_KERNEL, cpu_to_node(cpu)); |
373 | if (!page) | 378 | if (!bpage) |
374 | goto fail_free_buffer; | 379 | goto fail_free_buffer; |
375 | 380 | ||
376 | cpu_buffer->reader_page = page; | 381 | cpu_buffer->reader_page = bpage; |
377 | addr = __get_free_page(GFP_KERNEL); | 382 | addr = __get_free_page(GFP_KERNEL); |
378 | if (!addr) | 383 | if (!addr) |
379 | goto fail_free_reader; | 384 | goto fail_free_reader; |
380 | page->page = (void *)addr; | 385 | bpage->page = (void *)addr; |
386 | rb_init_page(bpage->page); | ||
381 | 387 | ||
382 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 388 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
383 | 389 | ||
@@ -402,14 +408,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
402 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) | 408 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) |
403 | { | 409 | { |
404 | struct list_head *head = &cpu_buffer->pages; | 410 | struct list_head *head = &cpu_buffer->pages; |
405 | struct buffer_page *page, *tmp; | 411 | struct buffer_page *bpage, *tmp; |
406 | 412 | ||
407 | list_del_init(&cpu_buffer->reader_page->list); | 413 | list_del_init(&cpu_buffer->reader_page->list); |
408 | free_buffer_page(cpu_buffer->reader_page); | 414 | free_buffer_page(cpu_buffer->reader_page); |
409 | 415 | ||
410 | list_for_each_entry_safe(page, tmp, head, list) { | 416 | list_for_each_entry_safe(bpage, tmp, head, list) { |
411 | list_del_init(&page->list); | 417 | list_del_init(&bpage->list); |
412 | free_buffer_page(page); | 418 | free_buffer_page(bpage); |
413 | } | 419 | } |
414 | kfree(cpu_buffer); | 420 | kfree(cpu_buffer); |
415 | } | 421 | } |
@@ -506,7 +512,7 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | |||
506 | static void | 512 | static void |
507 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | 513 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) |
508 | { | 514 | { |
509 | struct buffer_page *page; | 515 | struct buffer_page *bpage; |
510 | struct list_head *p; | 516 | struct list_head *p; |
511 | unsigned i; | 517 | unsigned i; |
512 | 518 | ||
@@ -517,9 +523,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
517 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) | 523 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
518 | return; | 524 | return; |
519 | p = cpu_buffer->pages.next; | 525 | p = cpu_buffer->pages.next; |
520 | page = list_entry(p, struct buffer_page, list); | 526 | bpage = list_entry(p, struct buffer_page, list); |
521 | list_del_init(&page->list); | 527 | list_del_init(&bpage->list); |
522 | free_buffer_page(page); | 528 | free_buffer_page(bpage); |
523 | } | 529 | } |
524 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) | 530 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
525 | return; | 531 | return; |
@@ -536,7 +542,7 @@ static void | |||
536 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | 542 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, |
537 | struct list_head *pages, unsigned nr_pages) | 543 | struct list_head *pages, unsigned nr_pages) |
538 | { | 544 | { |
539 | struct buffer_page *page; | 545 | struct buffer_page *bpage; |
540 | struct list_head *p; | 546 | struct list_head *p; |
541 | unsigned i; | 547 | unsigned i; |
542 | 548 | ||
@@ -547,9 +553,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
547 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) | 553 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) |
548 | return; | 554 | return; |
549 | p = pages->next; | 555 | p = pages->next; |
550 | page = list_entry(p, struct buffer_page, list); | 556 | bpage = list_entry(p, struct buffer_page, list); |
551 | list_del_init(&page->list); | 557 | list_del_init(&bpage->list); |
552 | list_add_tail(&page->list, &cpu_buffer->pages); | 558 | list_add_tail(&bpage->list, &cpu_buffer->pages); |
553 | } | 559 | } |
554 | rb_reset_cpu(cpu_buffer); | 560 | rb_reset_cpu(cpu_buffer); |
555 | 561 | ||
@@ -576,7 +582,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
576 | { | 582 | { |
577 | struct ring_buffer_per_cpu *cpu_buffer; | 583 | struct ring_buffer_per_cpu *cpu_buffer; |
578 | unsigned nr_pages, rm_pages, new_pages; | 584 | unsigned nr_pages, rm_pages, new_pages; |
579 | struct buffer_page *page, *tmp; | 585 | struct buffer_page *bpage, *tmp; |
580 | unsigned long buffer_size; | 586 | unsigned long buffer_size; |
581 | unsigned long addr; | 587 | unsigned long addr; |
582 | LIST_HEAD(pages); | 588 | LIST_HEAD(pages); |
@@ -637,16 +643,17 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
637 | 643 | ||
638 | for_each_buffer_cpu(buffer, cpu) { | 644 | for_each_buffer_cpu(buffer, cpu) { |
639 | for (i = 0; i < new_pages; i++) { | 645 | for (i = 0; i < new_pages; i++) { |
640 | page = kzalloc_node(ALIGN(sizeof(*page), | 646 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), |
641 | cache_line_size()), | 647 | cache_line_size()), |
642 | GFP_KERNEL, cpu_to_node(cpu)); | 648 | GFP_KERNEL, cpu_to_node(cpu)); |
643 | if (!page) | 649 | if (!bpage) |
644 | goto free_pages; | 650 | goto free_pages; |
645 | list_add(&page->list, &pages); | 651 | list_add(&bpage->list, &pages); |
646 | addr = __get_free_page(GFP_KERNEL); | 652 | addr = __get_free_page(GFP_KERNEL); |
647 | if (!addr) | 653 | if (!addr) |
648 | goto free_pages; | 654 | goto free_pages; |
649 | page->page = (void *)addr; | 655 | bpage->page = (void *)addr; |
656 | rb_init_page(bpage->page); | ||
650 | } | 657 | } |
651 | } | 658 | } |
652 | 659 | ||
@@ -667,9 +674,9 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
667 | return size; | 674 | return size; |
668 | 675 | ||
669 | free_pages: | 676 | free_pages: |
670 | list_for_each_entry_safe(page, tmp, &pages, list) { | 677 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
671 | list_del_init(&page->list); | 678 | list_del_init(&bpage->list); |
672 | free_buffer_page(page); | 679 | free_buffer_page(bpage); |
673 | } | 680 | } |
674 | mutex_unlock(&buffer->mutex); | 681 | mutex_unlock(&buffer->mutex); |
675 | return -ENOMEM; | 682 | return -ENOMEM; |
@@ -680,9 +687,15 @@ static inline int rb_null_event(struct ring_buffer_event *event) | |||
680 | return event->type == RINGBUF_TYPE_PADDING; | 687 | return event->type == RINGBUF_TYPE_PADDING; |
681 | } | 688 | } |
682 | 689 | ||
683 | static inline void *__rb_page_index(struct buffer_page *page, unsigned index) | 690 | static inline void * |
691 | __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) | ||
684 | { | 692 | { |
685 | return page->page + index; | 693 | return bpage->data + index; |
694 | } | ||
695 | |||
696 | static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) | ||
697 | { | ||
698 | return bpage->page->data + index; | ||
686 | } | 699 | } |
687 | 700 | ||
688 | static inline struct ring_buffer_event * | 701 | static inline struct ring_buffer_event * |
@@ -712,7 +725,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage) | |||
712 | 725 | ||
713 | static inline unsigned rb_page_commit(struct buffer_page *bpage) | 726 | static inline unsigned rb_page_commit(struct buffer_page *bpage) |
714 | { | 727 | { |
715 | return local_read(&bpage->commit); | 728 | return local_read(&bpage->page->commit); |
716 | } | 729 | } |
717 | 730 | ||
718 | /* Size is determined by what has been commited */ | 731 | /* Size is determined by what has been commited */ |
@@ -758,14 +771,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) | |||
758 | } | 771 | } |
759 | 772 | ||
760 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, | 773 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, |
761 | struct buffer_page **page) | 774 | struct buffer_page **bpage) |
762 | { | 775 | { |
763 | struct list_head *p = (*page)->list.next; | 776 | struct list_head *p = (*bpage)->list.next; |
764 | 777 | ||
765 | if (p == &cpu_buffer->pages) | 778 | if (p == &cpu_buffer->pages) |
766 | p = p->next; | 779 | p = p->next; |
767 | 780 | ||
768 | *page = list_entry(p, struct buffer_page, list); | 781 | *bpage = list_entry(p, struct buffer_page, list); |
769 | } | 782 | } |
770 | 783 | ||
771 | static inline unsigned | 784 | static inline unsigned |
@@ -804,14 +817,15 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
804 | if (RB_WARN_ON(cpu_buffer, | 817 | if (RB_WARN_ON(cpu_buffer, |
805 | cpu_buffer->commit_page == cpu_buffer->tail_page)) | 818 | cpu_buffer->commit_page == cpu_buffer->tail_page)) |
806 | return; | 819 | return; |
807 | cpu_buffer->commit_page->commit = | 820 | cpu_buffer->commit_page->page->commit = |
808 | cpu_buffer->commit_page->write; | 821 | cpu_buffer->commit_page->write; |
809 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 822 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
810 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 823 | cpu_buffer->write_stamp = |
824 | cpu_buffer->commit_page->page->time_stamp; | ||
811 | } | 825 | } |
812 | 826 | ||
813 | /* Now set the commit to the event's index */ | 827 | /* Now set the commit to the event's index */ |
814 | local_set(&cpu_buffer->commit_page->commit, index); | 828 | local_set(&cpu_buffer->commit_page->page->commit, index); |
815 | } | 829 | } |
816 | 830 | ||
817 | static inline void | 831 | static inline void |
@@ -826,16 +840,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
826 | * assign the commit to the tail. | 840 | * assign the commit to the tail. |
827 | */ | 841 | */ |
828 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 842 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { |
829 | cpu_buffer->commit_page->commit = | 843 | cpu_buffer->commit_page->page->commit = |
830 | cpu_buffer->commit_page->write; | 844 | cpu_buffer->commit_page->write; |
831 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 845 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
832 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 846 | cpu_buffer->write_stamp = |
847 | cpu_buffer->commit_page->page->time_stamp; | ||
833 | /* add barrier to keep gcc from optimizing too much */ | 848 | /* add barrier to keep gcc from optimizing too much */ |
834 | barrier(); | 849 | barrier(); |
835 | } | 850 | } |
836 | while (rb_commit_index(cpu_buffer) != | 851 | while (rb_commit_index(cpu_buffer) != |
837 | rb_page_write(cpu_buffer->commit_page)) { | 852 | rb_page_write(cpu_buffer->commit_page)) { |
838 | cpu_buffer->commit_page->commit = | 853 | cpu_buffer->commit_page->page->commit = |
839 | cpu_buffer->commit_page->write; | 854 | cpu_buffer->commit_page->write; |
840 | barrier(); | 855 | barrier(); |
841 | } | 856 | } |
@@ -843,7 +858,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
843 | 858 | ||
844 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 859 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
845 | { | 860 | { |
846 | cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; | 861 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; |
847 | cpu_buffer->reader_page->read = 0; | 862 | cpu_buffer->reader_page->read = 0; |
848 | } | 863 | } |
849 | 864 | ||
@@ -862,7 +877,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter) | |||
862 | else | 877 | else |
863 | rb_inc_page(cpu_buffer, &iter->head_page); | 878 | rb_inc_page(cpu_buffer, &iter->head_page); |
864 | 879 | ||
865 | iter->read_stamp = iter->head_page->time_stamp; | 880 | iter->read_stamp = iter->head_page->page->time_stamp; |
866 | iter->head = 0; | 881 | iter->head = 0; |
867 | } | 882 | } |
868 | 883 | ||
@@ -998,12 +1013,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
998 | */ | 1013 | */ |
999 | if (tail_page == cpu_buffer->tail_page) { | 1014 | if (tail_page == cpu_buffer->tail_page) { |
1000 | local_set(&next_page->write, 0); | 1015 | local_set(&next_page->write, 0); |
1001 | local_set(&next_page->commit, 0); | 1016 | local_set(&next_page->page->commit, 0); |
1002 | cpu_buffer->tail_page = next_page; | 1017 | cpu_buffer->tail_page = next_page; |
1003 | 1018 | ||
1004 | /* reread the time stamp */ | 1019 | /* reread the time stamp */ |
1005 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1020 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
1006 | cpu_buffer->tail_page->time_stamp = *ts; | 1021 | cpu_buffer->tail_page->page->time_stamp = *ts; |
1007 | } | 1022 | } |
1008 | 1023 | ||
1009 | /* | 1024 | /* |
@@ -1048,7 +1063,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1048 | * this page's time stamp. | 1063 | * this page's time stamp. |
1049 | */ | 1064 | */ |
1050 | if (!tail && rb_is_commit(cpu_buffer, event)) | 1065 | if (!tail && rb_is_commit(cpu_buffer, event)) |
1051 | cpu_buffer->commit_page->time_stamp = *ts; | 1066 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1052 | 1067 | ||
1053 | return event; | 1068 | return event; |
1054 | 1069 | ||
@@ -1099,7 +1114,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, | |||
1099 | event->time_delta = *delta & TS_MASK; | 1114 | event->time_delta = *delta & TS_MASK; |
1100 | event->array[0] = *delta >> TS_SHIFT; | 1115 | event->array[0] = *delta >> TS_SHIFT; |
1101 | } else { | 1116 | } else { |
1102 | cpu_buffer->commit_page->time_stamp = *ts; | 1117 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1103 | event->time_delta = 0; | 1118 | event->time_delta = 0; |
1104 | event->array[0] = 0; | 1119 | event->array[0] = 0; |
1105 | } | 1120 | } |
@@ -1552,7 +1567,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) | |||
1552 | if (iter->head) | 1567 | if (iter->head) |
1553 | iter->read_stamp = cpu_buffer->read_stamp; | 1568 | iter->read_stamp = cpu_buffer->read_stamp; |
1554 | else | 1569 | else |
1555 | iter->read_stamp = iter->head_page->time_stamp; | 1570 | iter->read_stamp = iter->head_page->page->time_stamp; |
1556 | } | 1571 | } |
1557 | 1572 | ||
1558 | /** | 1573 | /** |
@@ -1696,7 +1711,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1696 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 1711 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
1697 | 1712 | ||
1698 | local_set(&cpu_buffer->reader_page->write, 0); | 1713 | local_set(&cpu_buffer->reader_page->write, 0); |
1699 | local_set(&cpu_buffer->reader_page->commit, 0); | 1714 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
1700 | 1715 | ||
1701 | /* Make the reader page now replace the head */ | 1716 | /* Make the reader page now replace the head */ |
1702 | reader->list.prev->next = &cpu_buffer->reader_page->list; | 1717 | reader->list.prev->next = &cpu_buffer->reader_page->list; |
@@ -2088,7 +2103,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
2088 | cpu_buffer->head_page | 2103 | cpu_buffer->head_page |
2089 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); | 2104 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); |
2090 | local_set(&cpu_buffer->head_page->write, 0); | 2105 | local_set(&cpu_buffer->head_page->write, 0); |
2091 | local_set(&cpu_buffer->head_page->commit, 0); | 2106 | local_set(&cpu_buffer->head_page->page->commit, 0); |
2092 | 2107 | ||
2093 | cpu_buffer->head_page->read = 0; | 2108 | cpu_buffer->head_page->read = 0; |
2094 | 2109 | ||
@@ -2097,7 +2112,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
2097 | 2112 | ||
2098 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 2113 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
2099 | local_set(&cpu_buffer->reader_page->write, 0); | 2114 | local_set(&cpu_buffer->reader_page->write, 0); |
2100 | local_set(&cpu_buffer->reader_page->commit, 0); | 2115 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
2101 | cpu_buffer->reader_page->read = 0; | 2116 | cpu_buffer->reader_page->read = 0; |
2102 | 2117 | ||
2103 | cpu_buffer->overrun = 0; | 2118 | cpu_buffer->overrun = 0; |
@@ -2223,6 +2238,166 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2223 | return 0; | 2238 | return 0; |
2224 | } | 2239 | } |
2225 | 2240 | ||
2241 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | ||
2242 | struct buffer_data_page *bpage) | ||
2243 | { | ||
2244 | struct ring_buffer_event *event; | ||
2245 | unsigned long head; | ||
2246 | |||
2247 | __raw_spin_lock(&cpu_buffer->lock); | ||
2248 | for (head = 0; head < local_read(&bpage->commit); | ||
2249 | head += rb_event_length(event)) { | ||
2250 | |||
2251 | event = __rb_data_page_index(bpage, head); | ||
2252 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) | ||
2253 | return; | ||
2254 | /* Only count data entries */ | ||
2255 | if (event->type != RINGBUF_TYPE_DATA) | ||
2256 | continue; | ||
2257 | cpu_buffer->entries--; | ||
2258 | } | ||
2259 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2260 | } | ||
2261 | |||
2262 | /** | ||
2263 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | ||
2264 | * @buffer: the buffer to allocate for. | ||
2265 | * | ||
2266 | * This function is used in conjunction with ring_buffer_read_page. | ||
2267 | * When reading a full page from the ring buffer, these functions | ||
2268 | * can be used to speed up the process. The calling function should | ||
2269 | * allocate a few pages first with this function. Then when it | ||
2270 | * needs to get pages from the ring buffer, it passes the result | ||
2271 | * of this function into ring_buffer_read_page, which will swap | ||
2272 | * the page that was allocated, with the read page of the buffer. | ||
2273 | * | ||
2274 | * Returns: | ||
2275 | * The page allocated, or NULL on error. | ||
2276 | */ | ||
2277 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | ||
2278 | { | ||
2279 | unsigned long addr; | ||
2280 | struct buffer_data_page *bpage; | ||
2281 | |||
2282 | addr = __get_free_page(GFP_KERNEL); | ||
2283 | if (!addr) | ||
2284 | return NULL; | ||
2285 | |||
2286 | bpage = (void *)addr; | ||
2287 | |||
2288 | return bpage; | ||
2289 | } | ||
2290 | |||
2291 | /** | ||
2292 | * ring_buffer_free_read_page - free an allocated read page | ||
2293 | * @buffer: the buffer the page was allocate for | ||
2294 | * @data: the page to free | ||
2295 | * | ||
2296 | * Free a page allocated from ring_buffer_alloc_read_page. | ||
2297 | */ | ||
2298 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | ||
2299 | { | ||
2300 | free_page((unsigned long)data); | ||
2301 | } | ||
2302 | |||
2303 | /** | ||
2304 | * ring_buffer_read_page - extract a page from the ring buffer | ||
2305 | * @buffer: buffer to extract from | ||
2306 | * @data_page: the page to use, allocated from ring_buffer_alloc_read_page | ||
2307 | * @cpu: the cpu of the buffer to extract | ||
2308 | * @full: should the extraction only happen when the page is full. | ||
2309 | * | ||
2310 | * This function will pull out a page from the ring buffer and consume it. | ||
2311 | * @data_page must be the address of the variable that was returned | ||
2312 | * from ring_buffer_alloc_read_page. This is because the page might be used | ||
2313 | * to swap with a page in the ring buffer. | ||
2314 | * | ||
2315 | * for example: | ||
2316 | * rpage = ring_buffer_alloc_read_page(buffer); | ||
2317 | * if (!rpage) | ||
2318 | * return error; | ||
2319 | * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); | ||
2320 | * if (ret) | ||
2321 | * process_page(rpage); | ||
2322 | * | ||
2323 | * When @full is set, the function will not return true unless | ||
2324 | * the writer is off the reader page. | ||
2325 | * | ||
2326 | * Note: it is up to the calling functions to handle sleeps and wakeups. | ||
2327 | * The ring buffer can be used anywhere in the kernel and can not | ||
2328 | * blindly call wake_up. The layer that uses the ring buffer must be | ||
2329 | * responsible for that. | ||
2330 | * | ||
2331 | * Returns: | ||
2332 | * 1 if data has been transferred | ||
2333 | * 0 if no data has been transferred. | ||
2334 | */ | ||
2335 | int ring_buffer_read_page(struct ring_buffer *buffer, | ||
2336 | void **data_page, int cpu, int full) | ||
2337 | { | ||
2338 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
2339 | struct ring_buffer_event *event; | ||
2340 | struct buffer_data_page *bpage; | ||
2341 | unsigned long flags; | ||
2342 | int ret = 0; | ||
2343 | |||
2344 | if (!data_page) | ||
2345 | return 0; | ||
2346 | |||
2347 | bpage = *data_page; | ||
2348 | if (!bpage) | ||
2349 | return 0; | ||
2350 | |||
2351 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
2352 | |||
2353 | /* | ||
2354 | * rb_buffer_peek will get the next ring buffer if | ||
2355 | * the current reader page is empty. | ||
2356 | */ | ||
2357 | event = rb_buffer_peek(buffer, cpu, NULL); | ||
2358 | if (!event) | ||
2359 | goto out; | ||
2360 | |||
2361 | /* check for data */ | ||
2362 | if (!local_read(&cpu_buffer->reader_page->page->commit)) | ||
2363 | goto out; | ||
2364 | /* | ||
2365 | * If the writer is already off of the read page, then simply | ||
2366 | * switch the read page with the given page. Otherwise | ||
2367 | * we need to copy the data from the reader to the writer. | ||
2368 | */ | ||
2369 | if (cpu_buffer->reader_page == cpu_buffer->commit_page) { | ||
2370 | unsigned int read = cpu_buffer->reader_page->read; | ||
2371 | |||
2372 | if (full) | ||
2373 | goto out; | ||
2374 | /* The writer is still on the reader page, we must copy */ | ||
2375 | bpage = cpu_buffer->reader_page->page; | ||
2376 | memcpy(bpage->data, | ||
2377 | cpu_buffer->reader_page->page->data + read, | ||
2378 | local_read(&bpage->commit) - read); | ||
2379 | |||
2380 | /* consume what was read */ | ||
2381 | cpu_buffer->reader_page += read; | ||
2382 | |||
2383 | } else { | ||
2384 | /* swap the pages */ | ||
2385 | rb_init_page(bpage); | ||
2386 | bpage = cpu_buffer->reader_page->page; | ||
2387 | cpu_buffer->reader_page->page = *data_page; | ||
2388 | cpu_buffer->reader_page->read = 0; | ||
2389 | *data_page = bpage; | ||
2390 | } | ||
2391 | ret = 1; | ||
2392 | |||
2393 | /* update the entry counter */ | ||
2394 | rb_remove_entries(cpu_buffer, bpage); | ||
2395 | out: | ||
2396 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2397 | |||
2398 | return ret; | ||
2399 | } | ||
2400 | |||
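Taken together, ring_buffer_alloc_read_page(), ring_buffer_read_page() and ring_buffer_free_read_page() form the page-at-a-time consumer interface added by this hunk. A minimal usage sketch, following the kernel-doc above (process_page() and the choice of cpu are hypothetical, not part of this patch):

	/* Illustrative consumer of the new read-page API (not in this patch). */
	static int drain_one_page(struct ring_buffer *buffer, int cpu)
	{
		void *rpage;
		int ret;

		rpage = ring_buffer_alloc_read_page(buffer);
		if (!rpage)
			return -ENOMEM;

		/* full == 0: accept a partially filled reader page too */
		ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
		if (ret)
			process_page(rpage);	/* hypothetical helper */

		/* rpage may now be a different page, swapped in by the call */
		ring_buffer_free_read_page(buffer, rpage);
		return ret;
	}

Note that ring_buffer_read_page() may swap the caller's page with the buffer's reader page, which is why the page is passed by reference and why the page eventually freed can differ from the one originally allocated.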
2226 | static ssize_t | 2401 | static ssize_t |
2227 | rb_simple_read(struct file *filp, char __user *ubuf, | 2402 | rb_simple_read(struct file *filp, char __user *ubuf, |
2228 | size_t cnt, loff_t *ppos) | 2403 | size_t cnt, loff_t *ppos) |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a45b59e53fbc..8ebe0070c47a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -44,6 +44,15 @@ | |||
44 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; | 44 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; |
45 | unsigned long __read_mostly tracing_thresh; | 45 | unsigned long __read_mostly tracing_thresh; |
46 | 46 | ||
47 | /* | ||
48 | * We need to change this state when a selftest is running. | ||
49 | * A selftest will look into the ring buffer to count the | ||
50 | * entries inserted during the selftest, although concurrent | ||
51 | * insertions into the ring buffer, such as ftrace_printk, could occur | ||
52 | * at the same time and give false positive or negative results. | ||
53 | */ | ||
54 | static bool __read_mostly tracing_selftest_running; | ||
55 | |||
47 | /* For tracers that don't implement custom flags */ | 56 | /* For tracers that don't implement custom flags */ |
48 | static struct tracer_opt dummy_tracer_opt[] = { | 57 | static struct tracer_opt dummy_tracer_opt[] = { |
49 | { } | 58 | { } |
@@ -566,6 +575,8 @@ int register_tracer(struct tracer *type) | |||
566 | unlock_kernel(); | 575 | unlock_kernel(); |
567 | mutex_lock(&trace_types_lock); | 576 | mutex_lock(&trace_types_lock); |
568 | 577 | ||
578 | tracing_selftest_running = true; | ||
579 | |||
569 | for (t = trace_types; t; t = t->next) { | 580 | for (t = trace_types; t; t = t->next) { |
570 | if (strcmp(type->name, t->name) == 0) { | 581 | if (strcmp(type->name, t->name) == 0) { |
571 | /* already found */ | 582 | /* already found */ |
@@ -589,6 +600,7 @@ int register_tracer(struct tracer *type) | |||
589 | struct tracer *saved_tracer = current_trace; | 600 | struct tracer *saved_tracer = current_trace; |
590 | struct trace_array *tr = &global_trace; | 601 | struct trace_array *tr = &global_trace; |
591 | int i; | 602 | int i; |
603 | |||
592 | /* | 604 | /* |
593 | * Run a selftest on this tracer. | 605 | * Run a selftest on this tracer. |
594 | * Here we reset the trace buffer, and set the current | 606 | * Here we reset the trace buffer, and set the current |
@@ -624,6 +636,7 @@ int register_tracer(struct tracer *type) | |||
624 | max_tracer_type_len = len; | 636 | max_tracer_type_len = len; |
625 | 637 | ||
626 | out: | 638 | out: |
639 | tracing_selftest_running = false; | ||
627 | mutex_unlock(&trace_types_lock); | 640 | mutex_unlock(&trace_types_lock); |
628 | lock_kernel(); | 641 | lock_kernel(); |
629 | 642 | ||
@@ -804,7 +817,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
804 | spin_unlock(&trace_cmdline_lock); | 817 | spin_unlock(&trace_cmdline_lock); |
805 | } | 818 | } |
806 | 819 | ||
807 | static char *trace_find_cmdline(int pid) | 820 | char *trace_find_cmdline(int pid) |
808 | { | 821 | { |
809 | char *cmdline = "<...>"; | 822 | char *cmdline = "<...>"; |
810 | unsigned map; | 823 | unsigned map; |
@@ -878,15 +891,39 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
878 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 891 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
879 | } | 892 | } |
880 | 893 | ||
881 | #ifdef CONFIG_FUNCTION_RET_TRACER | 894 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
882 | static void __trace_function_return(struct trace_array *tr, | 895 | static void __trace_graph_entry(struct trace_array *tr, |
896 | struct trace_array_cpu *data, | ||
897 | struct ftrace_graph_ent *trace, | ||
898 | unsigned long flags, | ||
899 | int pc) | ||
900 | { | ||
901 | struct ring_buffer_event *event; | ||
902 | struct ftrace_graph_ent_entry *entry; | ||
903 | unsigned long irq_flags; | ||
904 | |||
905 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
906 | return; | ||
907 | |||
908 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
909 | &irq_flags); | ||
910 | if (!event) | ||
911 | return; | ||
912 | entry = ring_buffer_event_data(event); | ||
913 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
914 | entry->ent.type = TRACE_GRAPH_ENT; | ||
915 | entry->graph_ent = *trace; | ||
916 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
917 | } | ||
918 | |||
919 | static void __trace_graph_return(struct trace_array *tr, | ||
883 | struct trace_array_cpu *data, | 920 | struct trace_array_cpu *data, |
884 | struct ftrace_retfunc *trace, | 921 | struct ftrace_graph_ret *trace, |
885 | unsigned long flags, | 922 | unsigned long flags, |
886 | int pc) | 923 | int pc) |
887 | { | 924 | { |
888 | struct ring_buffer_event *event; | 925 | struct ring_buffer_event *event; |
889 | struct ftrace_ret_entry *entry; | 926 | struct ftrace_graph_ret_entry *entry; |
890 | unsigned long irq_flags; | 927 | unsigned long irq_flags; |
891 | 928 | ||
892 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 929 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
@@ -898,12 +935,8 @@ static void __trace_function_return(struct trace_array *tr, | |||
898 | return; | 935 | return; |
899 | entry = ring_buffer_event_data(event); | 936 | entry = ring_buffer_event_data(event); |
900 | tracing_generic_entry_update(&entry->ent, flags, pc); | 937 | tracing_generic_entry_update(&entry->ent, flags, pc); |
901 | entry->ent.type = TRACE_FN_RET; | 938 | entry->ent.type = TRACE_GRAPH_RET; |
902 | entry->ip = trace->func; | 939 | entry->ret = *trace; |
903 | entry->parent_ip = trace->ret; | ||
904 | entry->rettime = trace->rettime; | ||
905 | entry->calltime = trace->calltime; | ||
906 | entry->overrun = trace->overrun; | ||
907 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | 940 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); |
908 | } | 941 | } |
909 | #endif | 942 | #endif |
@@ -963,6 +996,7 @@ static void ftrace_trace_userstack(struct trace_array *tr, | |||
963 | struct trace_array_cpu *data, | 996 | struct trace_array_cpu *data, |
964 | unsigned long flags, int pc) | 997 | unsigned long flags, int pc) |
965 | { | 998 | { |
999 | #ifdef CONFIG_STACKTRACE | ||
966 | struct ring_buffer_event *event; | 1000 | struct ring_buffer_event *event; |
967 | struct userstack_entry *entry; | 1001 | struct userstack_entry *entry; |
968 | struct stack_trace trace; | 1002 | struct stack_trace trace; |
@@ -988,6 +1022,7 @@ static void ftrace_trace_userstack(struct trace_array *tr, | |||
988 | 1022 | ||
989 | save_stack_trace_user(&trace); | 1023 | save_stack_trace_user(&trace); |
990 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1024 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
1025 | #endif | ||
991 | } | 1026 | } |
992 | 1027 | ||
993 | void __trace_userstack(struct trace_array *tr, | 1028 | void __trace_userstack(struct trace_array *tr, |
@@ -1177,8 +1212,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
1177 | local_irq_restore(flags); | 1212 | local_irq_restore(flags); |
1178 | } | 1213 | } |
1179 | 1214 | ||
1180 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1215 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1181 | void trace_function_return(struct ftrace_retfunc *trace) | 1216 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
1182 | { | 1217 | { |
1183 | struct trace_array *tr = &global_trace; | 1218 | struct trace_array *tr = &global_trace; |
1184 | struct trace_array_cpu *data; | 1219 | struct trace_array_cpu *data; |
@@ -1187,18 +1222,52 @@ void trace_function_return(struct ftrace_retfunc *trace) | |||
1187 | int cpu; | 1222 | int cpu; |
1188 | int pc; | 1223 | int pc; |
1189 | 1224 | ||
1190 | raw_local_irq_save(flags); | 1225 | if (!ftrace_trace_task(current)) |
1226 | return 0; | ||
1227 | |||
1228 | if (!ftrace_graph_addr(trace->func)) | ||
1229 | return 0; | ||
1230 | |||
1231 | local_irq_save(flags); | ||
1191 | cpu = raw_smp_processor_id(); | 1232 | cpu = raw_smp_processor_id(); |
1192 | data = tr->data[cpu]; | 1233 | data = tr->data[cpu]; |
1193 | disabled = atomic_inc_return(&data->disabled); | 1234 | disabled = atomic_inc_return(&data->disabled); |
1194 | if (likely(disabled == 1)) { | 1235 | if (likely(disabled == 1)) { |
1195 | pc = preempt_count(); | 1236 | pc = preempt_count(); |
1196 | __trace_function_return(tr, data, trace, flags, pc); | 1237 | __trace_graph_entry(tr, data, trace, flags, pc); |
1197 | } | 1238 | } |
1239 | /* Only do the atomic if it is not already set */ | ||
1240 | if (!test_tsk_trace_graph(current)) | ||
1241 | set_tsk_trace_graph(current); | ||
1198 | atomic_dec(&data->disabled); | 1242 | atomic_dec(&data->disabled); |
1199 | raw_local_irq_restore(flags); | 1243 | local_irq_restore(flags); |
1244 | |||
1245 | return 1; | ||
1200 | } | 1246 | } |
1201 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | 1247 | |
1248 | void trace_graph_return(struct ftrace_graph_ret *trace) | ||
1249 | { | ||
1250 | struct trace_array *tr = &global_trace; | ||
1251 | struct trace_array_cpu *data; | ||
1252 | unsigned long flags; | ||
1253 | long disabled; | ||
1254 | int cpu; | ||
1255 | int pc; | ||
1256 | |||
1257 | local_irq_save(flags); | ||
1258 | cpu = raw_smp_processor_id(); | ||
1259 | data = tr->data[cpu]; | ||
1260 | disabled = atomic_inc_return(&data->disabled); | ||
1261 | if (likely(disabled == 1)) { | ||
1262 | pc = preempt_count(); | ||
1263 | __trace_graph_return(tr, data, trace, flags, pc); | ||
1264 | } | ||
1265 | if (!trace->depth) | ||
1266 | clear_tsk_trace_graph(current); | ||
1267 | atomic_dec(&data->disabled); | ||
1268 | local_irq_restore(flags); | ||
1269 | } | ||
1270 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
1202 | 1271 | ||
1203 | static struct ftrace_ops trace_ops __read_mostly = | 1272 | static struct ftrace_ops trace_ops __read_mostly = |
1204 | { | 1273 | { |
@@ -2000,9 +2069,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
2000 | trace_seq_print_cont(s, iter); | 2069 | trace_seq_print_cont(s, iter); |
2001 | break; | 2070 | break; |
2002 | } | 2071 | } |
2003 | case TRACE_FN_RET: { | 2072 | case TRACE_GRAPH_RET: { |
2004 | return print_return_function(iter); | 2073 | return print_graph_function(iter); |
2005 | break; | 2074 | } |
2075 | case TRACE_GRAPH_ENT: { | ||
2076 | return print_graph_function(iter); | ||
2006 | } | 2077 | } |
2007 | case TRACE_BRANCH: { | 2078 | case TRACE_BRANCH: { |
2008 | struct trace_branch *field; | 2079 | struct trace_branch *field; |
@@ -2298,7 +2369,9 @@ static int s_show(struct seq_file *m, void *v) | |||
2298 | seq_printf(m, "# tracer: %s\n", iter->trace->name); | 2369 | seq_printf(m, "# tracer: %s\n", iter->trace->name); |
2299 | seq_puts(m, "#\n"); | 2370 | seq_puts(m, "#\n"); |
2300 | } | 2371 | } |
2301 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2372 | if (iter->trace && iter->trace->print_header) |
2373 | iter->trace->print_header(m); | ||
2374 | else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | ||
2302 | /* print nothing if the buffers are empty */ | 2375 | /* print nothing if the buffers are empty */ |
2303 | if (trace_empty(iter)) | 2376 | if (trace_empty(iter)) |
2304 | return 0; | 2377 | return 0; |
@@ -2350,6 +2423,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
2350 | iter->trace = current_trace; | 2423 | iter->trace = current_trace; |
2351 | iter->pos = -1; | 2424 | iter->pos = -1; |
2352 | 2425 | ||
2426 | /* Notify the tracer early; before we stop tracing. */ | ||
2427 | if (iter->trace && iter->trace->open) | ||
2428 | iter->trace->open(iter); | ||
2429 | |||
2353 | /* Annotate start of buffers if we had overruns */ | 2430 | /* Annotate start of buffers if we had overruns */ |
2354 | if (ring_buffer_overruns(iter->tr->buffer)) | 2431 | if (ring_buffer_overruns(iter->tr->buffer)) |
2355 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 2432 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
@@ -2375,9 +2452,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
2375 | /* stop the trace while dumping */ | 2452 | /* stop the trace while dumping */ |
2376 | tracing_stop(); | 2453 | tracing_stop(); |
2377 | 2454 | ||
2378 | if (iter->trace && iter->trace->open) | ||
2379 | iter->trace->open(iter); | ||
2380 | |||
2381 | mutex_unlock(&trace_types_lock); | 2455 | mutex_unlock(&trace_types_lock); |
2382 | 2456 | ||
2383 | out: | 2457 | out: |
@@ -2597,7 +2671,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2597 | if (err) | 2671 | if (err) |
2598 | goto err_unlock; | 2672 | goto err_unlock; |
2599 | 2673 | ||
2600 | raw_local_irq_disable(); | 2674 | local_irq_disable(); |
2601 | __raw_spin_lock(&ftrace_max_lock); | 2675 | __raw_spin_lock(&ftrace_max_lock); |
2602 | for_each_tracing_cpu(cpu) { | 2676 | for_each_tracing_cpu(cpu) { |
2603 | /* | 2677 | /* |
@@ -2614,7 +2688,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2614 | } | 2688 | } |
2615 | } | 2689 | } |
2616 | __raw_spin_unlock(&ftrace_max_lock); | 2690 | __raw_spin_unlock(&ftrace_max_lock); |
2617 | raw_local_irq_enable(); | 2691 | local_irq_enable(); |
2618 | 2692 | ||
2619 | tracing_cpumask = tracing_cpumask_new; | 2693 | tracing_cpumask = tracing_cpumask_new; |
2620 | 2694 | ||
@@ -3285,7 +3359,7 @@ static int mark_printk(const char *fmt, ...) | |||
3285 | int ret; | 3359 | int ret; |
3286 | va_list args; | 3360 | va_list args; |
3287 | va_start(args, fmt); | 3361 | va_start(args, fmt); |
3288 | ret = trace_vprintk(0, fmt, args); | 3362 | ret = trace_vprintk(0, -1, fmt, args); |
3289 | va_end(args); | 3363 | va_end(args); |
3290 | return ret; | 3364 | return ret; |
3291 | } | 3365 | } |
@@ -3514,7 +3588,7 @@ static __init int tracer_init_debugfs(void) | |||
3514 | return 0; | 3588 | return 0; |
3515 | } | 3589 | } |
3516 | 3590 | ||
3517 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 3591 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) |
3518 | { | 3592 | { |
3519 | static DEFINE_SPINLOCK(trace_buf_lock); | 3593 | static DEFINE_SPINLOCK(trace_buf_lock); |
3520 | static char trace_buf[TRACE_BUF_SIZE]; | 3594 | static char trace_buf[TRACE_BUF_SIZE]; |
@@ -3522,11 +3596,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
3522 | struct ring_buffer_event *event; | 3596 | struct ring_buffer_event *event; |
3523 | struct trace_array *tr = &global_trace; | 3597 | struct trace_array *tr = &global_trace; |
3524 | struct trace_array_cpu *data; | 3598 | struct trace_array_cpu *data; |
3525 | struct print_entry *entry; | ||
3526 | unsigned long flags, irq_flags; | ||
3527 | int cpu, len = 0, size, pc; | 3599 | int cpu, len = 0, size, pc; |
3600 | struct print_entry *entry; | ||
3601 | unsigned long irq_flags; | ||
3528 | 3602 | ||
3529 | if (tracing_disabled) | 3603 | if (tracing_disabled || tracing_selftest_running) |
3530 | return 0; | 3604 | return 0; |
3531 | 3605 | ||
3532 | pc = preempt_count(); | 3606 | pc = preempt_count(); |
@@ -3537,7 +3611,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
3537 | if (unlikely(atomic_read(&data->disabled))) | 3611 | if (unlikely(atomic_read(&data->disabled))) |
3538 | goto out; | 3612 | goto out; |
3539 | 3613 | ||
3540 | spin_lock_irqsave(&trace_buf_lock, flags); | 3614 | pause_graph_tracing(); |
3615 | spin_lock_irqsave(&trace_buf_lock, irq_flags); | ||
3541 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 3616 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
3542 | 3617 | ||
3543 | len = min(len, TRACE_BUF_SIZE-1); | 3618 | len = min(len, TRACE_BUF_SIZE-1); |
@@ -3548,17 +3623,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
3548 | if (!event) | 3623 | if (!event) |
3549 | goto out_unlock; | 3624 | goto out_unlock; |
3550 | entry = ring_buffer_event_data(event); | 3625 | entry = ring_buffer_event_data(event); |
3551 | tracing_generic_entry_update(&entry->ent, flags, pc); | 3626 | tracing_generic_entry_update(&entry->ent, irq_flags, pc); |
3552 | entry->ent.type = TRACE_PRINT; | 3627 | entry->ent.type = TRACE_PRINT; |
3553 | entry->ip = ip; | 3628 | entry->ip = ip; |
3629 | entry->depth = depth; | ||
3554 | 3630 | ||
3555 | memcpy(&entry->buf, trace_buf, len); | 3631 | memcpy(&entry->buf, trace_buf, len); |
3556 | entry->buf[len] = 0; | 3632 | entry->buf[len] = 0; |
3557 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 3633 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
3558 | 3634 | ||
3559 | out_unlock: | 3635 | out_unlock: |
3560 | spin_unlock_irqrestore(&trace_buf_lock, flags); | 3636 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); |
3561 | 3637 | unpause_graph_tracing(); | |
3562 | out: | 3638 | out: |
3563 | preempt_enable_notrace(); | 3639 | preempt_enable_notrace(); |
3564 | 3640 | ||
@@ -3575,7 +3651,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) | |||
3575 | return 0; | 3651 | return 0; |
3576 | 3652 | ||
3577 | va_start(ap, fmt); | 3653 | va_start(ap, fmt); |
3578 | ret = trace_vprintk(ip, fmt, ap); | 3654 | ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); |
3579 | va_end(ap); | 3655 | va_end(ap); |
3580 | return ret; | 3656 | return ret; |
3581 | } | 3657 | } |
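The extra depth argument threaded through trace_vprintk() above comes from task_curr_ret_stack(current), so a message emitted via ftrace_printk() can later be placed at the right nesting level by the graph output code. A hypothetical call site (the message and dev_name variable are made up; callers themselves are unchanged by this patch):

	/*
	 * Hypothetical call site: with the graph tracer active, the current
	 * return-stack depth is now recorded alongside the message, so the
	 * graph output can indent it at the correct call-nesting level.
	 */
	ftrace_printk("resuming device %s\n", dev_name);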
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 28c15c2ebc22..5ac697065a48 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -25,8 +25,11 @@ enum trace_type { | |||
25 | TRACE_BRANCH, | 25 | TRACE_BRANCH, |
26 | TRACE_BOOT_CALL, | 26 | TRACE_BOOT_CALL, |
27 | TRACE_BOOT_RET, | 27 | TRACE_BOOT_RET, |
28 | TRACE_FN_RET, | 28 | TRACE_GRAPH_RET, |
29 | TRACE_GRAPH_ENT, | ||
29 | TRACE_USER_STACK, | 30 | TRACE_USER_STACK, |
31 | TRACE_BTS, | ||
32 | TRACE_POWER, | ||
30 | 33 | ||
31 | __TRACE_LAST_TYPE | 34 | __TRACE_LAST_TYPE |
32 | }; | 35 | }; |
@@ -55,14 +58,16 @@ struct ftrace_entry { | |||
55 | unsigned long parent_ip; | 58 | unsigned long parent_ip; |
56 | }; | 59 | }; |
57 | 60 | ||
61 | /* Function call entry */ | ||
62 | struct ftrace_graph_ent_entry { | ||
63 | struct trace_entry ent; | ||
64 | struct ftrace_graph_ent graph_ent; | ||
65 | }; | ||
66 | |||
58 | /* Function return entry */ | 67 | /* Function return entry */ |
59 | struct ftrace_ret_entry { | 68 | struct ftrace_graph_ret_entry { |
60 | struct trace_entry ent; | 69 | struct trace_entry ent; |
61 | unsigned long ip; | 70 | struct ftrace_graph_ret ret; |
62 | unsigned long parent_ip; | ||
63 | unsigned long long calltime; | ||
64 | unsigned long long rettime; | ||
65 | unsigned long overrun; | ||
66 | }; | 71 | }; |
67 | extern struct tracer boot_tracer; | 72 | extern struct tracer boot_tracer; |
68 | 73 | ||
@@ -112,6 +117,7 @@ struct userstack_entry { | |||
112 | struct print_entry { | 117 | struct print_entry { |
113 | struct trace_entry ent; | 118 | struct trace_entry ent; |
114 | unsigned long ip; | 119 | unsigned long ip; |
120 | int depth; | ||
115 | char buf[]; | 121 | char buf[]; |
116 | }; | 122 | }; |
117 | 123 | ||
@@ -153,6 +159,17 @@ struct trace_branch { | |||
153 | char correct; | 159 | char correct; |
154 | }; | 160 | }; |
155 | 161 | ||
162 | struct bts_entry { | ||
163 | struct trace_entry ent; | ||
164 | unsigned long from; | ||
165 | unsigned long to; | ||
166 | }; | ||
167 | |||
168 | struct trace_power { | ||
169 | struct trace_entry ent; | ||
170 | struct power_trace state_data; | ||
171 | }; | ||
172 | |||
156 | /* | 173 | /* |
157 | * trace_flag_type is an enumeration that holds different | 174 | * trace_flag_type is an enumeration that holds different |
158 | * states when a trace occurs. These are: | 175 | * states when a trace occurs. These are: |
@@ -257,7 +274,12 @@ extern void __ftrace_bad_type(void); | |||
257 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ | 274 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ |
258 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ | 275 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ |
259 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ | 276 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ |
260 | IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\ | 277 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ |
278 | TRACE_GRAPH_ENT); \ | ||
279 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | ||
280 | TRACE_GRAPH_RET); \ | ||
281 | IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\ | ||
282 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | ||
261 | __ftrace_bad_type(); \ | 283 | __ftrace_bad_type(); \ |
262 | } while (0) | 284 | } while (0) |
263 | 285 | ||
@@ -311,6 +333,7 @@ struct tracer { | |||
311 | int (*selftest)(struct tracer *trace, | 333 | int (*selftest)(struct tracer *trace, |
312 | struct trace_array *tr); | 334 | struct trace_array *tr); |
313 | #endif | 335 | #endif |
336 | void (*print_header)(struct seq_file *m); | ||
314 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 337 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
315 | /* If you handled the flag setting, return 0 */ | 338 | /* If you handled the flag setting, return 0 */ |
316 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 339 | int (*set_flag)(u32 old_flags, u32 bit, int set); |
@@ -388,8 +411,12 @@ void trace_function(struct trace_array *tr, | |||
388 | unsigned long ip, | 411 | unsigned long ip, |
389 | unsigned long parent_ip, | 412 | unsigned long parent_ip, |
390 | unsigned long flags, int pc); | 413 | unsigned long flags, int pc); |
391 | void | 414 | |
392 | trace_function_return(struct ftrace_retfunc *trace); | 415 | void trace_graph_return(struct ftrace_graph_ret *trace); |
416 | int trace_graph_entry(struct ftrace_graph_ent *trace); | ||
417 | void trace_bts(struct trace_array *tr, | ||
418 | unsigned long from, | ||
419 | unsigned long to); | ||
393 | 420 | ||
394 | void tracing_start_cmdline_record(void); | 421 | void tracing_start_cmdline_record(void); |
395 | void tracing_stop_cmdline_record(void); | 422 | void tracing_stop_cmdline_record(void); |
@@ -431,6 +458,7 @@ struct tracer_switch_ops { | |||
431 | struct tracer_switch_ops *next; | 458 | struct tracer_switch_ops *next; |
432 | }; | 459 | }; |
433 | 460 | ||
461 | char *trace_find_cmdline(int pid); | ||
434 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 462 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
435 | 463 | ||
436 | #ifdef CONFIG_DYNAMIC_FTRACE | 464 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -471,20 +499,63 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | |||
471 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 499 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
472 | size_t cnt); | 500 | size_t cnt); |
473 | extern long ns2usecs(cycle_t nsec); | 501 | extern long ns2usecs(cycle_t nsec); |
474 | extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); | 502 | extern int |
503 | trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); | ||
475 | 504 | ||
476 | extern unsigned long trace_flags; | 505 | extern unsigned long trace_flags; |
477 | 506 | ||
478 | /* Standard output formatting function used for function return traces */ | 507 | /* Standard output formatting function used for function return traces */ |
479 | #ifdef CONFIG_FUNCTION_RET_TRACER | 508 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
480 | extern enum print_line_t print_return_function(struct trace_iterator *iter); | 509 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); |
510 | |||
511 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
512 | /* TODO: make this variable */ | ||
513 | #define FTRACE_GRAPH_MAX_FUNCS 32 | ||
514 | extern int ftrace_graph_count; | ||
515 | extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; | ||
516 | |||
517 | static inline int ftrace_graph_addr(unsigned long addr) | ||
518 | { | ||
519 | int i; | ||
520 | |||
521 | if (!ftrace_graph_count || test_tsk_trace_graph(current)) | ||
522 | return 1; | ||
523 | |||
524 | for (i = 0; i < ftrace_graph_count; i++) { | ||
525 | if (addr == ftrace_graph_funcs[i]) | ||
526 | return 1; | ||
527 | } | ||
528 | |||
529 | return 0; | ||
530 | } | ||
481 | #else | 531 | #else |
532 | static inline int ftrace_trace_addr(unsigned long addr) | ||
533 | { | ||
534 | return 1; | ||
535 | } | ||
536 | static inline int ftrace_graph_addr(unsigned long addr) | ||
537 | { | ||
538 | return 1; | ||
539 | } | ||
540 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
541 | |||
542 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
482 | static inline enum print_line_t | 543 | static inline enum print_line_t |
483 | print_return_function(struct trace_iterator *iter) | 544 | print_graph_function(struct trace_iterator *iter) |
484 | { | 545 | { |
485 | return TRACE_TYPE_UNHANDLED; | 546 | return TRACE_TYPE_UNHANDLED; |
486 | } | 547 | } |
487 | #endif | 548 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
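ftrace_graph_addr() above consults a small fixed-size filter table (ftrace_graph_funcs/ftrace_graph_count) when CONFIG_DYNAMIC_FTRACE is enabled; an empty table means "trace everything". The patch fills that table elsewhere; purely as an illustration of the data structure, a hypothetical helper that appends one address to the filter could look like this:

	/* Hypothetical helper, not part of this patch. */
	static int ftrace_graph_add_filter_addr(unsigned long addr)
	{
		if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS)
			return -ENOSPC;	/* filter table is full */

		ftrace_graph_funcs[ftrace_graph_count++] = addr;
		return 0;
	}

Once at least one entry is present, trace_graph_entry() (see the trace.c hunk above) only records functions whose address matches an entry, unless the task already has its trace-graph flag set.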
549 | |||
550 | extern struct pid *ftrace_pid_trace; | ||
551 | |||
552 | static inline int ftrace_trace_task(struct task_struct *task) | ||
553 | { | ||
554 | if (!ftrace_pid_trace) | ||
555 | return 1; | ||
556 | |||
557 | return test_tsk_trace_trace(task); | ||
558 | } | ||
488 | 559 | ||
489 | /* | 560 | /* |
490 | * trace_iterator_flags is an enumeration that defines bit | 561 | * trace_iterator_flags is an enumeration that defines bit |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 877ee88e6a74..6c00feb3bac7 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/kallsyms.h> | 6 | #include <linux/kallsyms.h> |
7 | #include <linux/seq_file.h> | 7 | #include <linux/seq_file.h> |
8 | #include <linux/spinlock.h> | 8 | #include <linux/spinlock.h> |
9 | #include <linux/irqflags.h> | ||
9 | #include <linux/debugfs.h> | 10 | #include <linux/debugfs.h> |
10 | #include <linux/uaccess.h> | 11 | #include <linux/uaccess.h> |
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
@@ -41,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
41 | if (unlikely(!tr)) | 42 | if (unlikely(!tr)) |
42 | return; | 43 | return; |
43 | 44 | ||
44 | raw_local_irq_save(flags); | 45 | local_irq_save(flags); |
45 | cpu = raw_smp_processor_id(); | 46 | cpu = raw_smp_processor_id(); |
46 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | 47 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) |
47 | goto out; | 48 | goto out; |
@@ -73,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
73 | 74 | ||
74 | out: | 75 | out: |
75 | atomic_dec(&tr->data[cpu]->disabled); | 76 | atomic_dec(&tr->data[cpu]->disabled); |
76 | raw_local_irq_restore(flags); | 77 | local_irq_restore(flags); |
77 | } | 78 | } |
78 | 79 | ||
79 | static inline | 80 | static inline |
diff --git a/kernel/trace/trace_bts.c b/kernel/trace/trace_bts.c new file mode 100644 index 000000000000..23b76e4690ef --- /dev/null +++ b/kernel/trace/trace_bts.c | |||
@@ -0,0 +1,276 @@ | |||
1 | /* | ||
2 | * BTS tracer | ||
3 | * | ||
4 | * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/fs.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/kallsyms.h> | ||
13 | |||
14 | #include <asm/ds.h> | ||
15 | |||
16 | #include "trace.h" | ||
17 | |||
18 | |||
19 | #define SIZEOF_BTS (1 << 13) | ||
20 | |||
21 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); | ||
22 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); | ||
23 | |||
24 | #define this_tracer per_cpu(tracer, smp_processor_id()) | ||
25 | #define this_buffer per_cpu(buffer, smp_processor_id()) | ||
26 | |||
27 | |||
28 | /* | ||
29 | * Information to interpret a BTS record. | ||
30 | * This will go into an in-kernel BTS interface. | ||
31 | */ | ||
32 | static unsigned char sizeof_field; | ||
33 | static unsigned long debugctl_mask; | ||
34 | |||
35 | #define sizeof_bts (3 * sizeof_field) | ||
36 | |||
37 | static void bts_trace_cpuinit(struct cpuinfo_x86 *c) | ||
38 | { | ||
39 | switch (c->x86) { | ||
40 | case 0x6: | ||
41 | switch (c->x86_model) { | ||
42 | case 0x0 ... 0xC: | ||
43 | break; | ||
44 | case 0xD: | ||
45 | case 0xE: /* Pentium M */ | ||
46 | sizeof_field = sizeof(long); | ||
47 | debugctl_mask = (1<<6)|(1<<7); | ||
48 | break; | ||
49 | default: | ||
50 | sizeof_field = 8; | ||
51 | debugctl_mask = (1<<6)|(1<<7); | ||
52 | break; | ||
53 | } | ||
54 | break; | ||
55 | case 0xF: | ||
56 | switch (c->x86_model) { | ||
57 | case 0x0: | ||
58 | case 0x1: | ||
59 | case 0x2: /* Netburst */ | ||
60 | sizeof_field = sizeof(long); | ||
61 | debugctl_mask = (1<<2)|(1<<3); | ||
62 | break; | ||
63 | default: | ||
64 | /* sorry, don't know about them */ | ||
65 | break; | ||
66 | } | ||
67 | break; | ||
68 | default: | ||
69 | /* sorry, don't know about them */ | ||
70 | break; | ||
71 | } | ||
72 | } | ||
73 | |||
74 | static inline void bts_enable(void) | ||
75 | { | ||
76 | unsigned long debugctl; | ||
77 | |||
78 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); | ||
79 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | debugctl_mask); | ||
80 | } | ||
81 | |||
82 | static inline void bts_disable(void) | ||
83 | { | ||
84 | unsigned long debugctl; | ||
85 | |||
86 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); | ||
87 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl & ~debugctl_mask); | ||
88 | } | ||
89 | |||
90 | static void bts_trace_reset(struct trace_array *tr) | ||
91 | { | ||
92 | int cpu; | ||
93 | |||
94 | tr->time_start = ftrace_now(tr->cpu); | ||
95 | |||
96 | for_each_online_cpu(cpu) | ||
97 | tracing_reset(tr, cpu); | ||
98 | } | ||
99 | |||
100 | static void bts_trace_start_cpu(void *arg) | ||
101 | { | ||
102 | this_tracer = | ||
103 | ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS, | ||
104 | /* ovfl = */ NULL, /* th = */ (size_t)-1); | ||
105 | if (IS_ERR(this_tracer)) { | ||
106 | this_tracer = NULL; | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | bts_enable(); | ||
111 | } | ||
112 | |||
113 | static void bts_trace_start(struct trace_array *tr) | ||
114 | { | ||
115 | int cpu; | ||
116 | |||
117 | bts_trace_reset(tr); | ||
118 | |||
119 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
120 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | ||
121 | } | ||
122 | |||
123 | static void bts_trace_stop_cpu(void *arg) | ||
124 | { | ||
125 | if (this_tracer) { | ||
126 | bts_disable(); | ||
127 | |||
128 | ds_release_bts(this_tracer); | ||
129 | this_tracer = NULL; | ||
130 | } | ||
131 | } | ||
132 | |||
133 | static void bts_trace_stop(struct trace_array *tr) | ||
134 | { | ||
135 | int cpu; | ||
136 | |||
137 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
138 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); | ||
139 | } | ||
140 | |||
141 | static int bts_trace_init(struct trace_array *tr) | ||
142 | { | ||
143 | bts_trace_cpuinit(&boot_cpu_data); | ||
144 | bts_trace_reset(tr); | ||
145 | bts_trace_start(tr); | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static void bts_trace_print_header(struct seq_file *m) | ||
151 | { | ||
152 | #ifdef __i386__ | ||
153 | seq_puts(m, "# CPU# FROM TO FUNCTION\n"); | ||
154 | seq_puts(m, "# | | | |\n"); | ||
155 | #else | ||
156 | seq_puts(m, | ||
157 | "# CPU# FROM TO FUNCTION\n"); | ||
158 | seq_puts(m, | ||
159 | "# | | | |\n"); | ||
160 | #endif | ||
161 | } | ||
162 | |||
163 | static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | ||
164 | { | ||
165 | struct trace_entry *entry = iter->ent; | ||
166 | struct trace_seq *seq = &iter->seq; | ||
167 | struct bts_entry *it; | ||
168 | |||
169 | trace_assign_type(it, entry); | ||
170 | |||
171 | if (entry->type == TRACE_BTS) { | ||
172 | int ret; | ||
173 | #ifdef CONFIG_KALLSYMS | ||
174 | char function[KSYM_SYMBOL_LEN]; | ||
175 | sprint_symbol(function, it->from); | ||
176 | #else | ||
177 | char *function = "<unknown>"; | ||
178 | #endif | ||
179 | |||
180 | ret = trace_seq_printf(seq, "%4d 0x%lx -> 0x%lx [%s]\n", | ||
181 | entry->cpu, it->from, it->to, function); | ||
182 | if (!ret) | ||
183 | return TRACE_TYPE_PARTIAL_LINE; | ||
184 | return TRACE_TYPE_HANDLED; | ||
185 | } | ||
186 | return TRACE_TYPE_UNHANDLED; | ||
187 | } | ||
188 | |||
189 | void trace_bts(struct trace_array *tr, unsigned long from, unsigned long to) | ||
190 | { | ||
191 | struct ring_buffer_event *event; | ||
192 | struct bts_entry *entry; | ||
193 | unsigned long irq; | ||
194 | |||
195 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); | ||
196 | if (!event) | ||
197 | return; | ||
198 | entry = ring_buffer_event_data(event); | ||
199 | tracing_generic_entry_update(&entry->ent, 0, from); | ||
200 | entry->ent.type = TRACE_BTS; | ||
201 | entry->ent.cpu = smp_processor_id(); | ||
202 | entry->from = from; | ||
203 | entry->to = to; | ||
204 | ring_buffer_unlock_commit(tr->buffer, event, irq); | ||
205 | } | ||
206 | |||
207 | static void trace_bts_at(struct trace_array *tr, size_t index) | ||
208 | { | ||
209 | const void *raw = NULL; | ||
210 | unsigned long from, to; | ||
211 | int err; | ||
212 | |||
213 | err = ds_access_bts(this_tracer, index, &raw); | ||
214 | if (err < 0) | ||
215 | return; | ||
216 | |||
217 | from = *(const unsigned long *)raw; | ||
218 | to = *(const unsigned long *)((const char *)raw + sizeof_field); | ||
219 | |||
220 | trace_bts(tr, from, to); | ||
221 | } | ||
222 | |||
223 | static void trace_bts_cpu(void *arg) | ||
224 | { | ||
225 | struct trace_array *tr = (struct trace_array *) arg; | ||
226 | size_t index = 0, end = 0, i; | ||
227 | int err; | ||
228 | |||
229 | if (!this_tracer) | ||
230 | return; | ||
231 | |||
232 | bts_disable(); | ||
233 | |||
234 | err = ds_get_bts_index(this_tracer, &index); | ||
235 | if (err < 0) | ||
236 | goto out; | ||
237 | |||
238 | err = ds_get_bts_end(this_tracer, &end); | ||
239 | if (err < 0) | ||
240 | goto out; | ||
241 | |||
242 | for (i = index; i < end; i++) | ||
243 | trace_bts_at(tr, i); | ||
244 | |||
245 | for (i = 0; i < index; i++) | ||
246 | trace_bts_at(tr, i); | ||
247 | |||
248 | out: | ||
249 | bts_enable(); | ||
250 | } | ||
251 | |||
252 | static void trace_bts_prepare(struct trace_iterator *iter) | ||
253 | { | ||
254 | int cpu; | ||
255 | |||
256 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
257 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); | ||
258 | } | ||
259 | |||
260 | struct tracer bts_tracer __read_mostly = | ||
261 | { | ||
262 | .name = "bts", | ||
263 | .init = bts_trace_init, | ||
264 | .reset = bts_trace_stop, | ||
265 | .print_header = bts_trace_print_header, | ||
266 | .print_line = bts_trace_print_line, | ||
267 | .start = bts_trace_start, | ||
268 | .stop = bts_trace_stop, | ||
269 | .open = trace_bts_prepare | ||
270 | }; | ||
271 | |||
272 | __init static int init_bts_trace(void) | ||
273 | { | ||
274 | return register_tracer(&bts_tracer); | ||
275 | } | ||
276 | device_initcall(init_bts_trace); | ||
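The BTS tracer above writes each sampled branch with the standard ftrace ring-buffer sequence: reserve an event, fill in the type-specific entry, commit. A minimal sketch of that same pattern for a hypothetical event type (struct my_entry and TRACE_MY_EVENT are illustrative names, not part of this patch):

#include <linux/ftrace.h>

#include "trace.h"

/* Hypothetical payload; a real one would be declared in trace.h. */
struct my_entry {
	struct trace_entry	ent;
	unsigned long		a;
	unsigned long		b;
};

static void record_my_event(struct trace_array *tr,
			    unsigned long a, unsigned long b)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long irq;

	/* Reserve space for one entry; NULL means the buffer is unavailable. */
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
	if (!event)
		return;

	/* Fill the generic header and the event-specific fields. */
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_MY_EVENT;	/* assumed addition to enum trace_type */
	entry->a = a;
	entry->b = b;

	/* Publish the event to readers. */
	ring_buffer_unlock_commit(tr->buffer, event, irq);
}

The same reserve/fill/commit shape reappears in trace_power.c further down.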
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c new file mode 100644 index 000000000000..af60eef4cbcc --- /dev/null +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -0,0 +1,611 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Function graph tracer. | ||
4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * Mostly borrowed from function tracer which | ||
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/fs.h> | ||
13 | |||
14 | #include "trace.h" | ||
15 | |||
16 | #define TRACE_GRAPH_INDENT 2 | ||
17 | |||
18 | /* Flag options */ | ||
19 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | ||
20 | #define TRACE_GRAPH_PRINT_CPU 0x2 | ||
21 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | ||
22 | #define TRACE_GRAPH_PRINT_PROC 0x8 | ||
23 | |||
24 | static struct tracer_opt trace_opts[] = { | ||
25 | /* Display overruns? */ | ||
26 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | ||
27 | /* Display CPU? */ | ||
28 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | ||
29 | /* Display overhead? */ | ||
30 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | ||
31 | /* Display proc name/pid */ | ||
32 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | ||
33 | { } /* Empty entry */ | ||
34 | }; | ||
35 | |||
36 | static struct tracer_flags tracer_flags = { | ||
37 | /* Don't display overruns and proc by default */ | ||
38 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, | ||
39 | .opts = trace_opts | ||
40 | }; | ||
41 | |||
42 | /* pid on the last trace processed */ | ||
43 | static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; | ||
44 | |||
45 | static int graph_trace_init(struct trace_array *tr) | ||
46 | { | ||
47 | int cpu, ret; | ||
48 | |||
49 | for_each_online_cpu(cpu) | ||
50 | tracing_reset(tr, cpu); | ||
51 | |||
52 | ret = register_ftrace_graph(&trace_graph_return, | ||
53 | &trace_graph_entry); | ||
54 | if (ret) | ||
55 | return ret; | ||
56 | tracing_start_cmdline_record(); | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static void graph_trace_reset(struct trace_array *tr) | ||
62 | { | ||
63 | tracing_stop_cmdline_record(); | ||
64 | unregister_ftrace_graph(); | ||
65 | } | ||
66 | |||
67 | static inline int log10_cpu(int nb) | ||
68 | { | ||
69 | if (nb / 100) | ||
70 | return 3; | ||
71 | if (nb / 10) | ||
72 | return 2; | ||
73 | return 1; | ||
74 | } | ||
75 | |||
76 | static enum print_line_t | ||
77 | print_graph_cpu(struct trace_seq *s, int cpu) | ||
78 | { | ||
79 | int i; | ||
80 | int ret; | ||
81 | int log10_this = log10_cpu(cpu); | ||
82 | int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); | ||
83 | |||
84 | |||
85 | /* | ||
86 | * Start with a space character - to make it stand out | ||
87 | * to the right a bit when trace output is pasted into | ||
88 | * email: | ||
89 | */ | ||
90 | ret = trace_seq_printf(s, " "); | ||
91 | |||
92 | /* | ||
93 | * Tricky - we space the CPU field according to the max | ||
94 | * number of online CPUs. On a 2-cpu system it would take | ||
95 | * a maximum of 1 digit - on a 128 cpu system it would | ||
96 | * take up to 3 digits: | ||
97 | */ | ||
98 | for (i = 0; i < log10_all - log10_this; i++) { | ||
99 | ret = trace_seq_printf(s, " "); | ||
100 | if (!ret) | ||
101 | return TRACE_TYPE_PARTIAL_LINE; | ||
102 | } | ||
103 | ret = trace_seq_printf(s, "%d) ", cpu); | ||
104 | if (!ret) | ||
105 | return TRACE_TYPE_PARTIAL_LINE; | ||
106 | |||
107 | return TRACE_TYPE_HANDLED; | ||
108 | } | ||
109 | |||
110 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | ||
111 | |||
112 | static enum print_line_t | ||
113 | print_graph_proc(struct trace_seq *s, pid_t pid) | ||
114 | { | ||
115 | int i; | ||
116 | int ret; | ||
117 | int len; | ||
118 | char comm[8]; | ||
119 | int spaces = 0; | ||
120 | /* sign + log10(MAX_INT) + '\0' */ | ||
121 | char pid_str[11]; | ||
122 | |||
123 | strncpy(comm, trace_find_cmdline(pid), 7); | ||
124 | comm[7] = '\0'; | ||
125 | sprintf(pid_str, "%d", pid); | ||
126 | |||
127 | /* 1 stands for the "-" character */ | ||
128 | len = strlen(comm) + strlen(pid_str) + 1; | ||
129 | |||
130 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | ||
131 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | ||
132 | |||
133 | /* First spaces to align center */ | ||
134 | for (i = 0; i < spaces / 2; i++) { | ||
135 | ret = trace_seq_printf(s, " "); | ||
136 | if (!ret) | ||
137 | return TRACE_TYPE_PARTIAL_LINE; | ||
138 | } | ||
139 | |||
140 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | ||
141 | if (!ret) | ||
142 | return TRACE_TYPE_PARTIAL_LINE; | ||
143 | |||
144 | /* Last spaces to align center */ | ||
145 | for (i = 0; i < spaces - (spaces / 2); i++) { | ||
146 | ret = trace_seq_printf(s, " "); | ||
147 | if (!ret) | ||
148 | return TRACE_TYPE_PARTIAL_LINE; | ||
149 | } | ||
150 | return TRACE_TYPE_HANDLED; | ||
151 | } | ||
152 | |||
153 | |||
154 | /* If the pid changed since the last trace, output this event */ | ||
155 | static enum print_line_t | ||
156 | verif_pid(struct trace_seq *s, pid_t pid, int cpu) | ||
157 | { | ||
158 | pid_t prev_pid; | ||
159 | int ret; | ||
160 | |||
161 | if (last_pid[cpu] != -1 && last_pid[cpu] == pid) | ||
162 | return TRACE_TYPE_HANDLED; | ||
163 | |||
164 | prev_pid = last_pid[cpu]; | ||
165 | last_pid[cpu] = pid; | ||
166 | |||
167 | /* | ||
168 | * Context-switch trace line: | ||
169 | |||
170 | ------------------------------------------ | ||
171 | | 1) migration/0--1 => sshd-1755 | ||
172 | ------------------------------------------ | ||
173 | |||
174 | */ | ||
175 | ret = trace_seq_printf(s, | ||
176 | " ------------------------------------------\n"); | ||
177 | if (!ret) | ||
178 | return TRACE_TYPE_PARTIAL_LINE; | ||
179 | |||
180 | ret = print_graph_cpu(s, cpu); | ||
181 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
182 | return TRACE_TYPE_PARTIAL_LINE; | ||
183 | |||
184 | ret = print_graph_proc(s, prev_pid); | ||
185 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
186 | return TRACE_TYPE_PARTIAL_LINE; | ||
187 | |||
188 | ret = trace_seq_printf(s, " => "); | ||
189 | if (!ret) | ||
190 | return TRACE_TYPE_PARTIAL_LINE; | ||
191 | |||
192 | ret = print_graph_proc(s, pid); | ||
193 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
194 | return TRACE_TYPE_PARTIAL_LINE; | ||
195 | |||
196 | ret = trace_seq_printf(s, | ||
197 | "\n ------------------------------------------\n\n"); | ||
198 | if (!ret) | ||
199 | return TRACE_TYPE_PARTIAL_LINE; | ||
200 | |||
201 | return TRACE_TYPE_HANDLED; | ||
202 | } | ||
203 | |||
204 | static bool | ||
205 | trace_branch_is_leaf(struct trace_iterator *iter, | ||
206 | struct ftrace_graph_ent_entry *curr) | ||
207 | { | ||
208 | struct ring_buffer_iter *ring_iter; | ||
209 | struct ring_buffer_event *event; | ||
210 | struct ftrace_graph_ret_entry *next; | ||
211 | |||
212 | ring_iter = iter->buffer_iter[iter->cpu]; | ||
213 | |||
214 | if (!ring_iter) | ||
215 | return false; | ||
216 | |||
217 | event = ring_buffer_iter_peek(ring_iter, NULL); | ||
218 | |||
219 | if (!event) | ||
220 | return false; | ||
221 | |||
222 | next = ring_buffer_event_data(event); | ||
223 | |||
224 | if (next->ent.type != TRACE_GRAPH_RET) | ||
225 | return false; | ||
226 | |||
227 | if (curr->ent.pid != next->ent.pid || | ||
228 | curr->graph_ent.func != next->ret.func) | ||
229 | return false; | ||
230 | |||
231 | return true; | ||
232 | } | ||
233 | |||
234 | |||
235 | static enum print_line_t | ||
236 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | ||
237 | { | ||
238 | unsigned long nsecs_rem = do_div(duration, 1000); | ||
239 | /* log10(ULONG_MAX) + '\0' */ | ||
240 | char msecs_str[21]; | ||
241 | char nsecs_str[5]; | ||
242 | int ret, len; | ||
243 | int i; | ||
244 | |||
245 | sprintf(msecs_str, "%lu", (unsigned long) duration); | ||
246 | |||
247 | /* Print msecs */ | ||
248 | ret = trace_seq_printf(s, "%s", msecs_str); | ||
249 | if (!ret) | ||
250 | return TRACE_TYPE_PARTIAL_LINE; | ||
251 | |||
252 | len = strlen(msecs_str); | ||
253 | |||
254 | /* Print nsecs (we don't want to exceed 7 digits) */ | ||
255 | if (len < 7) { | ||
256 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); | ||
257 | ret = trace_seq_printf(s, ".%s", nsecs_str); | ||
258 | if (!ret) | ||
259 | return TRACE_TYPE_PARTIAL_LINE; | ||
260 | len += strlen(nsecs_str); | ||
261 | } | ||
262 | |||
263 | ret = trace_seq_printf(s, " us "); | ||
264 | if (!ret) | ||
265 | return TRACE_TYPE_PARTIAL_LINE; | ||
266 | |||
267 | /* Print remaining spaces to fit the row's width */ | ||
268 | for (i = len; i < 7; i++) { | ||
269 | ret = trace_seq_printf(s, " "); | ||
270 | if (!ret) | ||
271 | return TRACE_TYPE_PARTIAL_LINE; | ||
272 | } | ||
273 | |||
274 | ret = trace_seq_printf(s, "| "); | ||
275 | if (!ret) | ||
276 | return TRACE_TYPE_PARTIAL_LINE; | ||
277 | return TRACE_TYPE_HANDLED; | ||
278 | |||
279 | } | ||
280 | |||
281 | /* Signal an overhead of execution time to the output */ | ||
282 | static int | ||
283 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | ||
284 | { | ||
285 | /* Duration exceeded 100 msecs */ | ||
286 | if (duration > 100000ULL) | ||
287 | return trace_seq_printf(s, "! "); | ||
288 | |||
289 | /* Duration exceeded 10 msecs */ | ||
290 | if (duration > 10000ULL) | ||
291 | return trace_seq_printf(s, "+ "); | ||
292 | |||
293 | return trace_seq_printf(s, " "); | ||
294 | } | ||
295 | |||
296 | /* Case of a leaf function on its call entry */ | ||
297 | static enum print_line_t | ||
298 | print_graph_entry_leaf(struct trace_iterator *iter, | ||
299 | struct ftrace_graph_ent_entry *entry, struct trace_seq *s) | ||
300 | { | ||
301 | struct ftrace_graph_ret_entry *ret_entry; | ||
302 | struct ftrace_graph_ret *graph_ret; | ||
303 | struct ring_buffer_event *event; | ||
304 | struct ftrace_graph_ent *call; | ||
305 | unsigned long long duration; | ||
306 | int ret; | ||
307 | int i; | ||
308 | |||
309 | event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | ||
310 | ret_entry = ring_buffer_event_data(event); | ||
311 | graph_ret = &ret_entry->ret; | ||
312 | call = &entry->graph_ent; | ||
313 | duration = graph_ret->rettime - graph_ret->calltime; | ||
314 | |||
315 | /* Overhead */ | ||
316 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
317 | ret = print_graph_overhead(duration, s); | ||
318 | if (!ret) | ||
319 | return TRACE_TYPE_PARTIAL_LINE; | ||
320 | } | ||
321 | |||
322 | /* Duration */ | ||
323 | ret = print_graph_duration(duration, s); | ||
324 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
325 | return TRACE_TYPE_PARTIAL_LINE; | ||
326 | |||
327 | /* Function */ | ||
328 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | ||
329 | ret = trace_seq_printf(s, " "); | ||
330 | if (!ret) | ||
331 | return TRACE_TYPE_PARTIAL_LINE; | ||
332 | } | ||
333 | |||
334 | ret = seq_print_ip_sym(s, call->func, 0); | ||
335 | if (!ret) | ||
336 | return TRACE_TYPE_PARTIAL_LINE; | ||
337 | |||
338 | ret = trace_seq_printf(s, "();\n"); | ||
339 | if (!ret) | ||
340 | return TRACE_TYPE_PARTIAL_LINE; | ||
341 | |||
342 | return TRACE_TYPE_HANDLED; | ||
343 | } | ||
344 | |||
345 | static enum print_line_t | ||
346 | print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, | ||
347 | struct trace_seq *s) | ||
348 | { | ||
349 | int i; | ||
350 | int ret; | ||
351 | struct ftrace_graph_ent *call = &entry->graph_ent; | ||
352 | |||
353 | /* No overhead */ | ||
354 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
355 | ret = trace_seq_printf(s, " "); | ||
356 | if (!ret) | ||
357 | return TRACE_TYPE_PARTIAL_LINE; | ||
358 | } | ||
359 | |||
360 | /* No time */ | ||
361 | ret = trace_seq_printf(s, " | "); | ||
362 | |||
363 | /* Function */ | ||
364 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | ||
365 | ret = trace_seq_printf(s, " "); | ||
366 | if (!ret) | ||
367 | return TRACE_TYPE_PARTIAL_LINE; | ||
368 | } | ||
369 | |||
370 | ret = seq_print_ip_sym(s, call->func, 0); | ||
371 | if (!ret) | ||
372 | return TRACE_TYPE_PARTIAL_LINE; | ||
373 | |||
374 | ret = trace_seq_printf(s, "() {\n"); | ||
375 | if (!ret) | ||
376 | return TRACE_TYPE_PARTIAL_LINE; | ||
377 | |||
378 | return TRACE_TYPE_HANDLED; | ||
379 | } | ||
380 | |||
381 | static enum print_line_t | ||
382 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | ||
383 | struct trace_iterator *iter, int cpu) | ||
384 | { | ||
385 | int ret; | ||
386 | struct trace_entry *ent = iter->ent; | ||
387 | |||
388 | /* Pid */ | ||
389 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | ||
390 | return TRACE_TYPE_PARTIAL_LINE; | ||
391 | |||
392 | /* Cpu */ | ||
393 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | ||
394 | ret = print_graph_cpu(s, cpu); | ||
395 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
396 | return TRACE_TYPE_PARTIAL_LINE; | ||
397 | } | ||
398 | |||
399 | /* Proc */ | ||
400 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
401 | ret = print_graph_proc(s, ent->pid); | ||
402 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
403 | return TRACE_TYPE_PARTIAL_LINE; | ||
404 | |||
405 | ret = trace_seq_printf(s, " | "); | ||
406 | if (!ret) | ||
407 | return TRACE_TYPE_PARTIAL_LINE; | ||
408 | } | ||
409 | |||
410 | if (trace_branch_is_leaf(iter, field)) | ||
411 | return print_graph_entry_leaf(iter, field, s); | ||
412 | else | ||
413 | return print_graph_entry_nested(field, s); | ||
414 | |||
415 | } | ||
416 | |||
417 | static enum print_line_t | ||
418 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | ||
419 | struct trace_entry *ent, int cpu) | ||
420 | { | ||
421 | int i; | ||
422 | int ret; | ||
423 | unsigned long long duration = trace->rettime - trace->calltime; | ||
424 | |||
425 | /* Pid */ | ||
426 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | ||
427 | return TRACE_TYPE_PARTIAL_LINE; | ||
428 | |||
429 | /* Cpu */ | ||
430 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | ||
431 | ret = print_graph_cpu(s, cpu); | ||
432 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
433 | return TRACE_TYPE_PARTIAL_LINE; | ||
434 | } | ||
435 | |||
436 | /* Proc */ | ||
437 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
438 | ret = print_graph_proc(s, ent->pid); | ||
439 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
440 | return TRACE_TYPE_PARTIAL_LINE; | ||
441 | |||
442 | ret = trace_seq_printf(s, " | "); | ||
443 | if (!ret) | ||
444 | return TRACE_TYPE_PARTIAL_LINE; | ||
445 | } | ||
446 | |||
447 | /* Overhead */ | ||
448 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
449 | ret = print_graph_overhead(duration, s); | ||
450 | if (!ret) | ||
451 | return TRACE_TYPE_PARTIAL_LINE; | ||
452 | } | ||
453 | |||
454 | /* Duration */ | ||
455 | ret = print_graph_duration(duration, s); | ||
456 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
457 | return TRACE_TYPE_PARTIAL_LINE; | ||
458 | |||
459 | /* Closing brace */ | ||
460 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | ||
461 | ret = trace_seq_printf(s, " "); | ||
462 | if (!ret) | ||
463 | return TRACE_TYPE_PARTIAL_LINE; | ||
464 | } | ||
465 | |||
466 | ret = trace_seq_printf(s, "}\n"); | ||
467 | if (!ret) | ||
468 | return TRACE_TYPE_PARTIAL_LINE; | ||
469 | |||
470 | /* Overrun */ | ||
471 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { | ||
472 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | ||
473 | trace->overrun); | ||
474 | if (!ret) | ||
475 | return TRACE_TYPE_PARTIAL_LINE; | ||
476 | } | ||
477 | return TRACE_TYPE_HANDLED; | ||
478 | } | ||
479 | |||
480 | static enum print_line_t | ||
481 | print_graph_comment(struct print_entry *trace, struct trace_seq *s, | ||
482 | struct trace_entry *ent, struct trace_iterator *iter) | ||
483 | { | ||
484 | int i; | ||
485 | int ret; | ||
486 | |||
487 | /* Pid */ | ||
488 | if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE) | ||
489 | return TRACE_TYPE_PARTIAL_LINE; | ||
490 | |||
491 | /* Cpu */ | ||
492 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | ||
493 | ret = print_graph_cpu(s, iter->cpu); | ||
494 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
495 | return TRACE_TYPE_PARTIAL_LINE; | ||
496 | } | ||
497 | |||
498 | /* Proc */ | ||
499 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
500 | ret = print_graph_proc(s, ent->pid); | ||
501 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
502 | return TRACE_TYPE_PARTIAL_LINE; | ||
503 | |||
504 | ret = trace_seq_printf(s, " | "); | ||
505 | if (!ret) | ||
506 | return TRACE_TYPE_PARTIAL_LINE; | ||
507 | } | ||
508 | |||
509 | /* No overhead */ | ||
510 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
511 | ret = trace_seq_printf(s, " "); | ||
512 | if (!ret) | ||
513 | return TRACE_TYPE_PARTIAL_LINE; | ||
514 | } | ||
515 | |||
516 | /* No time */ | ||
517 | ret = trace_seq_printf(s, " | "); | ||
518 | if (!ret) | ||
519 | return TRACE_TYPE_PARTIAL_LINE; | ||
520 | |||
521 | /* Indentation */ | ||
522 | if (trace->depth > 0) | ||
523 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { | ||
524 | ret = trace_seq_printf(s, " "); | ||
525 | if (!ret) | ||
526 | return TRACE_TYPE_PARTIAL_LINE; | ||
527 | } | ||
528 | |||
529 | /* The comment */ | ||
530 | ret = trace_seq_printf(s, "/* %s", trace->buf); | ||
531 | if (!ret) | ||
532 | return TRACE_TYPE_PARTIAL_LINE; | ||
533 | |||
534 | if (ent->flags & TRACE_FLAG_CONT) | ||
535 | trace_seq_print_cont(s, iter); | ||
536 | |||
537 | ret = trace_seq_printf(s, " */\n"); | ||
538 | if (!ret) | ||
539 | return TRACE_TYPE_PARTIAL_LINE; | ||
540 | |||
541 | return TRACE_TYPE_HANDLED; | ||
542 | } | ||
543 | |||
544 | |||
545 | enum print_line_t | ||
546 | print_graph_function(struct trace_iterator *iter) | ||
547 | { | ||
548 | struct trace_seq *s = &iter->seq; | ||
549 | struct trace_entry *entry = iter->ent; | ||
550 | |||
551 | switch (entry->type) { | ||
552 | case TRACE_GRAPH_ENT: { | ||
553 | struct ftrace_graph_ent_entry *field; | ||
554 | trace_assign_type(field, entry); | ||
555 | return print_graph_entry(field, s, iter, | ||
556 | iter->cpu); | ||
557 | } | ||
558 | case TRACE_GRAPH_RET: { | ||
559 | struct ftrace_graph_ret_entry *field; | ||
560 | trace_assign_type(field, entry); | ||
561 | return print_graph_return(&field->ret, s, entry, iter->cpu); | ||
562 | } | ||
563 | case TRACE_PRINT: { | ||
564 | struct print_entry *field; | ||
565 | trace_assign_type(field, entry); | ||
566 | return print_graph_comment(field, s, entry, iter); | ||
567 | } | ||
568 | default: | ||
569 | return TRACE_TYPE_UNHANDLED; | ||
570 | } | ||
571 | } | ||
572 | |||
573 | static void print_graph_headers(struct seq_file *s) | ||
574 | { | ||
575 | /* 1st line */ | ||
576 | seq_printf(s, "# "); | ||
577 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | ||
578 | seq_printf(s, "CPU "); | ||
579 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | ||
580 | seq_printf(s, "TASK/PID "); | ||
581 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) | ||
582 | seq_printf(s, "OVERHEAD/"); | ||
583 | seq_printf(s, "DURATION FUNCTION CALLS\n"); | ||
584 | |||
585 | /* 2nd line */ | ||
586 | seq_printf(s, "# "); | ||
587 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | ||
588 | seq_printf(s, "| "); | ||
589 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | ||
590 | seq_printf(s, "| | "); | ||
591 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
592 | seq_printf(s, "| "); | ||
593 | seq_printf(s, "| | | | |\n"); | ||
594 | } else | ||
595 | seq_printf(s, " | | | | |\n"); | ||
596 | } | ||
597 | static struct tracer graph_trace __read_mostly = { | ||
598 | .name = "function_graph", | ||
599 | .init = graph_trace_init, | ||
600 | .reset = graph_trace_reset, | ||
601 | .print_line = print_graph_function, | ||
602 | .print_header = print_graph_headers, | ||
603 | .flags = &tracer_flags, | ||
604 | }; | ||
605 | |||
606 | static __init int init_graph_trace(void) | ||
607 | { | ||
608 | return register_tracer(&graph_trace); | ||
609 | } | ||
610 | |||
611 | device_initcall(init_graph_trace); | ||
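For reference, print_graph_duration() above keeps the duration column at most seven digits wide: do_div() splits the nanosecond delta into microseconds plus a remainder, and the remainder is truncated so the two parts together never exceed seven characters. A small userspace illustration of the same arithmetic (do_div() is kernel-only, so plain division stands in for it; this is a sketch, not code from the patch):

#include <stdio.h>
#include <string.h>

/* Illustration only: mimic the graph tracer's "usecs.nsecs us" column. */
static void format_duration(unsigned long long duration_ns,
			    char *out, size_t outlen)
{
	unsigned long nsecs_rem = (unsigned long)(duration_ns % 1000);
	unsigned long long usecs = duration_ns / 1000;
	char msecs_str[21];	/* log10(ULLONG_MAX) + '\0' */
	char nsecs_str[5];
	size_t len;

	snprintf(msecs_str, sizeof(msecs_str), "%llu", usecs);
	len = strlen(msecs_str);

	if (len < 7) {
		/* Truncate the fraction so usecs plus nsecs stay within 7 digits. */
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		snprintf(out, outlen, "%s.%s us", msecs_str, nsecs_str);
	} else {
		snprintf(out, outlen, "%s us", msecs_str);
	}
}

int main(void)
{
	char buf[32];

	format_duration(1234567ULL, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "1234.567 us" */
	return 0;
}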
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c deleted file mode 100644 index e00d64509c9c..000000000000 --- a/kernel/trace/trace_functions_return.c +++ /dev/null | |||
@@ -1,98 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Function return tracer. | ||
4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * Mostly borrowed from function tracer which | ||
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/fs.h> | ||
13 | |||
14 | #include "trace.h" | ||
15 | |||
16 | |||
17 | #define TRACE_RETURN_PRINT_OVERRUN 0x1 | ||
18 | static struct tracer_opt trace_opts[] = { | ||
19 | /* Display overruns or not */ | ||
20 | { TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) }, | ||
21 | { } /* Empty entry */ | ||
22 | }; | ||
23 | |||
24 | static struct tracer_flags tracer_flags = { | ||
25 | .val = 0, /* Don't display overruns by default */ | ||
26 | .opts = trace_opts | ||
27 | }; | ||
28 | |||
29 | |||
30 | static int return_trace_init(struct trace_array *tr) | ||
31 | { | ||
32 | int cpu; | ||
33 | for_each_online_cpu(cpu) | ||
34 | tracing_reset(tr, cpu); | ||
35 | |||
36 | return register_ftrace_return(&trace_function_return); | ||
37 | } | ||
38 | |||
39 | static void return_trace_reset(struct trace_array *tr) | ||
40 | { | ||
41 | unregister_ftrace_return(); | ||
42 | } | ||
43 | |||
44 | |||
45 | enum print_line_t | ||
46 | print_return_function(struct trace_iterator *iter) | ||
47 | { | ||
48 | struct trace_seq *s = &iter->seq; | ||
49 | struct trace_entry *entry = iter->ent; | ||
50 | struct ftrace_ret_entry *field; | ||
51 | int ret; | ||
52 | |||
53 | if (entry->type == TRACE_FN_RET) { | ||
54 | trace_assign_type(field, entry); | ||
55 | ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip); | ||
56 | if (!ret) | ||
57 | return TRACE_TYPE_PARTIAL_LINE; | ||
58 | |||
59 | ret = seq_print_ip_sym(s, field->ip, | ||
60 | trace_flags & TRACE_ITER_SYM_MASK); | ||
61 | if (!ret) | ||
62 | return TRACE_TYPE_PARTIAL_LINE; | ||
63 | |||
64 | ret = trace_seq_printf(s, " (%llu ns)", | ||
65 | field->rettime - field->calltime); | ||
66 | if (!ret) | ||
67 | return TRACE_TYPE_PARTIAL_LINE; | ||
68 | |||
69 | if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) { | ||
70 | ret = trace_seq_printf(s, " (Overruns: %lu)", | ||
71 | field->overrun); | ||
72 | if (!ret) | ||
73 | return TRACE_TYPE_PARTIAL_LINE; | ||
74 | } | ||
75 | |||
76 | ret = trace_seq_printf(s, "\n"); | ||
77 | if (!ret) | ||
78 | return TRACE_TYPE_PARTIAL_LINE; | ||
79 | |||
80 | return TRACE_TYPE_HANDLED; | ||
81 | } | ||
82 | return TRACE_TYPE_UNHANDLED; | ||
83 | } | ||
84 | |||
85 | static struct tracer return_trace __read_mostly = { | ||
86 | .name = "return", | ||
87 | .init = return_trace_init, | ||
88 | .reset = return_trace_reset, | ||
89 | .print_line = print_return_function, | ||
90 | .flags = &tracer_flags, | ||
91 | }; | ||
92 | |||
93 | static __init int init_return_trace(void) | ||
94 | { | ||
95 | return register_tracer(&return_trace); | ||
96 | } | ||
97 | |||
98 | device_initcall(init_return_trace); | ||
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 2a98a206acc2..2fb6da6523b3 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -366,5 +366,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map) | |||
366 | 366 | ||
367 | int mmio_trace_printk(const char *fmt, va_list args) | 367 | int mmio_trace_printk(const char *fmt, va_list args) |
368 | { | 368 | { |
369 | return trace_vprintk(0, fmt, args); | 369 | return trace_vprintk(0, -1, fmt, args); |
370 | } | 370 | } |
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c new file mode 100644 index 000000000000..a7172a352f62 --- /dev/null +++ b/kernel/trace/trace_power.c | |||
@@ -0,0 +1,179 @@ | |||
1 | /* | ||
2 | * ring buffer based C-state tracer | ||
3 | * | ||
4 | * Arjan van de Ven <arjan@linux.intel.com> | ||
5 | * Copyright (C) 2008 Intel Corporation | ||
6 | * | ||
7 | * Much is borrowed from trace_boot.c which is | ||
8 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/ftrace.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
18 | #include "trace.h" | ||
19 | |||
20 | static struct trace_array *power_trace; | ||
21 | static int __read_mostly trace_power_enabled; | ||
22 | |||
23 | |||
24 | static void start_power_trace(struct trace_array *tr) | ||
25 | { | ||
26 | trace_power_enabled = 1; | ||
27 | } | ||
28 | |||
29 | static void stop_power_trace(struct trace_array *tr) | ||
30 | { | ||
31 | trace_power_enabled = 0; | ||
32 | } | ||
33 | |||
34 | |||
35 | static int power_trace_init(struct trace_array *tr) | ||
36 | { | ||
37 | int cpu; | ||
38 | power_trace = tr; | ||
39 | |||
40 | trace_power_enabled = 1; | ||
41 | |||
42 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
43 | tracing_reset(tr, cpu); | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static enum print_line_t power_print_line(struct trace_iterator *iter) | ||
48 | { | ||
49 | int ret = 0; | ||
50 | struct trace_entry *entry = iter->ent; | ||
51 | struct trace_power *field; | ||
52 | struct power_trace *it; | ||
53 | struct trace_seq *s = &iter->seq; | ||
54 | struct timespec stamp; | ||
55 | struct timespec duration; | ||
56 | |||
57 | trace_assign_type(field, entry); | ||
58 | it = &field->state_data; | ||
59 | stamp = ktime_to_timespec(it->stamp); | ||
60 | duration = ktime_to_timespec(ktime_sub(it->end, it->stamp)); | ||
61 | |||
62 | if (entry->type == TRACE_POWER) { | ||
63 | if (it->type == POWER_CSTATE) | ||
64 | ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n", | ||
65 | stamp.tv_sec, | ||
66 | stamp.tv_nsec, | ||
67 | it->state, iter->cpu, | ||
68 | duration.tv_sec, | ||
69 | duration.tv_nsec); | ||
70 | if (it->type == POWER_PSTATE) | ||
71 | ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n", | ||
72 | stamp.tv_sec, | ||
73 | stamp.tv_nsec, | ||
74 | it->state, iter->cpu); | ||
75 | if (!ret) | ||
76 | return TRACE_TYPE_PARTIAL_LINE; | ||
77 | return TRACE_TYPE_HANDLED; | ||
78 | } | ||
79 | return TRACE_TYPE_UNHANDLED; | ||
80 | } | ||
81 | |||
82 | static struct tracer power_tracer __read_mostly = | ||
83 | { | ||
84 | .name = "power", | ||
85 | .init = power_trace_init, | ||
86 | .start = start_power_trace, | ||
87 | .stop = stop_power_trace, | ||
88 | .reset = stop_power_trace, | ||
89 | .print_line = power_print_line, | ||
90 | }; | ||
91 | |||
92 | static int __init init_power_trace(void) | ||
93 | { | ||
94 | return register_tracer(&power_tracer); | ||
95 | } | ||
96 | device_initcall(init_power_trace); | ||
97 | |||
98 | void trace_power_start(struct power_trace *it, unsigned int type, | ||
99 | unsigned int level) | ||
100 | { | ||
101 | if (!trace_power_enabled) | ||
102 | return; | ||
103 | |||
104 | memset(it, 0, sizeof(struct power_trace)); | ||
105 | it->state = level; | ||
106 | it->type = type; | ||
107 | it->stamp = ktime_get(); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(trace_power_start); | ||
110 | |||
111 | |||
112 | void trace_power_end(struct power_trace *it) | ||
113 | { | ||
114 | struct ring_buffer_event *event; | ||
115 | struct trace_power *entry; | ||
116 | struct trace_array_cpu *data; | ||
117 | unsigned long irq_flags; | ||
118 | struct trace_array *tr = power_trace; | ||
119 | |||
120 | if (!trace_power_enabled) | ||
121 | return; | ||
122 | |||
123 | preempt_disable(); | ||
124 | it->end = ktime_get(); | ||
125 | data = tr->data[smp_processor_id()]; | ||
126 | |||
127 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
128 | &irq_flags); | ||
129 | if (!event) | ||
130 | goto out; | ||
131 | entry = ring_buffer_event_data(event); | ||
132 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
133 | entry->ent.type = TRACE_POWER; | ||
134 | entry->state_data = *it; | ||
135 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
136 | |||
137 | trace_wake_up(); | ||
138 | |||
139 | out: | ||
140 | preempt_enable(); | ||
141 | } | ||
142 | EXPORT_SYMBOL_GPL(trace_power_end); | ||
143 | |||
144 | void trace_power_mark(struct power_trace *it, unsigned int type, | ||
145 | unsigned int level) | ||
146 | { | ||
147 | struct ring_buffer_event *event; | ||
148 | struct trace_power *entry; | ||
149 | struct trace_array_cpu *data; | ||
150 | unsigned long irq_flags; | ||
151 | struct trace_array *tr = power_trace; | ||
152 | |||
153 | if (!trace_power_enabled) | ||
154 | return; | ||
155 | |||
156 | memset(it, 0, sizeof(struct power_trace)); | ||
157 | it->state = level; | ||
158 | it->type = type; | ||
159 | it->stamp = ktime_get(); | ||
160 | preempt_disable(); | ||
161 | it->end = it->stamp; | ||
162 | data = tr->data[smp_processor_id()]; | ||
163 | |||
164 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
165 | &irq_flags); | ||
166 | if (!event) | ||
167 | goto out; | ||
168 | entry = ring_buffer_event_data(event); | ||
169 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
170 | entry->ent.type = TRACE_POWER; | ||
171 | entry->state_data = *it; | ||
172 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
173 | |||
174 | trace_wake_up(); | ||
175 | |||
176 | out: | ||
177 | preempt_enable(); | ||
178 | } | ||
179 | EXPORT_SYMBOL_GPL(trace_power_mark); | ||
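The power tracer above only drains and prints events; the exported helpers are intended to be called from the power-management paths themselves (the actual x86 idle and cpufreq call sites are added elsewhere in this series). A hedged sketch of how an idle-entry path might bracket a C-state transition; example_enter_cstate is a placeholder name, not a caller introduced by this patch:

#include <linux/ftrace.h>	/* struct power_trace, trace_power_start/end() */

static void example_enter_cstate(unsigned int cstate)
{
	struct power_trace it;

	/* Stamp the start time and the target C-state. */
	trace_power_start(&it, POWER_CSTATE, cstate);

	/* ... the architecture's mwait/hlt idle sequence would run here ... */

	/* Stamp the end time and push the event into the ring buffer. */
	trace_power_end(&it);
}

trace_power_mark() follows the same shape for one-shot events such as P-state changes, stamping start and end with the same timestamp.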
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index fde3be15c642..0b863f2cbc8e 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -48,7 +48,7 @@ static inline void check_stack(void) | |||
48 | if (!object_is_on_stack(&this_size)) | 48 | if (!object_is_on_stack(&this_size)) |
49 | return; | 49 | return; |
50 | 50 | ||
51 | raw_local_irq_save(flags); | 51 | local_irq_save(flags); |
52 | __raw_spin_lock(&max_stack_lock); | 52 | __raw_spin_lock(&max_stack_lock); |
53 | 53 | ||
54 | /* a race could have already updated it */ | 54 | /* a race could have already updated it */ |
@@ -78,6 +78,7 @@ static inline void check_stack(void) | |||
78 | * on a new max, so it is far from a fast path. | 78 | * on a new max, so it is far from a fast path. |
79 | */ | 79 | */ |
80 | while (i < max_stack_trace.nr_entries) { | 80 | while (i < max_stack_trace.nr_entries) { |
81 | int found = 0; | ||
81 | 82 | ||
82 | stack_dump_index[i] = this_size; | 83 | stack_dump_index[i] = this_size; |
83 | p = start; | 84 | p = start; |
@@ -86,17 +87,19 @@ static inline void check_stack(void) | |||
86 | if (*p == stack_dump_trace[i]) { | 87 | if (*p == stack_dump_trace[i]) { |
87 | this_size = stack_dump_index[i++] = | 88 | this_size = stack_dump_index[i++] = |
88 | (top - p) * sizeof(unsigned long); | 89 | (top - p) * sizeof(unsigned long); |
90 | found = 1; | ||
89 | /* Start the search from here */ | 91 | /* Start the search from here */ |
90 | start = p + 1; | 92 | start = p + 1; |
91 | } | 93 | } |
92 | } | 94 | } |
93 | 95 | ||
94 | i++; | 96 | if (!found) |
97 | i++; | ||
95 | } | 98 | } |
96 | 99 | ||
97 | out: | 100 | out: |
98 | __raw_spin_unlock(&max_stack_lock); | 101 | __raw_spin_unlock(&max_stack_lock); |
99 | raw_local_irq_restore(flags); | 102 | local_irq_restore(flags); |
100 | } | 103 | } |
101 | 104 | ||
102 | static void | 105 | static void |
@@ -162,11 +165,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
162 | if (ret < 0) | 165 | if (ret < 0) |
163 | return ret; | 166 | return ret; |
164 | 167 | ||
165 | raw_local_irq_save(flags); | 168 | local_irq_save(flags); |
166 | __raw_spin_lock(&max_stack_lock); | 169 | __raw_spin_lock(&max_stack_lock); |
167 | *ptr = val; | 170 | *ptr = val; |
168 | __raw_spin_unlock(&max_stack_lock); | 171 | __raw_spin_unlock(&max_stack_lock); |
169 | raw_local_irq_restore(flags); | 172 | local_irq_restore(flags); |
170 | 173 | ||
171 | return count; | 174 | return count; |
172 | } | 175 | } |
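The trace_stack.c hunk above fixes the per-entry bookkeeping: the old loop incremented i unconditionally at the bottom even though a successful match had already advanced it via stack_dump_index[i++], so the entry after every match was skipped. A stripped-down sketch of the corrected walk, with the kernel locking and globals removed so the control flow can be read (and compiled) on its own; all names here are illustrative:

#include <stdio.h>

/*
 * Illustration only: index each saved return address against the raw
 * stack words, advancing the entry index exactly once per entry.
 */
static void index_stack(const unsigned long *words, int nwords,
			const unsigned long *saved, int nentries,
			int *dump_index, int this_size)
{
	const unsigned long *p, *start = words, *top = words + nwords;
	int i = 0;

	while (i < nentries) {
		int found = 0;

		dump_index[i] = this_size;
		for (p = start; p < top && i < nentries; p++) {
			if (*p == saved[i]) {
				/* A match consumes entry i and narrows the search window. */
				this_size = dump_index[i++] =
					(int)((top - p) * sizeof(unsigned long));
				found = 1;
				start = p + 1;
			}
		}

		if (!found)	/* only step past an entry that never matched */
			i++;
	}
}

int main(void)
{
	unsigned long words[] = { 0, 0xdead, 0, 0xbeef, 0 };
	unsigned long saved[] = { 0xdead, 0xbeef };
	int depth[2];

	index_stack(words, 5, saved, 2, depth, (int)(5 * sizeof(unsigned long)));
	printf("%d %d\n", depth[0], depth[1]);	/* e.g. "32 16" on a 64-bit build */
	return 0;
}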