-rw-r--r--  Documentation/ftrace.txt                    |  79
-rw-r--r--  arch/x86/Kconfig                            |   2
-rw-r--r--  arch/x86/include/asm/ftrace.h               |   4
-rw-r--r--  arch/x86/kernel/Makefile                    |   4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  |   4
-rw-r--r--  arch/x86/kernel/entry_32.S                  |  17
-rw-r--r--  arch/x86/kernel/ftrace.c                    |  90
-rw-r--r--  arch/x86/kernel/process.c                   |  16
-rw-r--r--  include/linux/ftrace.h                      |  77
-rw-r--r--  include/linux/ftrace_irq.h                  |   2
-rw-r--r--  include/linux/sched.h                       |   2
-rw-r--r--  kernel/Makefile                             |   2
-rw-r--r--  kernel/fork.c                               |   4
-rw-r--r--  kernel/sched.c                              |   2
-rw-r--r--  kernel/trace/Kconfig                        |  30
-rw-r--r--  kernel/trace/Makefile                       |   3
-rw-r--r--  kernel/trace/ftrace.c                       | 306
-rw-r--r--  kernel/trace/trace.c                        |  79
-rw-r--r--  kernel/trace/trace.h                        |  42
-rw-r--r--  kernel/trace/trace_functions_graph.c        | 175
-rw-r--r--  kernel/trace/trace_functions_return.c       |  98
-rw-r--r--  kernel/trace/trace_power.c                  | 179
-rwxr-xr-x  scripts/recordmcount.pl                     |   2
-rw-r--r--  scripts/trace/power.pl                      | 108
24 files changed, 1054 insertions, 273 deletions
diff --git a/Documentation/ftrace.txt b/Documentation/ftrace.txt
index 35a78bc6651d..de05042f11b9 100644
--- a/Documentation/ftrace.txt
+++ b/Documentation/ftrace.txt
@@ -127,6 +127,8 @@ of ftrace. Here is a list of some of the key files:
 	be traced. If a function exists in both set_ftrace_filter
 	and set_ftrace_notrace, the function will _not_ be traced.
 
+  set_ftrace_pid: Have the function tracer only trace a single thread.
+
   available_filter_functions: This lists the functions that ftrace
 	has processed and can trace. These are the function
 	names that you can pass to "set_ftrace_filter" or
@@ -1073,6 +1075,83 @@ For simple one time traces, the above is sufficent. For anything else,
 a search through /proc/mounts may be needed to find where the debugfs
 file-system is mounted.
 
+
+Single thread tracing
+---------------------
+
+By writing into /debug/tracing/set_ftrace_pid you can trace a
+single thread. For example:
+
+# cat /debug/tracing/set_ftrace_pid
+no pid
+# echo 3111 > /debug/tracing/set_ftrace_pid
+# cat /debug/tracing/set_ftrace_pid
+3111
+# echo function > /debug/tracing/current_tracer
+# cat /debug/tracing/trace | head
+ # tracer: function
+ #
+ #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
+ #              | |       |          |         |
+     yum-updatesd-3111  [003]  1637.254676: finish_task_switch <-thread_return
+     yum-updatesd-3111  [003]  1637.254681: hrtimer_cancel <-schedule_hrtimeout_range
+     yum-updatesd-3111  [003]  1637.254682: hrtimer_try_to_cancel <-hrtimer_cancel
+     yum-updatesd-3111  [003]  1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel
+     yum-updatesd-3111  [003]  1637.254685: fget_light <-do_sys_poll
+     yum-updatesd-3111  [003]  1637.254686: pipe_poll <-do_sys_poll
+# echo -1 > /debug/tracing/set_ftrace_pid
+# cat /debug/tracing/trace | head
+ # tracer: function
+ #
+ #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
+ #              | |       |          |         |
+ ##### CPU 3 buffer started ####
+     yum-updatesd-3111  [003]  1701.957688: free_poll_entry <-poll_freewait
+     yum-updatesd-3111  [003]  1701.957689: remove_wait_queue <-free_poll_entry
+     yum-updatesd-3111  [003]  1701.957691: fput <-free_poll_entry
+     yum-updatesd-3111  [003]  1701.957692: audit_syscall_exit <-sysret_audit
+     yum-updatesd-3111  [003]  1701.957693: path_put <-audit_syscall_exit
+
+If you want to trace a task from the moment it starts executing, you
+could use something like this simple program:
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main (int argc, char **argv)
+{
+	if (argc < 2)
+		exit(-1);
+
+	if (fork() > 0) {
+		int fd, ffd;
+		char line[64];
+		int s;
+
+		ffd = open("/debug/tracing/current_tracer", O_WRONLY);
+		if (ffd < 0)
+			exit(-1);
+		write(ffd, "nop", 3);
+
+		fd = open("/debug/tracing/set_ftrace_pid", O_WRONLY);
+		s = sprintf(line, "%d\n", getpid());
+		write(fd, line, s);
+
+		write(ffd, "function", 8);
+
+		close(fd);
+		close(ffd);
+
+		execvp(argv[1], argv+1);
+	}
+
+	return 0;
+}
+
 dynamic ftrace
 --------------
 
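A side note on the example above: it can write its PID to set_ftrace_pid
before calling execvp() because exec replaces the program image but keeps
the PID, so the filter set here still matches the target once it runs. A
fork-free sketch of the same trick (not part of the patch; it assumes
debugfs is mounted at /debug as in the text, and leaves current_tracer
untouched):

	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		FILE *f;

		if (argc < 2)
			return 1;

		/* /debug/tracing is the mount point assumed above;
		 * adjust if debugfs lives elsewhere. */
		f = fopen("/debug/tracing/set_ftrace_pid", "w");
		if (!f)
			return 1;
		fprintf(f, "%d\n", (int)getpid());
		fclose(f);

		/* execvp() keeps our PID, so the filter still applies. */
		execvp(argv[1], argv + 1);
		return 1;
	}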
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e49a4fd718fe..0842b1127684 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,7 +29,7 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_RET_TRACER if X86_32
+	select HAVE_FUNCTION_GRAPH_TRACER if X86_32
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 754a3e082f94..7e61b4ceb9a4 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,7 +28,7 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef __ASSEMBLY__
 
@@ -51,6 +51,6 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 #endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index af2bc36ca1c4..64939a0c3986 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,7 +14,7 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # Don't trace __switch_to() but let it for function tracer
 CFLAGS_REMOVE_process_32.o = -pg
 endif
@@ -70,7 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
-obj-$(CONFIG_FUNCTION_RET_TRACER)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 8e48c5d4467d..88ea02dcb622 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,6 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
+#include <linux/ftrace.h>
 
 #include <linux/acpi.h>
 #include <acpi/processor.h>
@@ -391,6 +392,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	unsigned int i;
 	int result = 0;
+	struct power_trace it;
 
 	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
@@ -427,6 +429,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		}
 	}
 
+	trace_power_mark(&it, POWER_PSTATE, next_perf_state);
+
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
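trace_power_mark() is used here rather than a start/end pair because a
P-state transition is a point event with no natural duration; the idle
routines changed below bracket intervals with trace_power_start() and
trace_power_end() instead. A hypothetical driver snippet showing the same
point-event pattern (example_set_pstate is an illustrative name, not
kernel code):

	#include <linux/ftrace.h>

	static void example_set_pstate(unsigned int next_state)
	{
		struct power_trace it;

		/* one-shot event: logs the transition, no end marker */
		trace_power_mark(&it, POWER_PSTATE, next_state);

		/* ... program the new frequency/voltage here ... */
	}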
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 74defe21ba42..958af86186c4 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1174,6 +1174,11 @@ ftrace_call:
 	popl %edx
 	popl %ecx
 	popl %eax
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+	jmp ftrace_stub
+#endif
 
 .globl ftrace_stub
 ftrace_stub:
@@ -1188,9 +1193,9 @@ ENTRY(mcount)
 
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	cmpl $ftrace_stub, ftrace_function_return
-	jnz ftrace_return_caller
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpl $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1215,8 +1220,8 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-ENTRY(ftrace_return_caller)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
 	cmpl $0, function_trace_stop
 	jne ftrace_stub
 
@@ -1230,7 +1235,7 @@ ENTRY(ftrace_return_caller)
 	popl %ecx
 	popl %eax
 	ret
-END(ftrace_return_caller)
+END(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
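For readers who do not speak AT&T assembly, the dispatch that mcount
performs above is roughly the following C; a sketch only, which omits the
register save/restore the real stub does around the calls:

	/* Rough C sketch of the mcount dispatch; not literal kernel code. */
	void mcount_sketch(unsigned long ip, unsigned long parent_ip)
	{
		if (ftrace_trace_function != ftrace_stub) {
			ftrace_trace_function(ip, parent_ip); /* "jnz trace" path */
			return;
		}
	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub)
			ftrace_graph_caller();	/* reroutes the return address */
	#endif
	}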
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index bb137f7297ed..7ef914e6a2f6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -111,7 +111,6 @@ static void ftrace_mod_code(void)
 	 */
 	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
 					     MCOUNT_INSN_SIZE);
-
 }
 
 void ftrace_nmi_enter(void)
@@ -323,9 +322,53 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod_jmp(unsigned long ip,
+			  int old_offset, int new_offset)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+		return -EINVAL;
+
+	*(int *)(&code[1]) = new_offset;
+
+	if (do_ftrace_mod_code(ip, &code))
+		return -EPERM;
+
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
 
-#ifndef CONFIG_DYNAMIC_FTRACE
+#else /* CONFIG_DYNAMIC_FTRACE */
 
 /*
  * These functions are picked from those used on
@@ -343,11 +386,12 @@ void ftrace_nmi_exit(void)
 {
 	atomic_dec(&in_nmi);
 }
+
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
+				unsigned long func, int *depth)
 {
 	int index;
 
@@ -365,21 +409,22 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
+	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
 
 	index = current->curr_ret_stack;
 	*ret = current->ret_stack[index].ret;
-	*func = current->ret_stack[index].func;
-	*time = current->ret_stack[index].calltime;
-	*overrun = atomic_read(&current->trace_overrun);
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
 	current->curr_ret_stack--;
 }
 
@@ -389,13 +434,14 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
  */
 unsigned long ftrace_return_to_handler(void)
 {
-	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-				&trace.overrun);
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_function_return(&trace);
+	ftrace_graph_return(&trace);
 
-	return trace.ret;
+	return ret;
 }
 
 /*
@@ -407,6 +453,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long old;
 	unsigned long long calltime;
 	int faulted;
+	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
@@ -440,20 +487,27 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	);
 
 	if (WARN_ON(faulted)) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		return;
 	}
 
 	if (WARN_ON(!__kernel_text_address(old))) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		*parent = old;
 		return;
 	}
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+	ftrace_graph_entry(&trace);
+
 }
 
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
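ftrace_mod_jmp() above leans on the encoding of the 5-byte x86 relative
jmp: the opcode 0xe9 followed by a 32-bit displacement measured from the
end of the instruction, which is why both offsets are computed against
ip + MCOUNT_INSN_SIZE. The arithmetic, isolated as a sketch:

	/* Displacement of a rel32 jmp at 'ip' targeting 'target'.
	 * MCOUNT_INSN_SIZE is 5 on x86: 1 opcode + 4 displacement bytes. */
	static int rel32_displacement(unsigned long ip, unsigned long target)
	{
		return (int)(target - (ip + MCOUNT_INSN_SIZE));
	}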
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c622772744d8..c27af49a4ede 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/clockchips.h>
+#include <linux/ftrace.h>
 #include <asm/system.h>
 
 unsigned long idle_halt;
@@ -100,6 +101,9 @@ static inline int hlt_use_halt(void)
 void default_idle(void)
 {
 	if (hlt_use_halt()) {
+		struct power_trace it;
+
+		trace_power_start(&it, POWER_CSTATE, 1);
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
 		 * TS_POLLING-cleared state must be visible before we
@@ -112,6 +116,7 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(&it);
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -154,24 +159,31 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
+	struct power_trace it;
+
+	trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
 	if (!need_resched()) {
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
 			__mwait(ax, cx);
 	}
+	trace_power_end(&it);
 }
 
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
+	struct power_trace it;
 	if (!need_resched()) {
+		trace_power_start(&it, POWER_CSTATE, 1);
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(&it);
 	} else
 		local_irq_enable();
 }
@@ -183,9 +195,13 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
+	struct power_trace it;
+
+	trace_power_start(&it, POWER_CSTATE, 0);
 	local_irq_enable();
 	while (!need_resched())
 		cpu_relax();
+	trace_power_end(&it);
 }
 
 /*
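The (ax>>4)+1 passed to trace_power_start() in mwait_idle_with_hints()
decodes the MWAIT hint: bits 7:4 of EAX carry the target C-state minus
one, so hint 0x00 requests C1 and 0x10 requests C2. A sketch of the
mapping the trace point assumes:

	static unsigned int mwait_hint_to_cstate(unsigned long ax)
	{
		return ((ax >> 4) & 0xf) + 1;
	}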
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7854d87b97b2..afba918c623c 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -115,8 +115,13 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern void ftrace_return_caller(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
+extern int ftrace_enable_ftrace_graph_caller(void);
+extern int ftrace_disable_ftrace_graph_caller(void);
+#else
+static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
+static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 #endif
 
 /**
@@ -311,35 +316,77 @@ ftrace_init_module(struct module *mod,
 		   unsigned long *start, unsigned long *end) { }
 #endif
 
+enum {
+	POWER_NONE = 0,
+	POWER_CSTATE = 1,
+	POWER_PSTATE = 2,
+};
+
+struct power_trace {
+#ifdef CONFIG_POWER_TRACER
+	ktime_t			stamp;
+	ktime_t			end;
+	int			type;
+	int			state;
+#endif
+};
+
+#ifdef CONFIG_POWER_TRACER
+extern void trace_power_start(struct power_trace *it, unsigned int type,
+			      unsigned int state);
+extern void trace_power_mark(struct power_trace *it, unsigned int type,
+			     unsigned int state);
+extern void trace_power_end(struct power_trace *it);
+#else
+static inline void trace_power_start(struct power_trace *it, unsigned int type,
+				     unsigned int state) { }
+static inline void trace_power_mark(struct power_trace *it, unsigned int type,
+				    unsigned int state) { }
+static inline void trace_power_end(struct power_trace *it) { }
+#endif
+
+
+/*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+	unsigned long func; /* Current function */
+	int depth;
+};
 
 /*
  * Structure that defines a return function trace.
  */
-struct ftrace_retfunc {
-	unsigned long ret; /* Return address */
+struct ftrace_graph_ret {
 	unsigned long func; /* Current function */
 	unsigned long long calltime;
 	unsigned long long rettime;
 	/* Number of functions that overran the depth limit for current task */
 	unsigned long overrun;
+	int depth;
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of a callback handler of tracing return function */
-typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
+/* Type of the callback handlers for tracing function graph */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef void (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+				 trace_func_graph_ent_t entryfunc);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
 
-extern int register_ftrace_return(trace_function_return_t func);
-/* The current handler in use */
-extern trace_function_return_t ftrace_function_return;
-extern void unregister_ftrace_return(void);
+extern void unregister_ftrace_graph(void);
 
-extern void ftrace_retfunc_init_task(struct task_struct *t);
-extern void ftrace_retfunc_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
 #else
-static inline void ftrace_retfunc_init_task(struct task_struct *t) { }
-static inline void ftrace_retfunc_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 #endif
 
 #endif /* _LINUX_FTRACE_H */
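Together the two callback types give a tracer both edges of every call:
the entry hook fires when the return address is hijacked, the return hook
when return_to_handler runs. A minimal sketch of a handler pair
(my_entry/my_return are illustrative names, not part of this patch):

	static void my_entry(struct ftrace_graph_ent *trace)
	{
		/* trace->func is the function being entered; trace->depth
		 * is its index in the per-task return stack. */
	}

	static void my_return(struct ftrace_graph_ret *trace)
	{
		/* Duration of the call, children included; overrun counts
		 * frames lost once the FTRACE_RETFUNC_DEPTH stack filled. */
		unsigned long long duration = trace->rettime - trace->calltime;

		(void)duration;
	}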
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 0b4df55d7a74..366a054d0b05 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER)
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d02a0ca70ee9..7ad48f2a2758 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1365,7 +1365,7 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head	*scm_work_list;
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* Index of current stored adress in ret_stack */
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
diff --git a/kernel/Makefile b/kernel/Makefile
index 03a45e7e87b7..703cf3b7389c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -21,7 +21,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -pg
 endif
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
 CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
 endif
diff --git a/kernel/fork.c b/kernel/fork.c
index d6e1a3205f62..5f82a999c032 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -140,7 +140,7 @@ void free_task(struct task_struct *tsk)
140 prop_local_destroy_single(&tsk->dirties); 140 prop_local_destroy_single(&tsk->dirties);
141 free_thread_info(tsk->stack); 141 free_thread_info(tsk->stack);
142 rt_mutex_debug_task_free(tsk); 142 rt_mutex_debug_task_free(tsk);
143 ftrace_retfunc_exit_task(tsk); 143 ftrace_graph_exit_task(tsk);
144 free_task_struct(tsk); 144 free_task_struct(tsk);
145} 145}
146EXPORT_SYMBOL(free_task); 146EXPORT_SYMBOL(free_task);
@@ -1271,7 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
-	ftrace_retfunc_init_task(p);
+	ftrace_graph_init_task(p);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	return p;
diff --git a/kernel/sched.c b/kernel/sched.c
index 388d9db044ab..52490bf6b884 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5901,7 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
5901 * The idle tasks have their own, simple scheduling class: 5901 * The idle tasks have their own, simple scheduling class:
5902 */ 5902 */
5903 idle->sched_class = &idle_sched_class; 5903 idle->sched_class = &idle_sched_class;
5904 ftrace_retfunc_init_task(idle); 5904 ftrace_graph_init_task(idle);
5905} 5905}
5906 5906
5907/* 5907/*
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 620feadff67a..8b6b673b4d6c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,7 +12,7 @@ config NOP_TRACER
12config HAVE_FUNCTION_TRACER 12config HAVE_FUNCTION_TRACER
13 bool 13 bool
14 14
15config HAVE_FUNCTION_RET_TRACER 15config HAVE_FUNCTION_GRAPH_TRACER
16 bool 16 bool
17 17
18config HAVE_FUNCTION_TRACE_MCOUNT_TEST 18config HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -63,15 +63,18 @@ config FUNCTION_TRACER
 	  (the bootup default), then the overhead of the instructions is very
 	  small and not measurable even in micro-benchmarks.
 
-config FUNCTION_RET_TRACER
-	bool "Kernel Function return Tracer"
-	depends on HAVE_FUNCTION_RET_TRACER
+config FUNCTION_GRAPH_TRACER
+	bool "Kernel Function Graph Tracer"
+	depends on HAVE_FUNCTION_GRAPH_TRACER
 	depends on FUNCTION_TRACER
 	help
-	  Enable the kernel to trace a function at its return.
-	  It's first purpose is to trace the duration of functions.
-	  This is done by setting the current return address on the thread
-	  info structure of the current task.
+	  Enable the kernel to trace a function at both its entry
+	  and its return.
+	  Its first purpose is to trace the duration of functions and
+	  to draw a call graph for each thread, with some information
+	  such as the return value.
+	  This is done by pushing the current return address onto a
+	  stack of calls kept in the task structure.
 
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
@@ -217,6 +220,17 @@ config BRANCH_TRACER
 
 	  Say N if unsure.
 
+config POWER_TRACER
+	bool "Trace power consumption behavior"
+	depends on DEBUG_KERNEL
+	depends on X86
+	select TRACING
+	help
+	  This tracer helps developers to analyze and optimize the kernel's
+	  power management decisions, specifically the C-state and P-state
+	  behavior.
+
+
 config STACK_TRACER
 	bool "Trace max stack"
 	depends on HAVE_FUNCTION_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cef4bcb4e822..62dc561b6676 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -29,8 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_BTS_TRACER) += trace_bts.o
+obj-$(CONFIG_POWER_TRACER) += trace_power.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53042f118f23..cbf8b09f63a5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,12 +47,12 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
+
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
-/* By default, current tracing type is normal tracing. */
-enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
-
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -61,6 +61,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -70,6 +71,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -86,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (current->pid != ftrace_pid_trace)
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -96,6 +113,7 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
 }
 
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -128,20 +146,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace >= 0) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+		ftrace_trace_function = func;
 #else
-		if (ops->next == &ftrace_list_end)
-			__ftrace_trace_function = ops->func;
-		else
-			__ftrace_trace_function = ftrace_list_func;
+		__ftrace_trace_function = func;
 		ftrace_trace_function = ftrace_test_stop_func;
 #endif
 	}
@@ -182,8 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace >= 0) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -192,6 +227,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace >= 0) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func != ftrace_pid_func)
+			goto out;
+
+		set_ftrace_pid_function(func);
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -211,6 +278,8 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
@@ -395,14 +464,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	unsigned long ip, fl;
 	unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
-		ftrace_addr = (unsigned long)ftrace_caller;
-	else
-		ftrace_addr = (unsigned long)ftrace_return_caller;
-#else
 	ftrace_addr = (unsigned long)ftrace_caller;
-#endif
 
 	ip = rec->ip;
 
@@ -535,6 +597,11 @@ static int __ftrace_modify_code(void *data)
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
+
 	return 0;
 }
 
@@ -545,12 +612,22 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static DEFINE_MUTEX(ftrace_start_lock);
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
+
+	if (!command || !ftrace_enabled)
+		return;
+
+	ftrace_run_update_code(command);
+}
 
+static void ftrace_startup(int command)
+{
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -558,23 +635,13 @@ static void ftrace_startup(void)
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
-	if (saved_ftrace_func != ftrace_trace_function) {
-		saved_ftrace_func = ftrace_trace_function;
-		command |= FTRACE_UPDATE_TRACE_FUNC;
-	}
+	ftrace_startup_enable(command);
 
-	if (!command || !ftrace_enabled)
-		goto out;
-
-	ftrace_run_update_code(command);
- out:
 	mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_shutdown(int command)
 {
-	int command = 0;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -1262,13 +1329,10 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *d_tracer;
 	struct dentry *entry;
 
-	d_tracer = tracing_init_dentry();
-
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
 	if (!entry)
@@ -1295,8 +1359,6 @@ static __init int ftrace_init_debugfs(void)
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
 static int ftrace_convert_nops(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
@@ -1382,12 +1444,101 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup()		do { } while (0)
-# define ftrace_shutdown()		do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)	do { } while (0)
+# define ftrace_shutdown(command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace >= 0)
+		r = sprintf(buf, "%u\n", ftrace_pid_trace);
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (val < 0) {
+		/* disable pid tracing */
+		if (ftrace_pid_trace < 0)
+			goto out;
+		ftrace_pid_trace = -1;
+
+	} else {
+
+		if (ftrace_pid_trace == val)
+			goto out;
+
+		ftrace_pid_trace = val;
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
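The write handler above implements the protocol the documentation relies
on: a non-negative value selects a single pid, a negative value clears the
filter. A small userspace sketch exercising it (assumes debugfs mounted at
/debug, as elsewhere in this patch):

	#include <stdio.h>

	int main(void)
	{
		FILE *f;

		f = fopen("/debug/tracing/set_ftrace_pid", "w");
		if (!f)
			return 1;
		fprintf(f, "%d\n", 3111);	/* trace only this pid */
		fclose(f);

		f = fopen("/debug/tracing/set_ftrace_pid", "w");
		if (!f)
			return 1;
		fprintf(f, "-1\n");	/* negative value clears the filter */
		fclose(f);
		return 0;
	}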
@@ -1422,15 +1573,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
+	ftrace_startup(0);
 
-out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
@@ -1447,7 +1592,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1496,14 +1641,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-static atomic_t ftrace_retfunc_active;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-/* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-	(trace_function_return_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+	(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+	(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1549,7 +1695,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
 	int ret;
@@ -1569,52 +1715,46 @@ static int start_return_tracing(void)
 	return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	/*
-	 * Don't launch return tracing if normal function
-	 * tracing is already running.
-	 */
-	if (ftrace_trace_function != ftrace_stub) {
-		ret = -EBUSY;
-		goto out;
-	}
-	atomic_inc(&ftrace_retfunc_active);
-	ret = start_return_tracing();
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
-	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_function_return = func;
-	ftrace_startup();
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_function_return = (trace_function_return_t)ftrace_stub;
-	ftrace_shutdown();
-	/* Restore normal tracing type */
-	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
@@ -1626,7 +1766,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
 		t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
 	struct ftrace_ret_stack *ret_stack = t->ret_stack;
 
@@ -1638,5 +1778,3 @@ void ftrace_retfunc_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
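With registration now taking both handlers in one call, a tracer hooks the
graph like this; the sketch below uses the trace_graph_entry() and
trace_graph_return() handlers this patch adds to kernel/trace/trace.c:

	static int example_graph_start(void)
	{
		/* return handler first, entry handler second */
		return register_ftrace_graph(&trace_graph_return,
					     &trace_graph_entry);
	}

	static void example_graph_stop(void)
	{
		unregister_ftrace_graph();
	}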
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8df8fdd69c95..5811e0a5f732 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -804,7 +804,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	spin_unlock(&trace_cmdline_lock);
 }
 
-static char *trace_find_cmdline(int pid)
+char *trace_find_cmdline(int pid)
 {
 	char *cmdline = "<...>";
 	unsigned map;
@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_graph_entry(struct trace_array *tr,
 				struct trace_array_cpu *data,
-				struct ftrace_retfunc *trace,
+				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_ret_entry *entry;
+	struct ftrace_graph_ent_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +898,32 @@ static void __trace_function_return(struct trace_array *tr,
 		return;
 	entry	= ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type		= TRACE_FN_RET;
-	entry->ip		= trace->func;
-	entry->parent_ip	= trace->ret;
-	entry->rettime		= trace->rettime;
-	entry->calltime		= trace->calltime;
-	entry->overrun		= trace->overrun;
+	entry->ent.type		= TRACE_GRAPH_ENT;
+	entry->graph_ent	= *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ret_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type		= TRACE_GRAPH_RET;
+	entry->ret		= *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -1177,8 +1197,29 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,12 +1234,12 @@ void trace_function_return(struct ftrace_retfunc *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_return(tr, data, trace, flags, pc);
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
 }
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
-		return print_return_function(iter);
-		break;
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
+		return print_graph_function(iter);
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3abd645e8af2..f96f4e787ff3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -25,9 +25,11 @@ enum trace_type {
 	TRACE_BRANCH,
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
-	TRACE_FN_RET,
+	TRACE_GRAPH_RET,
+	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_BTS,
+	TRACE_POWER,
 
 	__TRACE_LAST_TYPE
 };
@@ -56,14 +58,16 @@ struct ftrace_entry {
 	unsigned long		parent_ip;
 };
 
+/* Function call entry */
+struct ftrace_graph_ent_entry {
+	struct trace_entry	ent;
+	struct ftrace_graph_ent	graph_ent;
+};
+
 /* Function return entry */
-struct ftrace_ret_entry {
+struct ftrace_graph_ret_entry {
 	struct trace_entry ent;
-	unsigned long ip;
-	unsigned long parent_ip;
-	unsigned long long calltime;
-	unsigned long long rettime;
-	unsigned long overrun;
+	struct ftrace_graph_ret ret;
 };
 extern struct tracer boot_tracer;
 
@@ -160,6 +164,11 @@ struct bts_entry {
160 unsigned long to; 164 unsigned long to;
161}; 165};
162 166
167struct trace_power {
168 struct trace_entry ent;
169 struct power_trace state_data;
170};
171
163/* 172/*
164 * trace_flag_type is an enumeration that holds different 173 * trace_flag_type is an enumeration that holds different
165 * states when a trace occurs. These are: 174 * states when a trace occurs. These are:
@@ -264,8 +273,12 @@ extern void __ftrace_bad_type(void);
264 IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ 273 IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
265 IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ 274 IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
266 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ 275 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
267 IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\ 276 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
277 TRACE_GRAPH_ENT); \
278 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
279 TRACE_GRAPH_RET); \
268 IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\ 280 IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
281 IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
269 __ftrace_bad_type(); \ 282 __ftrace_bad_type(); \
270 } while (0) 283 } while (0)
271 284
@@ -397,9 +410,9 @@ void trace_function(struct trace_array *tr,
397 unsigned long ip, 410 unsigned long ip,
398 unsigned long parent_ip, 411 unsigned long parent_ip,
399 unsigned long flags, int pc); 412 unsigned long flags, int pc);
400void
401trace_function_return(struct ftrace_retfunc *trace);
402 413
414void trace_graph_return(struct ftrace_graph_ret *trace);
415void trace_graph_entry(struct ftrace_graph_ent *trace);
403void trace_bts(struct trace_array *tr, 416void trace_bts(struct trace_array *tr,
404 unsigned long from, 417 unsigned long from,
405 unsigned long to); 418 unsigned long to);
@@ -444,6 +457,7 @@ struct tracer_switch_ops {
444 struct tracer_switch_ops *next; 457 struct tracer_switch_ops *next;
445}; 458};
446 459
460char *trace_find_cmdline(int pid);
447#endif /* CONFIG_CONTEXT_SWITCH_TRACER */ 461#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
448 462
449#ifdef CONFIG_DYNAMIC_FTRACE 463#ifdef CONFIG_DYNAMIC_FTRACE
@@ -489,11 +503,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
489extern unsigned long trace_flags; 503extern unsigned long trace_flags;
490 504
491/* Standard output formatting function used for function return traces */ 505/* Standard output formatting function used for function return traces */
492#ifdef CONFIG_FUNCTION_RET_TRACER 506#ifdef CONFIG_FUNCTION_GRAPH_TRACER
493extern enum print_line_t print_return_function(struct trace_iterator *iter); 507extern enum print_line_t print_graph_function(struct trace_iterator *iter);
494#else 508#else
495static inline enum print_line_t 509static inline enum print_line_t
496print_return_function(struct trace_iterator *iter) 510print_graph_function(struct trace_iterator *iter)
497{ 511{
498 return TRACE_TYPE_UNHANDLED; 512 return TRACE_TYPE_UNHANDLED;
499} 513}
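The two new entry types wrap the graph structures this patch introduces in include/linux/ftrace.h. Reconstructed from the fields the tracer reads below (func, depth, calltime, rettime, overrun), they look roughly as follows; treat this as a summary sketch, not the authoritative header:

	struct ftrace_graph_ent {
		unsigned long func;		/* address of the entered function */
		int depth;			/* call depth, drives the indentation */
	};

	struct ftrace_graph_ret {
		unsigned long func;
		unsigned long long calltime;
		unsigned long long rettime;	/* rettime - calltime is the printed duration */
		unsigned long overrun;		/* return-stack overruns for the task */
		int depth;
	};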
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644
index 000000000000..d31d695174aa
--- /dev/null
+++ b/kernel/trace/trace_functions_graph.c
@@ -0,0 +1,175 @@
1/*
2 *
3 * Function graph tracer.
4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/fs.h>
13
14#include "trace.h"
15
16#define TRACE_GRAPH_INDENT 2
17
18#define TRACE_GRAPH_PRINT_OVERRUN 0x1
19static struct tracer_opt trace_opts[] = {
20 /* Display overruns or not */
21 { TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
22 { } /* Empty entry */
23};
24
25static struct tracer_flags tracer_flags = {
26 .val = 0, /* Don't display overruns by default */
27 .opts = trace_opts
28};
29
 30/* pid of the last trace processed */
31static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
32
33static int graph_trace_init(struct trace_array *tr)
34{
35 int cpu, ret;
36
37 for_each_online_cpu(cpu)
38 tracing_reset(tr, cpu);
39
40 ret = register_ftrace_graph(&trace_graph_return,
41 &trace_graph_entry);
42 if (ret)
43 return ret;
44 tracing_start_cmdline_record();
45
46 return 0;
47}
48
49static void graph_trace_reset(struct trace_array *tr)
50{
51 tracing_stop_cmdline_record();
52 unregister_ftrace_graph();
53}
54
 55/* If the pid changed since the last trace, output a thread-switch header */
56static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
57{
58 char *comm;
59
60 if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
61 return 1;
62
63 last_pid[cpu] = pid;
64 comm = trace_find_cmdline(pid);
65
66 return trace_seq_printf(s, "\nCPU[%03d]"
67 " ------------8<---------- thread %s-%d"
68 " ------------8<----------\n\n",
69 cpu, comm, pid);
70}
71
72static enum print_line_t
73print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
74 struct trace_entry *ent, int cpu)
75{
76 int i;
77 int ret;
78
79 if (!verif_pid(s, ent->pid, cpu))
80 return TRACE_TYPE_PARTIAL_LINE;
81
82 ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
83 if (!ret)
84 return TRACE_TYPE_PARTIAL_LINE;
85
86 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
87 ret = trace_seq_printf(s, " ");
88 if (!ret)
89 return TRACE_TYPE_PARTIAL_LINE;
90 }
91
92 ret = seq_print_ip_sym(s, call->func, 0);
93 if (!ret)
94 return TRACE_TYPE_PARTIAL_LINE;
95
96 ret = trace_seq_printf(s, "() {\n");
97 if (!ret)
98 return TRACE_TYPE_PARTIAL_LINE;
99 return TRACE_TYPE_HANDLED;
100}
101
102static enum print_line_t
103print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
104 struct trace_entry *ent, int cpu)
105{
106 int i;
107 int ret;
108
109 if (!verif_pid(s, ent->pid, cpu))
110 return TRACE_TYPE_PARTIAL_LINE;
111
112 ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
113 if (!ret)
114 return TRACE_TYPE_PARTIAL_LINE;
115
116 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
117 ret = trace_seq_printf(s, " ");
118 if (!ret)
119 return TRACE_TYPE_PARTIAL_LINE;
120 }
121
122 ret = trace_seq_printf(s, "} ");
123 if (!ret)
124 return TRACE_TYPE_PARTIAL_LINE;
125
126 ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
127 if (!ret)
128 return TRACE_TYPE_PARTIAL_LINE;
129
130 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
131 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
132 trace->overrun);
133 if (!ret)
134 return TRACE_TYPE_PARTIAL_LINE;
135 }
136 return TRACE_TYPE_HANDLED;
137}
138
139enum print_line_t
140print_graph_function(struct trace_iterator *iter)
141{
142 struct trace_seq *s = &iter->seq;
143 struct trace_entry *entry = iter->ent;
144
145 switch (entry->type) {
146 case TRACE_GRAPH_ENT: {
147 struct ftrace_graph_ent_entry *field;
148 trace_assign_type(field, entry);
149 return print_graph_entry(&field->graph_ent, s, entry,
150 iter->cpu);
151 }
152 case TRACE_GRAPH_RET: {
153 struct ftrace_graph_ret_entry *field;
154 trace_assign_type(field, entry);
155 return print_graph_return(&field->ret, s, entry, iter->cpu);
156 }
157 default:
158 return TRACE_TYPE_UNHANDLED;
159 }
160}
161
162static struct tracer graph_trace __read_mostly = {
163 .name = "function-graph",
164 .init = graph_trace_init,
165 .reset = graph_trace_reset,
166 .print_line = print_graph_function,
167 .flags = &tracer_flags,
168};
169
170static __init int init_graph_trace(void)
171{
172 return register_tracer(&graph_trace);
173}
174
175device_initcall(init_graph_trace);
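Taken together, verif_pid(), print_graph_entry() and print_graph_return() produce a depth-indented, brace-bracketed view of the call flow. A hypothetical two-level trace on CPU 3 would render along these lines (thread name, functions and durations are invented for illustration):

	CPU[003] ------------8<---------- thread bash-2015 ------------8<----------

	CPU[003] sys_getuid() {
	CPU[003]   current_uid() {
	CPU[003]   } 450
	CPU[003] } 1893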
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
deleted file mode 100644
index e00d64509c9c..000000000000
--- a/kernel/trace/trace_functions_return.c
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 *
3 * Function return tracer.
4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/fs.h>
13
14#include "trace.h"
15
16
17#define TRACE_RETURN_PRINT_OVERRUN 0x1
18static struct tracer_opt trace_opts[] = {
19 /* Display overruns or not */
20 { TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
21 { } /* Empty entry */
22};
23
24static struct tracer_flags tracer_flags = {
25 .val = 0, /* Don't display overruns by default */
26 .opts = trace_opts
27};
28
29
30static int return_trace_init(struct trace_array *tr)
31{
32 int cpu;
33 for_each_online_cpu(cpu)
34 tracing_reset(tr, cpu);
35
36 return register_ftrace_return(&trace_function_return);
37}
38
39static void return_trace_reset(struct trace_array *tr)
40{
41 unregister_ftrace_return();
42}
43
44
45enum print_line_t
46print_return_function(struct trace_iterator *iter)
47{
48 struct trace_seq *s = &iter->seq;
49 struct trace_entry *entry = iter->ent;
50 struct ftrace_ret_entry *field;
51 int ret;
52
53 if (entry->type == TRACE_FN_RET) {
54 trace_assign_type(field, entry);
55 ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
56 if (!ret)
57 return TRACE_TYPE_PARTIAL_LINE;
58
59 ret = seq_print_ip_sym(s, field->ip,
60 trace_flags & TRACE_ITER_SYM_MASK);
61 if (!ret)
62 return TRACE_TYPE_PARTIAL_LINE;
63
64 ret = trace_seq_printf(s, " (%llu ns)",
65 field->rettime - field->calltime);
66 if (!ret)
67 return TRACE_TYPE_PARTIAL_LINE;
68
69 if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
70 ret = trace_seq_printf(s, " (Overruns: %lu)",
71 field->overrun);
72 if (!ret)
73 return TRACE_TYPE_PARTIAL_LINE;
74 }
75
76 ret = trace_seq_printf(s, "\n");
77 if (!ret)
78 return TRACE_TYPE_PARTIAL_LINE;
79
80 return TRACE_TYPE_HANDLED;
81 }
82 return TRACE_TYPE_UNHANDLED;
83}
84
85static struct tracer return_trace __read_mostly = {
86 .name = "return",
87 .init = return_trace_init,
88 .reset = return_trace_reset,
89 .print_line = print_return_function,
90 .flags = &tracer_flags,
91};
92
93static __init int init_return_trace(void)
94{
95 return register_tracer(&return_trace);
96}
97
98device_initcall(init_return_trace);
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
new file mode 100644
index 000000000000..a7172a352f62
--- /dev/null
+++ b/kernel/trace/trace_power.c
@@ -0,0 +1,179 @@
1/*
2 * ring buffer based C-state tracer
3 *
4 * Arjan van de Ven <arjan@linux.intel.com>
5 * Copyright (C) 2008 Intel Corporation
6 *
7 * Much is borrowed from trace_boot.c which is
8 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
9 *
10 */
11
12#include <linux/init.h>
13#include <linux/debugfs.h>
14#include <linux/ftrace.h>
15#include <linux/kallsyms.h>
16#include <linux/module.h>
17
18#include "trace.h"
19
20static struct trace_array *power_trace;
21static int __read_mostly trace_power_enabled;
22
23
24static void start_power_trace(struct trace_array *tr)
25{
26 trace_power_enabled = 1;
27}
28
29static void stop_power_trace(struct trace_array *tr)
30{
31 trace_power_enabled = 0;
32}
33
34
35static int power_trace_init(struct trace_array *tr)
36{
37 int cpu;
38 power_trace = tr;
39
40 trace_power_enabled = 1;
41
42 for_each_cpu_mask(cpu, cpu_possible_map)
43 tracing_reset(tr, cpu);
44 return 0;
45}
46
47static enum print_line_t power_print_line(struct trace_iterator *iter)
48{
49 int ret = 0;
50 struct trace_entry *entry = iter->ent;
 51 struct trace_power *field;
52 struct power_trace *it;
53 struct trace_seq *s = &iter->seq;
54 struct timespec stamp;
55 struct timespec duration;
56
 57 if (entry->type != TRACE_POWER)
 58 return TRACE_TYPE_UNHANDLED;
 59
 60 trace_assign_type(field, entry);
 61 it = &field->state_data;
 62 stamp = ktime_to_timespec(it->stamp);
 63 duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
 64
 65 if (it->type == POWER_CSTATE)
 66 ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
 67 stamp.tv_sec,
 68 stamp.tv_nsec,
 69 it->state, iter->cpu,
 70 duration.tv_sec,
 71 duration.tv_nsec);
 72 if (it->type == POWER_PSTATE)
 73 ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
 74 stamp.tv_sec,
 75 stamp.tv_nsec,
 76 it->state, iter->cpu);
 77 if (!ret)
 78 return TRACE_TYPE_PARTIAL_LINE;
 79 return TRACE_TYPE_HANDLED;
 80}
81
82static struct tracer power_tracer __read_mostly =
83{
84 .name = "power",
85 .init = power_trace_init,
86 .start = start_power_trace,
87 .stop = stop_power_trace,
88 .reset = stop_power_trace,
89 .print_line = power_print_line,
90};
91
 92static __init int init_power_trace(void)
93{
94 return register_tracer(&power_tracer);
95}
96device_initcall(init_power_trace);
97
98void trace_power_start(struct power_trace *it, unsigned int type,
99 unsigned int level)
100{
101 if (!trace_power_enabled)
102 return;
103
104 memset(it, 0, sizeof(struct power_trace));
105 it->state = level;
106 it->type = type;
107 it->stamp = ktime_get();
108}
109EXPORT_SYMBOL_GPL(trace_power_start);
110
111
112void trace_power_end(struct power_trace *it)
113{
114 struct ring_buffer_event *event;
115 struct trace_power *entry;
116 struct trace_array_cpu *data;
117 unsigned long irq_flags;
118 struct trace_array *tr = power_trace;
119
120 if (!trace_power_enabled)
121 return;
122
123 preempt_disable();
124 it->end = ktime_get();
125 data = tr->data[smp_processor_id()];
126
127 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
128 &irq_flags);
129 if (!event)
130 goto out;
131 entry = ring_buffer_event_data(event);
132 tracing_generic_entry_update(&entry->ent, 0, 0);
133 entry->ent.type = TRACE_POWER;
134 entry->state_data = *it;
135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
136
137 trace_wake_up();
138
139 out:
140 preempt_enable();
141}
142EXPORT_SYMBOL_GPL(trace_power_end);
143
144void trace_power_mark(struct power_trace *it, unsigned int type,
145 unsigned int level)
146{
147 struct ring_buffer_event *event;
148 struct trace_power *entry;
149 struct trace_array_cpu *data;
150 unsigned long irq_flags;
151 struct trace_array *tr = power_trace;
152
153 if (!trace_power_enabled)
154 return;
155
156 memset(it, 0, sizeof(struct power_trace));
157 it->state = level;
158 it->type = type;
159 it->stamp = ktime_get();
160 preempt_disable();
161 it->end = it->stamp;
162 data = tr->data[smp_processor_id()];
163
164 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
165 &irq_flags);
166 if (!event)
167 goto out;
168 entry = ring_buffer_event_data(event);
169 tracing_generic_entry_update(&entry->ent, 0, 0);
170 entry->ent.type = TRACE_POWER;
171 entry->state_data = *it;
172 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
173
174 trace_wake_up();
175
176 out:
177 preempt_enable();
178}
179EXPORT_SYMBOL_GPL(trace_power_mark);
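The exported trace_power_start()/trace_power_end() pair is meant to bracket a low-power residency: the start call only stamps the on-stack power_trace, and the end call commits a single TRACE_POWER event. A hedged sketch of a caller (the function name and body are placeholders; in this patch the real call sites sit in the x86 idle and cpufreq paths):

	#include <linux/ftrace.h>

	static void example_enter_cstate(int state)
	{
		struct power_trace it;

		/* stamp the start; nothing is written to the ring buffer yet */
		trace_power_start(&it, POWER_CSTATE, state);

		/* ... architecture-specific low-power entry would go here ... */

		/* stamp the end and commit one TRACE_POWER event */
		trace_power_end(&it);
	}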
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 0197e2f6b544..0b1dc9f9bb06 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -112,6 +112,8 @@ my ($arch, $bits, $objdump, $objcopy, $cc,
112# Acceptable sections to record. 112# Acceptable sections to record.
113my %text_sections = ( 113my %text_sections = (
114 ".text" => 1, 114 ".text" => 1,
115 ".sched.text" => 1,
116 ".spinlock.text" => 1,
115); 117);
116 118
117$objdump = "objdump" if ((length $objdump) == 0); 119$objdump = "objdump" if ((length $objdump) == 0);
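The two sections added above let recordmcount.pl record mcount call sites in functions that are deliberately placed outside plain .text. A hedged sketch of such a function (the __sched annotation from include/linux/sched.h expands to a .sched.text section attribute; the function itself is invented):

	#include <linux/sched.h>

	/* __sched moves this function into .sched.text, which the script now scans */
	static int __sched example_wait_for_event(void)
	{
		schedule();
		return 0;
	}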
diff --git a/scripts/trace/power.pl b/scripts/trace/power.pl
new file mode 100644
index 000000000000..4f729b3501e0
--- /dev/null
+++ b/scripts/trace/power.pl
@@ -0,0 +1,108 @@
1#!/usr/bin/perl
2
3# Copyright 2008, Intel Corporation
4#
5# This file is part of the Linux kernel
6#
7# This program file is free software; you can redistribute it and/or modify it
8# under the terms of the GNU General Public License as published by the
9# Free Software Foundation; version 2 of the License.
10#
11# This program is distributed in the hope that it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14# for more details.
15#
16# You should have received a copy of the GNU General Public License
17# along with this program in a file named COPYING; if not, write to the
18# Free Software Foundation, Inc.,
19# 51 Franklin Street, Fifth Floor,
20# Boston, MA 02110-1301 USA
21#
22# Authors:
23# Arjan van de Ven <arjan@linux.intel.com>
24
25
26#
 27# This script turns C-state ftrace output into an SVG graphic that shows
 28# historic C-state and P-state information.
29#
30#
31# cat /sys/kernel/debug/tracing/trace | perl power.pl > out.svg
32#
33
34my @styles;
35my $base = 0;
36
37my @pstate_last;
38my @pstate_level;
39
40$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
41$styles[1] = "fill:rgb(0,255,0);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
42$styles[2] = "fill:rgb(255,0,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
43$styles[3] = "fill:rgb(255,255,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
44$styles[4] = "fill:rgb(255,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
45$styles[5] = "fill:rgb(0,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
46$styles[6] = "fill:rgb(0,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
47$styles[7] = "fill:rgb(0,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
48$styles[8] = "fill:rgb(0,25,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
49
50
51print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
52print "<svg width=\"10000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
53
54my $scale = 30000.0;
55while (<>) {
56 my $line = $_;
57 if ($line =~ /([0-9\.]+)\] CSTATE: Going to C([0-9]) on cpu ([0-9]+) for ([0-9\.]+)/) {
58 if ($base == 0) {
59 $base = $1;
60 }
61 my $time = $1 - $base;
62 $time = $time * $scale;
63 my $C = $2;
64 my $cpu = $3;
65 my $y = 400 * $cpu;
66 my $duration = $4 * $scale;
67 my $msec = int($4 * 100000)/100.0;
68 my $height = $C * 20;
 69 my $style = $styles[$C];
70
71 $y = $y + 140 - $height;
72
 73 my $x2 = $time + 4;
 74 my $y2 = $y + 4;
75
76
77 print "<rect x=\"$time\" width=\"$duration\" y=\"$y\" height=\"$height\" style=\"$style\"/>\n";
78 print "<text transform=\"translate($x2,$y2) rotate(90)\">C$C $msec</text>\n";
79 }
80 if ($line =~ /([0-9\.]+)\] PSTATE: Going to P([0-9]) on cpu ([0-9]+)/) {
81 my $time = $1 - $base;
82 my $state = $2;
83 my $cpu = $3;
84
85 if (defined($pstate_last[$cpu])) {
86 my $from = $pstate_last[$cpu];
87 my $oldstate = $pstate_state[$cpu];
88 my $duration = ($time-$from) * $scale;
89
90 $from = $from * $scale;
91 my $to = $from + $duration;
92 my $height = 140 - ($oldstate * (140/8));
93
94 my $y = 400 * $cpu + 200 + $height;
95 my $y2 = $y+4;
96 my $style = $styles[8];
97
98 print "<rect x=\"$from\" y=\"$y\" width=\"$duration\" height=\"5\" style=\"$style\"/>\n";
99 print "<text transform=\"translate($from,$y2)\">P$oldstate (cpu $cpu)</text>\n";
 100 }
101
102 $pstate_last[$cpu] = $time;
103 $pstate_state[$cpu] = $state;
104 }
105}
106
107
108print "</svg>\n";
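For reference, the two regular expressions above are keyed to the exact lines power_print_line() emits; hypothetical input the script would parse:

	[    5.123456789] CSTATE: Going to C3 on cpu 0 for 0.000123456
	[    5.234567890] PSTATE: Going to P2 on cpu 1

Each CSTATE line becomes a rectangle whose height encodes the C-state depth, while consecutive PSTATE lines per cpu are turned into bars covering the interval spent at the previous P-state.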