 32 files changed, 1510 insertions(+), 472 deletions(-)
diff --git a/Documentation/ftrace.txt b/Documentation/ftrace.txt
index 35a78bc6651d..de05042f11b9 100644
--- a/Documentation/ftrace.txt
+++ b/Documentation/ftrace.txt
@@ -127,6 +127,8 @@ of ftrace. Here is a list of some of the key files:
 	be traced. If a function exists in both set_ftrace_filter
 	and set_ftrace_notrace, the function will _not_ be traced.
 
+  set_ftrace_pid: Have the function tracer only trace a single thread.
+
   available_filter_functions: This lists the functions that ftrace
 	has processed and can trace. These are the function
 	names that you can pass to "set_ftrace_filter" or
@@ -1073,6 +1075,83 @@ For simple one time traces, the above is sufficent. For anything else,
 a search through /proc/mounts may be needed to find where the debugfs
 file-system is mounted.
 
+
+Single thread tracing
+---------------------
+
+By writing into /debug/tracing/set_ftrace_pid you can trace a
+single thread. For example:
+
+# cat /debug/tracing/set_ftrace_pid
+no pid
+# echo 3111 > /debug/tracing/set_ftrace_pid
+# cat /debug/tracing/set_ftrace_pid
+3111
+# echo function > /debug/tracing/current_tracer
+# cat /debug/tracing/trace | head
+# tracer: function
+#
+#           TASK-PID    CPU#    TIMESTAMP  FUNCTION
+#              | |       |          |         |
+    yum-updatesd-3111  [003]  1637.254676: finish_task_switch <-thread_return
+    yum-updatesd-3111  [003]  1637.254681: hrtimer_cancel <-schedule_hrtimeout_range
+    yum-updatesd-3111  [003]  1637.254682: hrtimer_try_to_cancel <-hrtimer_cancel
+    yum-updatesd-3111  [003]  1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel
+    yum-updatesd-3111  [003]  1637.254685: fget_light <-do_sys_poll
+    yum-updatesd-3111  [003]  1637.254686: pipe_poll <-do_sys_poll
+# echo -1 > /debug/tracing/set_ftrace_pid
+# cat /debug/tracing/trace | head
+# tracer: function
+#
+#           TASK-PID    CPU#    TIMESTAMP  FUNCTION
+#              | |       |          |         |
+##### CPU 3 buffer started ####
+    yum-updatesd-3111  [003]  1701.957688: free_poll_entry <-poll_freewait
+    yum-updatesd-3111  [003]  1701.957689: remove_wait_queue <-free_poll_entry
+    yum-updatesd-3111  [003]  1701.957691: fput <-free_poll_entry
+    yum-updatesd-3111  [003]  1701.957692: audit_syscall_exit <-sysret_audit
+    yum-updatesd-3111  [003]  1701.957693: path_put <-audit_syscall_exit
+
+If you want to trace the functions a program calls as it executes,
+you could use something like this simple program:
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main (int argc, char **argv)
+{
+	if (argc < 2)
+		exit(-1);
+
+	if (fork() > 0) {
+		int fd, ffd;
+		char line[64];
+		int s;
+
+		ffd = open("/debug/tracing/current_tracer", O_WRONLY);
+		if (ffd < 0)
+			exit(-1);
+		write(ffd, "nop", 3);
+
+		fd = open("/debug/tracing/set_ftrace_pid", O_WRONLY);
+		s = sprintf(line, "%d\n", getpid());
+		write(fd, line, s);
+
+		write(ffd, "function", 8);
+
+		close(fd);
+		close(ffd);
+
+		execvp(argv[1], argv+1);
+	}
+
+	return 0;
+}
+
 dynamic ftrace
 --------------
 
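A slightly more defensive variant of the helper added above is sketched
below; it is not part of the patch. It checks every write, drops the
fork (the pid survives exec anyway), and assumes the same /debug mount
point, which may differ on other systems:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* write val to path, aborting loudly on any failure */
	static void write_file(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0) {
			perror(path);
			exit(1);
		}
		close(fd);
	}

	int main(int argc, char **argv)
	{
		char pid[32];

		if (argc < 2)
			exit(1);

		/* quiesce the tracer, filter on our own pid, re-enable */
		write_file("/debug/tracing/current_tracer", "nop");
		snprintf(pid, sizeof(pid), "%d\n", (int)getpid());
		write_file("/debug/tracing/set_ftrace_pid", pid);
		write_file("/debug/tracing/current_tracer", "function");

		execvp(argv[1], argv + 1);	/* pid is preserved across exec */
		perror("execvp");
		return 1;
	}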
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e49a4fd718fe..0842b1127684 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,7 +29,7 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_RET_TRACER if X86_32
+	select HAVE_FUNCTION_GRAPH_TRACER if X86_32
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 754a3e082f94..7e61b4ceb9a4 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,7 +28,7 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef __ASSEMBLY__
 
@@ -51,6 +51,6 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 #endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index af2bc36ca1c4..64939a0c3986 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,7 +14,7 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # Don't trace __switch_to() but let it for function tracer
 CFLAGS_REMOVE_process_32.o = -pg
 endif
@@ -70,7 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
-obj-$(CONFIG_FUNCTION_RET_TRACER)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 8e48c5d4467d..88ea02dcb622 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,6 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
+#include <linux/ftrace.h>
 
 #include <linux/acpi.h>
 #include <acpi/processor.h>
@@ -391,6 +392,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	unsigned int i;
 	int result = 0;
+	struct power_trace it;
 
 	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
@@ -427,6 +429,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		}
 	}
 
+	trace_power_mark(&it, POWER_PSTATE, next_perf_state);
+
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 74defe21ba42..958af86186c4 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1174,6 +1174,11 @@ ftrace_call:
 	popl %edx
 	popl %ecx
 	popl %eax
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+	jmp ftrace_stub
+#endif
 
 .globl ftrace_stub
 ftrace_stub:
@@ -1188,9 +1193,9 @@ ENTRY(mcount)
 
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	cmpl $ftrace_stub, ftrace_function_return
-	jnz ftrace_return_caller
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpl $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1215,8 +1220,8 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-ENTRY(ftrace_return_caller)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
 	cmpl $0, function_trace_stop
 	jne ftrace_stub
 
@@ -1230,7 +1235,7 @@ ENTRY(ftrace_return_caller)
 	popl %ecx
 	popl %eax
 	ret
-END(ftrace_return_caller)
+END(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
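The mcount dispatch above is easier to see in C. A rough sketch of the
control flow (simplified; it ignores register save/restore and the
dynamic-ftrace variant, and is not code from the patch):

	/* pseudo-C for the non-dynamic mcount path in entry_32.S */
	void mcount_sketch(void)
	{
		if (ftrace_trace_function != ftrace_stub) {
			trace();		/* plain function tracer */
			return;
		}
	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if (ftrace_graph_return != ftrace_stub)
			ftrace_graph_caller();	/* divert the return address */
	#endif
	}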
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index bb137f7297ed..7ef914e6a2f6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -111,7 +111,6 @@ static void ftrace_mod_code(void)
 	 */
 	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
 					     MCOUNT_INSN_SIZE);
-
 }
 
 void ftrace_nmi_enter(void)
@@ -323,9 +322,53 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod_jmp(unsigned long ip,
+			  int old_offset, int new_offset)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+		return -EINVAL;
+
+	*(int *)(&code[1]) = new_offset;
+
+	if (do_ftrace_mod_code(ip, &code))
+		return -EPERM;
+
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	int old_offset, new_offset;
+
+	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+
+	return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
 
-#ifndef CONFIG_DYNAMIC_FTRACE
+#else /* CONFIG_DYNAMIC_FTRACE */
 
 /*
  * These functions are picked from those used on
@@ -343,11 +386,12 @@ void ftrace_nmi_exit(void)
 {
 	atomic_dec(&in_nmi);
 }
+
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
 /* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func)
+				unsigned long func, int *depth)
 {
 	int index;
 
@@ -365,21 +409,22 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
+	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
 
 	index = current->curr_ret_stack;
 	*ret = current->ret_stack[index].ret;
-	*func = current->ret_stack[index].func;
-	*time = current->ret_stack[index].calltime;
-	*overrun = atomic_read(&current->trace_overrun);
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
 	current->curr_ret_stack--;
 }
 
@@ -389,13 +434,14 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
  */
 unsigned long ftrace_return_to_handler(void)
 {
-	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-				&trace.overrun);
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	pop_return_trace(&trace, &ret);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_function_return(&trace);
+	ftrace_graph_return(&trace);
 
-	return trace.ret;
+	return ret;
 }
 
 /*
@@ -407,6 +453,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long old;
 	unsigned long long calltime;
 	int faulted;
+	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
@@ -440,20 +487,27 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	);
 
 	if (WARN_ON(faulted)) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		return;
 	}
 
 	if (WARN_ON(!__kernel_text_address(old))) {
-		unregister_ftrace_return();
+		unregister_ftrace_graph();
 		*parent = old;
 		return;
 	}
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+	if (push_return_trace(old, calltime,
+				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+	ftrace_graph_entry(&trace);
+
 }
 
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
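ftrace_mod_jmp() above patches the 32-bit displacement of an existing
5-byte "jmp rel32" (opcode 0xe9). The arithmetic, pulled out as a
standalone sketch (the names here are illustrative, not kernel API):

	#include <stdint.h>

	enum { JMP_INSN_SIZE = 5 };	/* == MCOUNT_INSN_SIZE on x86 */

	/* rel32 is relative to the address of the *next* instruction */
	static int32_t jmp_rel32(uintptr_t ip, uintptr_t target)
	{
		return (int32_t)(target - (ip + JMP_INSN_SIZE));
	}

Enabling the graph caller therefore only swaps the stored displacement
from one that reaches ftrace_stub to one that reaches
ftrace_graph_caller, after checking that the old bytes still match.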
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c622772744d8..c27af49a4ede 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/clockchips.h>
+#include <linux/ftrace.h>
 #include <asm/system.h>
 
 unsigned long idle_halt;
@@ -100,6 +101,9 @@ static inline int hlt_use_halt(void)
 void default_idle(void)
 {
 	if (hlt_use_halt()) {
+		struct power_trace it;
+
+		trace_power_start(&it, POWER_CSTATE, 1);
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
 		 * TS_POLLING-cleared state must be visible before we
@@ -112,6 +116,7 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(&it);
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -154,24 +159,31 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
+	struct power_trace it;
+
+	trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
 	if (!need_resched()) {
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
 			__mwait(ax, cx);
 	}
+	trace_power_end(&it);
 }
 
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
+	struct power_trace it;
 	if (!need_resched()) {
+		trace_power_start(&it, POWER_CSTATE, 1);
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(&it);
 	} else
 		local_irq_enable();
 }
@@ -183,9 +195,13 @@ static void mwait_idle(void)
 */
 static void poll_idle(void)
 {
+	struct power_trace it;
+
+	trace_power_start(&it, POWER_CSTATE, 0);
 	local_irq_enable();
 	while (!need_resched())
 		cpu_relax();
+	trace_power_end(&it);
 }
 
 /*
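All the idle and cpufreq hooks in this patch follow one pattern. A
sketch condensed from the hunks above (the surrounding function names
are illustrative, the trace calls are the ones the patch adds):

	static void idle_sketch(void)
	{
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, 1);	/* entering C1 */
		/* ... hlt/mwait until woken ... */
		trace_power_end(&it);	/* the paired end bounds the residency */
	}

	static void pstate_sketch(unsigned int next_perf_state)
	{
		struct power_trace it;

		/* a frequency change is a point event, hence a single mark */
		trace_power_mark(&it, POWER_PSTATE, next_perf_state);
	}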
diff --git a/block/Kconfig b/block/Kconfig
index 1ab7c15c8d7a..290b219fad9c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -47,6 +47,7 @@ config BLK_DEV_IO_TRACE
 	depends on SYSFS
 	select RELAY
 	select DEBUG_FS
+	select TRACEPOINTS
 	help
 	  Say Y here if you want to be able to trace the block layer actions
 	  on a given queue. Tracing allows you to see any traffic happening
diff --git a/block/blk-core.c b/block/blk-core.c
index 10e8a64a5a5b..0c06cf5aaaf8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,9 +28,23 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"
 
+DEFINE_TRACE(block_plug);
+DEFINE_TRACE(block_unplug_io);
+DEFINE_TRACE(block_unplug_timer);
+DEFINE_TRACE(block_getrq);
+DEFINE_TRACE(block_sleeprq);
+DEFINE_TRACE(block_rq_requeue);
+DEFINE_TRACE(block_bio_backmerge);
+DEFINE_TRACE(block_bio_frontmerge);
+DEFINE_TRACE(block_bio_queue);
+DEFINE_TRACE(block_rq_complete);
+DEFINE_TRACE(block_remap);	/* Also used in drivers/md/dm.c */
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+
 static int __make_request(struct request_queue *q, struct bio *bio);
 
 /*
@@ -205,7 +219,7 @@ void blk_plug_device(struct request_queue *q)
 
 	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+		trace_block_plug(q);
 	}
 }
 EXPORT_SYMBOL(blk_plug_device);
@@ -292,9 +306,7 @@ void blk_unplug_work(struct work_struct *work)
 	struct request_queue *q =
 		container_of(work, struct request_queue, unplug_work);
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_io(q);
 	q->unplug_fn(q);
 }
 
@@ -302,9 +314,7 @@ void blk_unplug_timeout(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_timer(q);
 	kblockd_schedule_work(q, &q->unplug_work);
 }
 
@@ -314,9 +324,7 @@ void blk_unplug(struct request_queue *q)
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
 	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+		trace_block_unplug_io(q);
 		q->unplug_fn(q);
 	}
 }
@@ -822,7 +830,7 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+	trace_block_getrq(q, bio, rw);
 out:
 	return rq;
 }
@@ -848,7 +856,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		trace_block_sleeprq(q, bio, rw);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -928,7 +936,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	trace_block_rq_requeue(q, rq);
 
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
@@ -1167,7 +1175,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_back_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		trace_block_bio_backmerge(q, bio);
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
@@ -1186,7 +1194,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_front_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		trace_block_bio_frontmerge(q, bio);
 
 		bio->bi_next = req->bio;
 		req->bio = bio;
@@ -1269,7 +1277,7 @@ static inline void blk_partition_remap(struct bio *bio)
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
 				    bdev->bd_dev, bio->bi_sector,
 				    bio->bi_sector - p->start_sect);
 	}
@@ -1441,10 +1449,10 @@ end_io:
 			goto end_io;
 
 		if (old_sector != -1)
-			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+			trace_block_remap(q, bio, old_dev, bio->bi_sector,
 					    old_sector);
 
-		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+		trace_block_bio_queue(q, bio);
 
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
@@ -1656,7 +1664,7 @@ static int __end_that_request_first(struct request *req, int error,
 	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
 
-	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+	trace_block_rq_complete(req->q, req);
 
 	/*
 	 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
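The trace_block_*() calls introduced above are tracepoints: stubs that
cost almost nothing until a probe is attached. Their declarations live
in <trace/block.h>, which is not part of this diff; with the tracepoint
macros of this kernel generation they would presumably look roughly
like this (an assumed sketch, not quoted from the real header):

	#include <linux/tracepoint.h>

	DECLARE_TRACE(block_plug,
		TPPROTO(struct request_queue *q),
		TPARGS(q));

	DECLARE_TRACE(block_getrq,
		TPPROTO(struct request_queue *q, struct bio *bio, int rw),
		TPARGS(q, bio, rw));

Each DEFINE_TRACE(name) in a .c file then instantiates the tracepoint
that the matching trace_name() call site fires.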
diff --git a/block/blktrace.c b/block/blktrace.c
index 85049a7e7a17..b0a2cae886db 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -23,10 +23,18 @@
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
 #include <linux/time.h>
+#include <trace/block.h>
 #include <asm/uaccess.h>
 
 static unsigned int blktrace_seq __read_mostly = 1;
 
+/* Global reference count of probes */
+static DEFINE_MUTEX(blk_probe_mutex);
+static atomic_t blk_probes_ref = ATOMIC_INIT(0);
+
+static int blk_register_tracepoints(void);
+static void blk_unregister_tracepoints(void);
+
 /*
  * Send out a notify message.
  */
@@ -119,7 +127,7 @@ static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
-void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
 {
 	struct task_struct *tsk = current;
@@ -177,8 +185,6 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	local_irq_restore(flags);
 }
 
-EXPORT_SYMBOL_GPL(__blk_add_trace);
-
 static struct dentry *blk_tree_root;
 static DEFINE_MUTEX(blk_tree_mutex);
 static unsigned int root_users;
@@ -237,6 +243,10 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
+	mutex_lock(&blk_probe_mutex);
+	if (atomic_dec_and_test(&blk_probes_ref))
+		blk_unregister_tracepoints();
+	mutex_unlock(&blk_probe_mutex);
 }
 
 int blk_trace_remove(struct request_queue *q)
@@ -428,6 +438,14 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	bt->pid = buts->pid;
 	bt->trace_state = Blktrace_setup;
 
+	mutex_lock(&blk_probe_mutex);
+	if (atomic_add_return(1, &blk_probes_ref) == 1) {
+		ret = blk_register_tracepoints();
+		if (ret)
+			goto probe_err;
+	}
+	mutex_unlock(&blk_probe_mutex);
+
 	ret = -EBUSY;
 	old_bt = xchg(&q->blk_trace, bt);
 	if (old_bt) {
@@ -436,6 +454,9 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	}
 
 	return 0;
+probe_err:
+	atomic_dec(&blk_probes_ref);
+	mutex_unlock(&blk_probe_mutex);
 err:
 	if (dir)
 		blk_remove_tree(dir);
@@ -562,3 +583,308 @@ void blk_trace_shutdown(struct request_queue *q)
 		blk_trace_remove(q);
 	}
 }
+
+/*
+ * blktrace probes
+ */
+
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q:		queue the io is for
+ * @rq:		the source request
+ * @what:	the action
+ *
+ * Description:
+ *     Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+			     u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->cmd_flags & 0x03;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_discard_rq(rq))
+		rw |= (1 << BIO_RW_DISCARD);
+
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
+				sizeof(rq->cmd), rq->cmd);
+	} else {
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+				rw, what, rq->errors, 0, NULL);
+	}
+}
+
+static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+}
+
+static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+}
+
+static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+}
+
+static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+}
+
+static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * @q:		queue the io is for
+ * @bio:	the source bio
+ * @what:	the action
+ *
+ * Description:
+ *     Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+			      u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
+			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+}
+
+static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+}
+
+static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+}
+
+static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+}
+
+static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
+{
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+}
+
+static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
+{
+	if (bio)
+		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+	else {
+		struct blk_trace *bt = q->blk_trace;
+
+		if (bt)
+			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+	}
+}
+
+
+static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
+{
+	if (bio)
+		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+	else {
+		struct blk_trace *bt = q->blk_trace;
+
+		if (bt)
+			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL);
+	}
+}
+
+static void blk_add_trace_plug(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt)
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+}
+
+static void blk_add_trace_unplug_io(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt) {
+		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+		__be64 rpdu = cpu_to_be64(pdu);
+
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
+				sizeof(rpdu), &rpdu);
+	}
+}
+
+static void blk_add_trace_unplug_timer(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt) {
+		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+		__be64 rpdu = cpu_to_be64(pdu);
+
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
+				sizeof(rpdu), &rpdu);
+	}
+}
+
+static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
+				unsigned int pdu)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bt) {
+		__be64 rpdu = cpu_to_be64(pdu);
+
+		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
+				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+				sizeof(rpdu), &rpdu);
+	}
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * @q:		queue the io is for
+ * @bio:	the source bio
+ * @dev:	target device
+ * @from:	source sector
+ * @to:		target sector
+ *
+ * Description:
+ *     Device mapper or raid target sometimes need to split a bio because
+ *     it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+				dev_t dev, sector_t from, sector_t to)
+{
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+
+	if (likely(!bt))
+		return;
+
+	r.device = cpu_to_be32(dev);
+	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
+	r.sector = cpu_to_be64(to);
+
+	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+/**
+ * blk_add_driver_data - Add binary message with driver-specific data
+ * @q:		queue the io is for
+ * @rq:		io request
+ * @data:	driver-specific data
+ * @len:	length of driver-specific data
+ *
+ * Description:
+ *     Some drivers might want to write driver-specific data per request.
+ *
+ **/
+void blk_add_driver_data(struct request_queue *q,
+			 struct request *rq,
+			 void *data, size_t len)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_pc_request(rq))
+		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
+				rq->errors, len, data);
+	else
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+				0, BLK_TA_DRV_DATA, rq->errors, len, data);
+}
+EXPORT_SYMBOL_GPL(blk_add_driver_data);
+
+static int blk_register_tracepoints(void)
+{
+	int ret;
+
+	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+	WARN_ON(ret);
+	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+	WARN_ON(ret);
+	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
+	WARN_ON(ret);
+	ret = register_trace_block_getrq(blk_add_trace_getrq);
+	WARN_ON(ret);
+	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
+	WARN_ON(ret);
+	ret = register_trace_block_plug(blk_add_trace_plug);
+	WARN_ON(ret);
+	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+	WARN_ON(ret);
+	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
+	WARN_ON(ret);
+	ret = register_trace_block_split(blk_add_trace_split);
+	WARN_ON(ret);
+	ret = register_trace_block_remap(blk_add_trace_remap);
+	WARN_ON(ret);
+	return 0;
+}
+
+static void blk_unregister_tracepoints(void)
+{
+	unregister_trace_block_remap(blk_add_trace_remap);
+	unregister_trace_block_split(blk_add_trace_split);
+	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
+	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+	unregister_trace_block_plug(blk_add_trace_plug);
+	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
+	unregister_trace_block_getrq(blk_add_trace_getrq);
+	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
+	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
+	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
+	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
+	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
+	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
+
+	tracepoint_synchronize_unregister();
+}
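The probe lifecycle implemented above reduces to a get/put pair: the
first queue that enables blktrace attaches every probe, the last one
detaches them. Condensed as a sketch (the helper names here are
hypothetical; the bodies are lifted from do_blk_trace_setup() and
blk_trace_cleanup()):

	static int blk_probe_get(void)
	{
		int ret = 0;

		mutex_lock(&blk_probe_mutex);
		if (atomic_add_return(1, &blk_probes_ref) == 1) {
			ret = blk_register_tracepoints();
			if (ret)
				atomic_dec(&blk_probes_ref);
		}
		mutex_unlock(&blk_probe_mutex);
		return ret;
	}

	static void blk_probe_put(void)
	{
		mutex_lock(&blk_probe_mutex);
		if (atomic_dec_and_test(&blk_probes_ref))
			blk_unregister_tracepoints();
		mutex_unlock(&blk_probe_mutex);
	}

tracepoint_synchronize_unregister() then guarantees that any in-flight
probe calls have finished before the caller proceeds.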
diff --git a/block/elevator.c b/block/elevator.c
index 9ac82dde99dd..e5677fe4f412 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
 
@@ -41,6 +42,8 @@
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
+DEFINE_TRACE(block_rq_abort);
+
 /*
  * Merge hash stuff.
  */
@@ -52,6 +55,9 @@ static const int elv_hash_shift = 6;
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+DEFINE_TRACE(block_rq_insert);
+DEFINE_TRACE(block_rq_issue);
+
 /*
  * Query io scheduler to see if the current process issuing bio may be
  * merged with rq.
@@ -586,7 +592,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	unsigned ordseq;
 	int unplug_it = 1;
 
-	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
@@ -772,7 +778,7 @@ struct request *elv_next_request(struct request_queue *q)
 			 * not be passed by new incoming requests
 			 */
 			rq->cmd_flags |= REQ_STARTED;
-			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+			trace_block_rq_issue(q, rq);
 		}
 
 		if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -921,7 +927,7 @@ void elv_abort_queue(struct request_queue *q)
 	while (!list_empty(&q->queue_head)) {
 		rq = list_entry_rq(q->queue_head.next);
 		rq->cmd_flags |= REQ_QUIET;
-		blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+		trace_block_rq_abort(q, rq);
 		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
 	}
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c99e4728ff41..343094c3feeb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,7 @@
 #include <linux/idr.h>
 #include <linux/hdreg.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -51,6 +52,8 @@ struct dm_target_io {
 	union map_info info;
 };
 
+DEFINE_TRACE(block_bio_complete);
+
 union map_info *dm_get_mapinfo(struct bio *bio)
 {
 	if (bio && bio->bi_private)
@@ -504,8 +507,7 @@ static void dec_pending(struct dm_io *io, int error)
 		end_io_acct(io);
 
 		if (io->error != DM_ENDIO_REQUEUE) {
-			blk_add_trace_bio(io->md->queue, io->bio,
-					  BLK_TA_COMPLETE);
+			trace_block_bio_complete(io->md->queue, io->bio);
 
 			bio_endio(io->bio, io->error);
 		}
@@ -598,7 +600,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
-		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
 				    tio->io->bio->bi_bdev->bd_dev,
 				    clone->bi_sector, sector);
 
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -26,8 +26,11 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
+DEFINE_TRACE(block_split);
+
 static struct kmem_cache *bio_slab __read_mostly;
 
 static mempool_t *bio_split_pool __read_mostly;
@@ -1263,7 +1266,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 	if (!bp)
 		return bp;
 
-	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
-			      bi->bi_sector + first_sectors);
+	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
 			bi->bi_sector + first_sectors);
 
 	BUG_ON(bi->bi_vcnt != 1);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index bdf505d33e77..1dba3493d520 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -160,7 +160,6 @@ struct blk_trace { | |||
| 160 | 160 | ||
| 161 | extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); | 161 | extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); |
| 162 | extern void blk_trace_shutdown(struct request_queue *); | 162 | extern void blk_trace_shutdown(struct request_queue *); |
| 163 | extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); | ||
| 164 | extern int do_blk_trace_setup(struct request_queue *q, | 163 | extern int do_blk_trace_setup(struct request_queue *q, |
| 165 | char *name, dev_t dev, struct blk_user_trace_setup *buts); | 164 | char *name, dev_t dev, struct blk_user_trace_setup *buts); |
| 166 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | 165 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); |
| @@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | |||
| 186 | } while (0) | 185 | } while (0) |
| 187 | #define BLK_TN_MAX_MSG 128 | 186 | #define BLK_TN_MAX_MSG 128 |
| 188 | 187 | ||
| 189 | /** | 188 | extern void blk_add_driver_data(struct request_queue *q, struct request *rq, |
| 190 | * blk_add_trace_rq - Add a trace for a request oriented action | 189 | void *data, size_t len); |
| 191 | * @q: queue the io is for | ||
| 192 | * @rq: the source request | ||
| 193 | * @what: the action | ||
| 194 | * | ||
| 195 | * Description: | ||
| 196 | * Records an action against a request. Will log the bio offset + size. | ||
| 197 | * | ||
| 198 | **/ | ||
| 199 | static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, | ||
| 200 | u32 what) | ||
| 201 | { | ||
| 202 | struct blk_trace *bt = q->blk_trace; | ||
| 203 | int rw = rq->cmd_flags & 0x03; | ||
| 204 | |||
| 205 | if (likely(!bt)) | ||
| 206 | return; | ||
| 207 | |||
| 208 | if (blk_discard_rq(rq)) | ||
| 209 | rw |= (1 << BIO_RW_DISCARD); | ||
| 210 | |||
| 211 | if (blk_pc_request(rq)) { | ||
| 212 | what |= BLK_TC_ACT(BLK_TC_PC); | ||
| 213 | __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); | ||
| 214 | } else { | ||
| 215 | what |= BLK_TC_ACT(BLK_TC_FS); | ||
| 216 | __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL); | ||
| 217 | } | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 221 | * blk_add_trace_bio - Add a trace for a bio oriented action | ||
| 222 | * @q: queue the io is for | ||
| 223 | * @bio: the source bio | ||
| 224 | * @what: the action | ||
| 225 | * | ||
| 226 | * Description: | ||
| 227 | * Records an action against a bio. Will log the bio offset + size. | ||
| 228 | * | ||
| 229 | **/ | ||
| 230 | static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | ||
| 231 | u32 what) | ||
| 232 | { | ||
| 233 | struct blk_trace *bt = q->blk_trace; | ||
| 234 | |||
| 235 | if (likely(!bt)) | ||
| 236 | return; | ||
| 237 | |||
| 238 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL); | ||
| 239 | } | ||
| 240 | |||
| 241 | /** | ||
| 242 | * blk_add_trace_generic - Add a trace for a generic action | ||
| 243 | * @q: queue the io is for | ||
| 244 | * @bio: the source bio | ||
| 245 | * @rw: the data direction | ||
| 246 | * @what: the action | ||
| 247 | * | ||
| 248 | * Description: | ||
| 249 | * Records a simple trace. | ||
| 250 | * | ||
| 251 | **/ | ||
| 252 | static inline void blk_add_trace_generic(struct request_queue *q, | ||
| 253 | struct bio *bio, int rw, u32 what) | ||
| 254 | { | ||
| 255 | struct blk_trace *bt = q->blk_trace; | ||
| 256 | |||
| 257 | if (likely(!bt)) | ||
| 258 | return; | ||
| 259 | |||
| 260 | if (bio) | ||
| 261 | blk_add_trace_bio(q, bio, what); | ||
| 262 | else | ||
| 263 | __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL); | ||
| 264 | } | ||
| 265 | |||
| 266 | /** | ||
| 267 | * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload | ||
| 268 | * @q: queue the io is for | ||
| 269 | * @what: the action | ||
| 270 | * @bio: the source bio | ||
| 271 | * @pdu: the integer payload | ||
| 272 | * | ||
| 273 | * Description: | ||
| 274 | * Adds a trace with some integer payload. This might be an unplug | ||
| 275 | * option given as the action, with the depth at unplug time given | ||
| 276 | * as the payload. | ||
| 277 | * | ||
| 278 | **/ | ||
| 279 | static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what, | ||
| 280 | struct bio *bio, unsigned int pdu) | ||
| 281 | { | ||
| 282 | struct blk_trace *bt = q->blk_trace; | ||
| 283 | __be64 rpdu = cpu_to_be64(pdu); | ||
| 284 | |||
| 285 | if (likely(!bt)) | ||
| 286 | return; | ||
| 287 | |||
| 288 | if (bio) | ||
| 289 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu); | ||
| 290 | else | ||
| 291 | __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); | ||
| 292 | } | ||
| 293 | |||
| 294 | /** | ||
| 295 | * blk_add_trace_remap - Add a trace for a remap operation | ||
| 296 | * @q: queue the io is for | ||
| 297 | * @bio: the source bio | ||
| 298 | * @dev: target device | ||
| 299 | * @from: source sector | ||
| 300 | * @to: target sector | ||
| 301 | * | ||
| 302 | * Description: | ||
| 303 | * Device mapper or raid targets sometimes need to split a bio because | ||
| 304 | * it spans a stripe (or similar). Add a trace for that action. | ||
| 305 | * | ||
| 306 | **/ | ||
| 307 | static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio, | ||
| 308 | dev_t dev, sector_t from, sector_t to) | ||
| 309 | { | ||
| 310 | struct blk_trace *bt = q->blk_trace; | ||
| 311 | struct blk_io_trace_remap r; | ||
| 312 | |||
| 313 | if (likely(!bt)) | ||
| 314 | return; | ||
| 315 | |||
| 316 | r.device = cpu_to_be32(dev); | ||
| 317 | r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev); | ||
| 318 | r.sector = cpu_to_be64(to); | ||
| 319 | |||
| 320 | __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); | ||
| 321 | } | ||
| 322 | |||
| 323 | /** | ||
| 324 | * blk_add_driver_data - Add binary message with driver-specific data | ||
| 325 | * @q: queue the io is for | ||
| 326 | * @rq: io request | ||
| 327 | * @data: driver-specific data | ||
| 328 | * @len: length of driver-specific data | ||
| 329 | * | ||
| 330 | * Description: | ||
| 331 | * Some drivers might want to write driver-specific data per request. | ||
| 332 | * | ||
| 333 | **/ | ||
| 334 | static inline void blk_add_driver_data(struct request_queue *q, | ||
| 335 | struct request *rq, | ||
| 336 | void *data, size_t len) | ||
| 337 | { | ||
| 338 | struct blk_trace *bt = q->blk_trace; | ||
| 339 | |||
| 340 | if (likely(!bt)) | ||
| 341 | return; | ||
| 342 | |||
| 343 | if (blk_pc_request(rq)) | ||
| 344 | __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, | ||
| 345 | rq->errors, len, data); | ||
| 346 | else | ||
| 347 | __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, | ||
| 348 | 0, BLK_TA_DRV_DATA, rq->errors, len, data); | ||
| 349 | } | ||
| 350 | |||
| 351 | extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | 190 | extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
| 352 | char __user *arg); | 191 | char __user *arg); |
| 353 | extern int blk_trace_startstop(struct request_queue *q, int start); | 192 | extern int blk_trace_startstop(struct request_queue *q, int start); |
| @@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q); | |||
| 356 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ | 195 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ |
| 357 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) | 196 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) |
| 358 | #define blk_trace_shutdown(q) do { } while (0) | 197 | #define blk_trace_shutdown(q) do { } while (0) |
| 359 | #define blk_add_trace_rq(q, rq, what) do { } while (0) | ||
| 360 | #define blk_add_trace_bio(q, rq, what) do { } while (0) | ||
| 361 | #define blk_add_trace_generic(q, rq, rw, what) do { } while (0) | ||
| 362 | #define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0) | ||
| 363 | #define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0) | ||
| 364 | #define blk_add_driver_data(q, rq, data, len) do {} while (0) | ||
| 365 | #define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) | 198 | #define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) |
| 199 | #define blk_add_driver_data(q, rq, data, len) do {} while (0) | ||
| 366 | #define blk_trace_setup(q, name, dev, arg) (-ENOTTY) | 200 | #define blk_trace_setup(q, name, dev, arg) (-ENOTTY) |
| 367 | #define blk_trace_startstop(q, start) (-ENOTTY) | 201 | #define blk_trace_startstop(q, start) (-ENOTTY) |
| 368 | #define blk_trace_remove(q) (-ENOTTY) | 202 | #define blk_trace_remove(q) (-ENOTTY) |
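
Of the old inline helpers, only blk_add_driver_data() survives as a public symbol, since drivers may still want to log request-specific payloads (see the kernel-doc removed above). A hedged sketch of a caller; struct my_hw_status and my_complete() are invented names for illustration:

	struct my_hw_status {
		u32 fw_error;
		u32 retries;
	};

	static void my_complete(struct request_queue *q, struct request *rq,
				struct my_hw_status *hw)
	{
		/* logged as a BLK_TA_DRV_DATA event while blktrace runs;
		 * without CONFIG_BLK_DEV_IO_TRACE the call is a no-op macro */
		blk_add_driver_data(q, rq, hw, sizeof(*hw));
	}
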
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 7854d87b97b2..afba918c623c 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -115,8 +115,13 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func); | |||
| 115 | extern void ftrace_caller(void); | 115 | extern void ftrace_caller(void); |
| 116 | extern void ftrace_call(void); | 116 | extern void ftrace_call(void); |
| 117 | extern void mcount_call(void); | 117 | extern void mcount_call(void); |
| 118 | #ifdef CONFIG_FUNCTION_RET_TRACER | 118 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 119 | extern void ftrace_return_caller(void); | 119 | extern void ftrace_graph_caller(void); |
| 120 | extern int ftrace_enable_ftrace_graph_caller(void); | ||
| 121 | extern int ftrace_disable_ftrace_graph_caller(void); | ||
| 122 | #else | ||
| 123 | static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } | ||
| 124 | static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } | ||
| 120 | #endif | 125 | #endif |
| 121 | 126 | ||
| 122 | /** | 127 | /** |
| @@ -311,35 +316,77 @@ ftrace_init_module(struct module *mod, | |||
| 311 | unsigned long *start, unsigned long *end) { } | 316 | unsigned long *start, unsigned long *end) { } |
| 312 | #endif | 317 | #endif |
| 313 | 318 | ||
| 319 | enum { | ||
| 320 | POWER_NONE = 0, | ||
| 321 | POWER_CSTATE = 1, | ||
| 322 | POWER_PSTATE = 2, | ||
| 323 | }; | ||
| 324 | |||
| 325 | struct power_trace { | ||
| 326 | #ifdef CONFIG_POWER_TRACER | ||
| 327 | ktime_t stamp; | ||
| 328 | ktime_t end; | ||
| 329 | int type; | ||
| 330 | int state; | ||
| 331 | #endif | ||
| 332 | }; | ||
| 333 | |||
| 334 | #ifdef CONFIG_POWER_TRACER | ||
| 335 | extern void trace_power_start(struct power_trace *it, unsigned int type, | ||
| 336 | unsigned int state); | ||
| 337 | extern void trace_power_mark(struct power_trace *it, unsigned int type, | ||
| 338 | unsigned int state); | ||
| 339 | extern void trace_power_end(struct power_trace *it); | ||
| 340 | #else | ||
| 341 | static inline void trace_power_start(struct power_trace *it, unsigned int type, | ||
| 342 | unsigned int state) { } | ||
| 343 | static inline void trace_power_mark(struct power_trace *it, unsigned int type, | ||
| 344 | unsigned int state) { } | ||
| 345 | static inline void trace_power_end(struct power_trace *it) { } | ||
| 346 | #endif | ||
| 347 | |||
| 348 | |||
| 349 | /* | ||
| 350 | * Structure that defines an entry function trace. | ||
| 351 | */ | ||
| 352 | struct ftrace_graph_ent { | ||
| 353 | unsigned long func; /* Current function */ | ||
| 354 | int depth; | ||
| 355 | }; | ||
| 314 | 356 | ||
| 315 | /* | 357 | /* |
| 316 | * Structure that defines a return function trace. | 358 | * Structure that defines a return function trace. |
| 317 | */ | 359 | */ |
| 318 | struct ftrace_retfunc { | 360 | struct ftrace_graph_ret { |
| 319 | unsigned long ret; /* Return address */ | ||
| 320 | unsigned long func; /* Current function */ | 361 | unsigned long func; /* Current function */ |
| 321 | unsigned long long calltime; | 362 | unsigned long long calltime; |
| 322 | unsigned long long rettime; | 363 | unsigned long long rettime; |
| 323 | /* Number of functions that overran the depth limit for current task */ | 364 | /* Number of functions that overran the depth limit for current task */ |
| 324 | unsigned long overrun; | 365 | unsigned long overrun; |
| 366 | int depth; | ||
| 325 | }; | 367 | }; |
| 326 | 368 | ||
| 327 | #ifdef CONFIG_FUNCTION_RET_TRACER | 369 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 328 | #define FTRACE_RETFUNC_DEPTH 50 | 370 | #define FTRACE_RETFUNC_DEPTH 50 |
| 329 | #define FTRACE_RETSTACK_ALLOC_SIZE 32 | 371 | #define FTRACE_RETSTACK_ALLOC_SIZE 32 |
| 330 | /* Type of a callback handler of tracing return function */ | 372 | /* Type of the callback handlers for function graph tracing */ |
| 331 | typedef void (*trace_function_return_t)(struct ftrace_retfunc *); | 373 | typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ |
| 374 | typedef void (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ | ||
| 375 | |||
| 376 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, | ||
| 377 | trace_func_graph_ent_t entryfunc); | ||
| 378 | |||
| 379 | /* The current handlers in use */ | ||
| 380 | extern trace_func_graph_ret_t ftrace_graph_return; | ||
| 381 | extern trace_func_graph_ent_t ftrace_graph_entry; | ||
| 332 | 382 | ||
| 333 | extern int register_ftrace_return(trace_function_return_t func); | 383 | extern void unregister_ftrace_graph(void); |
| 334 | /* The current handler in use */ | ||
| 335 | extern trace_function_return_t ftrace_function_return; | ||
| 336 | extern void unregister_ftrace_return(void); | ||
| 337 | 384 | ||
| 338 | extern void ftrace_retfunc_init_task(struct task_struct *t); | 385 | extern void ftrace_graph_init_task(struct task_struct *t); |
| 339 | extern void ftrace_retfunc_exit_task(struct task_struct *t); | 386 | extern void ftrace_graph_exit_task(struct task_struct *t); |
| 340 | #else | 387 | #else |
| 341 | static inline void ftrace_retfunc_init_task(struct task_struct *t) { } | 388 | static inline void ftrace_graph_init_task(struct task_struct *t) { } |
| 342 | static inline void ftrace_retfunc_exit_task(struct task_struct *t) { } | 389 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } |
| 343 | #endif | 390 | #endif |
| 344 | 391 | ||
| 345 | #endif /* _LINUX_FTRACE_H */ | 392 | #endif /* _LINUX_FTRACE_H */ |
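
register_ftrace_graph() replaces register_ftrace_return() and takes two callbacks, one fired at function entry and one at return. A minimal sketch of a client of the new API; my_ent(), my_ret() and my_init() are illustrative names:

	static void my_ent(struct ftrace_graph_ent *trace)
	{
		/* entry hook: trace->func and trace->depth are valid here */
	}

	static void my_ret(struct ftrace_graph_ret *trace)
	{
		/* return hook: duration = trace->rettime - trace->calltime */
	}

	static int my_init(void)
	{
		/* note the argument order: return handler, then entry handler */
		return register_ftrace_graph(my_ret, my_ent);
	}

unregister_ftrace_graph() later resets both hooks to ftrace_stub, as the kernel/trace/ftrace.c hunk further down shows.
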
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 0b4df55d7a74..366a054d0b05 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #define _LINUX_FTRACE_IRQ_H | 2 | #define _LINUX_FTRACE_IRQ_H |
| 3 | 3 | ||
| 4 | 4 | ||
| 5 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER) | 5 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) |
| 6 | extern void ftrace_nmi_enter(void); | 6 | extern void ftrace_nmi_enter(void); |
| 7 | extern void ftrace_nmi_exit(void); | 7 | extern void ftrace_nmi_exit(void); |
| 8 | #else | 8 | #else |
diff --git a/include/linux/sched.h b/include/linux/sched.h index d02a0ca70ee9..7ad48f2a2758 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1365,7 +1365,7 @@ struct task_struct { | |||
| 1365 | unsigned long default_timer_slack_ns; | 1365 | unsigned long default_timer_slack_ns; |
| 1366 | 1366 | ||
| 1367 | struct list_head *scm_work_list; | 1367 | struct list_head *scm_work_list; |
| 1368 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1368 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 1369 | /* Index of current stored address in ret_stack */ | 1369 | /* Index of current stored address in ret_stack */ |
| 1370 | int curr_ret_stack; | 1370 | int curr_ret_stack; |
| 1371 | /* Stack of return addresses for return function tracing */ | 1371 | /* Stack of return addresses for return function tracing */ |
diff --git a/include/trace/block.h b/include/trace/block.h new file mode 100644 index 000000000000..25c6a1fd5b77 --- /dev/null +++ b/include/trace/block.h | |||
| @@ -0,0 +1,76 @@ | |||
| 1 | #ifndef _TRACE_BLOCK_H | ||
| 2 | #define _TRACE_BLOCK_H | ||
| 3 | |||
| 4 | #include <linux/blkdev.h> | ||
| 5 | #include <linux/tracepoint.h> | ||
| 6 | |||
| 7 | DECLARE_TRACE(block_rq_abort, | ||
| 8 | TPPROTO(struct request_queue *q, struct request *rq), | ||
| 9 | TPARGS(q, rq)); | ||
| 10 | |||
| 11 | DECLARE_TRACE(block_rq_insert, | ||
| 12 | TPPROTO(struct request_queue *q, struct request *rq), | ||
| 13 | TPARGS(q, rq)); | ||
| 14 | |||
| 15 | DECLARE_TRACE(block_rq_issue, | ||
| 16 | TPPROTO(struct request_queue *q, struct request *rq), | ||
| 17 | TPARGS(q, rq)); | ||
| 18 | |||
| 19 | DECLARE_TRACE(block_rq_requeue, | ||
| 20 | TPPROTO(struct request_queue *q, struct request *rq), | ||
| 21 | TPARGS(q, rq)); | ||
| 22 | |||
| 23 | DECLARE_TRACE(block_rq_complete, | ||
| 24 | TPPROTO(struct request_queue *q, struct request *rq), | ||
| 25 | TPARGS(q, rq)); | ||
| 26 | |||
| 27 | DECLARE_TRACE(block_bio_bounce, | ||
| 28 | TPPROTO(struct request_queue *q, struct bio *bio), | ||
| 29 | TPARGS(q, bio)); | ||
| 30 | |||
| 31 | DECLARE_TRACE(block_bio_complete, | ||
| 32 | TPPROTO(struct request_queue *q, struct bio *bio), | ||
| 33 | TPARGS(q, bio)); | ||
| 34 | |||
| 35 | DECLARE_TRACE(block_bio_backmerge, | ||
| 36 | TPPROTO(struct request_queue *q, struct bio *bio), | ||
| 37 | TPARGS(q, bio)); | ||
| 38 | |||
| 39 | DECLARE_TRACE(block_bio_frontmerge, | ||
| 40 | TPPROTO(struct request_queue *q, struct bio *bio), | ||
| 41 | TPARGS(q, bio)); | ||
| 42 | |||
| 43 | DECLARE_TRACE(block_bio_queue, | ||
| 44 | TPPROTO(struct request_queue *q, struct bio *bio), | ||
| 45 | TPARGS(q, bio)); | ||
| 46 | |||
| 47 | DECLARE_TRACE(block_getrq, | ||
| 48 | TPPROTO(struct request_queue *q, struct bio *bio, int rw), | ||
| 49 | TPARGS(q, bio, rw)); | ||
| 50 | |||
| 51 | DECLARE_TRACE(block_sleeprq, | ||
| 52 | TPPROTO(struct request_queue *q, struct bio *bio, int rw), | ||
| 53 | TPARGS(q, bio, rw)); | ||
| 54 | |||
| 55 | DECLARE_TRACE(block_plug, | ||
| 56 | TPPROTO(struct request_queue *q), | ||
| 57 | TPARGS(q)); | ||
| 58 | |||
| 59 | DECLARE_TRACE(block_unplug_timer, | ||
| 60 | TPPROTO(struct request_queue *q), | ||
| 61 | TPARGS(q)); | ||
| 62 | |||
| 63 | DECLARE_TRACE(block_unplug_io, | ||
| 64 | TPPROTO(struct request_queue *q), | ||
| 65 | TPARGS(q)); | ||
| 66 | |||
| 67 | DECLARE_TRACE(block_split, | ||
| 68 | TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), | ||
| 69 | TPARGS(q, bio, pdu)); | ||
| 70 | |||
| 71 | DECLARE_TRACE(block_remap, | ||
| 72 | TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, | ||
| 73 | sector_t from, sector_t to), | ||
| 74 | TPARGS(q, bio, dev, from, to)); | ||
| 75 | |||
| 76 | #endif | ||
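
Each DECLARE_TRACE() above also generates register_trace_<name>() and unregister_trace_<name>() helpers for attaching probes whose signature matches the TPPROTO. A sketch for block_split, with probe_split() as an invented name:

	static void probe_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
	{
		/* runs whenever fs/bio.c executes trace_block_split() */
	}

	static int attach_probe(void)
	{
		return register_trace_block_split(probe_split);
	}

	static void detach_probe(void)
	{
		unregister_trace_block_split(probe_split);
	}
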
diff --git a/kernel/Makefile b/kernel/Makefile index 03a45e7e87b7..703cf3b7389c 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
| @@ -21,7 +21,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg | |||
| 21 | CFLAGS_REMOVE_sched_clock.o = -pg | 21 | CFLAGS_REMOVE_sched_clock.o = -pg |
| 22 | CFLAGS_REMOVE_sched.o = -pg | 22 | CFLAGS_REMOVE_sched.o = -pg |
| 23 | endif | 23 | endif |
| 24 | ifdef CONFIG_FUNCTION_RET_TRACER | 24 | ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 25 | CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address() | 25 | CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address() |
| 26 | CFLAGS_REMOVE_module.o = -pg # For __module_text_address() | 26 | CFLAGS_REMOVE_module.o = -pg # For __module_text_address() |
| 27 | endif | 27 | endif |
diff --git a/kernel/fork.c b/kernel/fork.c index d6e1a3205f62..5f82a999c032 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -140,7 +140,7 @@ void free_task(struct task_struct *tsk) | |||
| 140 | prop_local_destroy_single(&tsk->dirties); | 140 | prop_local_destroy_single(&tsk->dirties); |
| 141 | free_thread_info(tsk->stack); | 141 | free_thread_info(tsk->stack); |
| 142 | rt_mutex_debug_task_free(tsk); | 142 | rt_mutex_debug_task_free(tsk); |
| 143 | ftrace_retfunc_exit_task(tsk); | 143 | ftrace_graph_exit_task(tsk); |
| 144 | free_task_struct(tsk); | 144 | free_task_struct(tsk); |
| 145 | } | 145 | } |
| 146 | EXPORT_SYMBOL(free_task); | 146 | EXPORT_SYMBOL(free_task); |
| @@ -1271,7 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1271 | total_forks++; | 1271 | total_forks++; |
| 1272 | spin_unlock(¤t->sighand->siglock); | 1272 | spin_unlock(¤t->sighand->siglock); |
| 1273 | write_unlock_irq(&tasklist_lock); | 1273 | write_unlock_irq(&tasklist_lock); |
| 1274 | ftrace_retfunc_init_task(p); | 1274 | ftrace_graph_init_task(p); |
| 1275 | proc_fork_connector(p); | 1275 | proc_fork_connector(p); |
| 1276 | cgroup_post_fork(p); | 1276 | cgroup_post_fork(p); |
| 1277 | return p; | 1277 | return p; |
diff --git a/kernel/sched.c b/kernel/sched.c index 388d9db044ab..52490bf6b884 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -5901,7 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 5901 | * The idle tasks have their own, simple scheduling class: | 5901 | * The idle tasks have their own, simple scheduling class: |
| 5902 | */ | 5902 | */ |
| 5903 | idle->sched_class = &idle_sched_class; | 5903 | idle->sched_class = &idle_sched_class; |
| 5904 | ftrace_retfunc_init_task(idle); | 5904 | ftrace_graph_init_task(idle); |
| 5905 | } | 5905 | } |
| 5906 | 5906 | ||
| 5907 | /* | 5907 | /* |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 620feadff67a..8b6b673b4d6c 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -12,7 +12,7 @@ config NOP_TRACER | |||
| 12 | config HAVE_FUNCTION_TRACER | 12 | config HAVE_FUNCTION_TRACER |
| 13 | bool | 13 | bool |
| 14 | 14 | ||
| 15 | config HAVE_FUNCTION_RET_TRACER | 15 | config HAVE_FUNCTION_GRAPH_TRACER |
| 16 | bool | 16 | bool |
| 17 | 17 | ||
| 18 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST | 18 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST |
| @@ -63,15 +63,18 @@ config FUNCTION_TRACER | |||
| 63 | (the bootup default), then the overhead of the instructions is very | 63 | (the bootup default), then the overhead of the instructions is very |
| 64 | small and not measurable even in micro-benchmarks. | 64 | small and not measurable even in micro-benchmarks. |
| 65 | 65 | ||
| 66 | config FUNCTION_RET_TRACER | 66 | config FUNCTION_GRAPH_TRACER |
| 67 | bool "Kernel Function return Tracer" | 67 | bool "Kernel Function Graph Tracer" |
| 68 | depends on HAVE_FUNCTION_RET_TRACER | 68 | depends on HAVE_FUNCTION_GRAPH_TRACER |
| 69 | depends on FUNCTION_TRACER | 69 | depends on FUNCTION_TRACER |
| 70 | help | 70 | help |
| 71 | Enable the kernel to trace a function at its return. | 71 | Enable the kernel to trace a function at both its return |
| 72 | It's first purpose is to trace the duration of functions. | 72 | and its entry. |
| 73 | This is done by setting the current return address on the thread | 73 | It's first purpose is to trace the duration of functions and |
| 74 | info structure of the current task. | 74 | draw a call graph for each thread with some informations like |
| 75 | the return value. | ||
| 76 | This is done by setting the current return address on the current | ||
| 77 | task structure into a stack of calls. | ||
| 75 | 78 | ||
| 76 | config IRQSOFF_TRACER | 79 | config IRQSOFF_TRACER |
| 77 | bool "Interrupts-off Latency Tracer" | 80 | bool "Interrupts-off Latency Tracer" |
| @@ -217,6 +220,17 @@ config BRANCH_TRACER | |||
| 217 | 220 | ||
| 218 | Say N if unsure. | 221 | Say N if unsure. |
| 219 | 222 | ||
| 223 | config POWER_TRACER | ||
| 224 | bool "Trace power consumption behavior" | ||
| 225 | depends on DEBUG_KERNEL | ||
| 226 | depends on X86 | ||
| 227 | select TRACING | ||
| 228 | help | ||
| 229 | This tracer helps developers analyze and optimize the kernel's | ||
| 230 | power management decisions, specifically the C-state and P-state | ||
| 231 | behavior. | ||
| 232 | |||
| 233 | |||
| 220 | config STACK_TRACER | 234 | config STACK_TRACER |
| 221 | bool "Trace max stack" | 235 | bool "Trace max stack" |
| 222 | depends on HAVE_FUNCTION_TRACER | 236 | depends on HAVE_FUNCTION_TRACER |
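
POWER_TRACER enables the trace_power_*() hooks declared in linux/ftrace.h earlier in this diff; with the option off they compile to empty inline stubs. A hedged sketch of how idle code might bracket a C-state residency, with my_enter_cstate() as an invented name:

	static void my_enter_cstate(int cstate)
	{
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, cstate);
		/* ... enter the C-state and block until wakeup ... */
		trace_power_end(&it);	/* records the stamp/end interval */
	}
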
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index cef4bcb4e822..62dc561b6676 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
| @@ -29,8 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o | |||
| 29 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o | 29 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o |
| 30 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o | 30 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o |
| 31 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o | 31 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o |
| 32 | obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o | 32 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o |
| 33 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | 33 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o |
| 34 | obj-$(CONFIG_BTS_TRACER) += trace_bts.o | 34 | obj-$(CONFIG_BTS_TRACER) += trace_bts.o |
| 35 | obj-$(CONFIG_POWER_TRACER) += trace_power.o | ||
| 35 | 36 | ||
| 36 | libftrace-y := ftrace.o | 37 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 53042f118f23..cbf8b09f63a5 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -47,12 +47,12 @@ | |||
| 47 | int ftrace_enabled __read_mostly; | 47 | int ftrace_enabled __read_mostly; |
| 48 | static int last_ftrace_enabled; | 48 | static int last_ftrace_enabled; |
| 49 | 49 | ||
| 50 | /* ftrace_pid_trace >= 0 will only trace threads with this pid */ | ||
| 51 | static int ftrace_pid_trace = -1; | ||
| 52 | |||
| 50 | /* Quick disabling of function tracer. */ | 53 | /* Quick disabling of function tracer. */ |
| 51 | int function_trace_stop; | 54 | int function_trace_stop; |
| 52 | 55 | ||
| 53 | /* By default, current tracing type is normal tracing. */ | ||
| 54 | enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER; | ||
| 55 | |||
| 56 | /* | 56 | /* |
| 57 | * ftrace_disabled is set when an anomaly is discovered. | 57 | * ftrace_disabled is set when an anomaly is discovered. |
| 58 | * ftrace_disabled is much stronger than ftrace_enabled. | 58 | * ftrace_disabled is much stronger than ftrace_enabled. |
| @@ -61,6 +61,7 @@ static int ftrace_disabled __read_mostly; | |||
| 61 | 61 | ||
| 62 | static DEFINE_SPINLOCK(ftrace_lock); | 62 | static DEFINE_SPINLOCK(ftrace_lock); |
| 63 | static DEFINE_MUTEX(ftrace_sysctl_lock); | 63 | static DEFINE_MUTEX(ftrace_sysctl_lock); |
| 64 | static DEFINE_MUTEX(ftrace_start_lock); | ||
| 64 | 65 | ||
| 65 | static struct ftrace_ops ftrace_list_end __read_mostly = | 66 | static struct ftrace_ops ftrace_list_end __read_mostly = |
| 66 | { | 67 | { |
| @@ -70,6 +71,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = | |||
| 70 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 71 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; |
| 71 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 72 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
| 72 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | 73 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; |
| 74 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | ||
| 73 | 75 | ||
| 74 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 76 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) |
| 75 | { | 77 | { |
| @@ -86,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | |||
| 86 | }; | 88 | }; |
| 87 | } | 89 | } |
| 88 | 90 | ||
| 91 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) | ||
| 92 | { | ||
| 93 | if (current->pid != ftrace_pid_trace) | ||
| 94 | return; | ||
| 95 | |||
| 96 | ftrace_pid_function(ip, parent_ip); | ||
| 97 | } | ||
| 98 | |||
| 99 | static void set_ftrace_pid_function(ftrace_func_t func) | ||
| 100 | { | ||
| 101 | /* do not set ftrace_pid_function to itself! */ | ||
| 102 | if (func != ftrace_pid_func) | ||
| 103 | ftrace_pid_function = func; | ||
| 104 | } | ||
| 105 | |||
| 89 | /** | 106 | /** |
| 90 | * clear_ftrace_function - reset the ftrace function | 107 | * clear_ftrace_function - reset the ftrace function |
| 91 | * | 108 | * |
| @@ -96,6 +113,7 @@ void clear_ftrace_function(void) | |||
| 96 | { | 113 | { |
| 97 | ftrace_trace_function = ftrace_stub; | 114 | ftrace_trace_function = ftrace_stub; |
| 98 | __ftrace_trace_function = ftrace_stub; | 115 | __ftrace_trace_function = ftrace_stub; |
| 116 | ftrace_pid_function = ftrace_stub; | ||
| 99 | } | 117 | } |
| 100 | 118 | ||
| 101 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 119 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
| @@ -128,20 +146,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
| 128 | ftrace_list = ops; | 146 | ftrace_list = ops; |
| 129 | 147 | ||
| 130 | if (ftrace_enabled) { | 148 | if (ftrace_enabled) { |
| 149 | ftrace_func_t func; | ||
| 150 | |||
| 151 | if (ops->next == &ftrace_list_end) | ||
| 152 | func = ops->func; | ||
| 153 | else | ||
| 154 | func = ftrace_list_func; | ||
| 155 | |||
| 156 | if (ftrace_pid_trace >= 0) { | ||
| 157 | set_ftrace_pid_function(func); | ||
| 158 | func = ftrace_pid_func; | ||
| 159 | } | ||
| 160 | |||
| 131 | /* | 161 | /* |
| 132 | * For one func, simply call it directly. | 162 | * For one func, simply call it directly. |
| 133 | * For more than one func, call the chain. | 163 | * For more than one func, call the chain. |
| 134 | */ | 164 | */ |
| 135 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 165 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
| 136 | if (ops->next == &ftrace_list_end) | 166 | ftrace_trace_function = func; |
| 137 | ftrace_trace_function = ops->func; | ||
| 138 | else | ||
| 139 | ftrace_trace_function = ftrace_list_func; | ||
| 140 | #else | 167 | #else |
| 141 | if (ops->next == &ftrace_list_end) | 168 | __ftrace_trace_function = func; |
| 142 | __ftrace_trace_function = ops->func; | ||
| 143 | else | ||
| 144 | __ftrace_trace_function = ftrace_list_func; | ||
| 145 | ftrace_trace_function = ftrace_test_stop_func; | 169 | ftrace_trace_function = ftrace_test_stop_func; |
| 146 | #endif | 170 | #endif |
| 147 | } | 171 | } |
| @@ -182,8 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
| 182 | 206 | ||
| 183 | if (ftrace_enabled) { | 207 | if (ftrace_enabled) { |
| 184 | /* If we only have one func left, then call that directly */ | 208 | /* If we only have one func left, then call that directly */ |
| 185 | if (ftrace_list->next == &ftrace_list_end) | 209 | if (ftrace_list->next == &ftrace_list_end) { |
| 186 | ftrace_trace_function = ftrace_list->func; | 210 | ftrace_func_t func = ftrace_list->func; |
| 211 | |||
| 212 | if (ftrace_pid_trace >= 0) { | ||
| 213 | set_ftrace_pid_function(func); | ||
| 214 | func = ftrace_pid_func; | ||
| 215 | } | ||
| 216 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
| 217 | ftrace_trace_function = func; | ||
| 218 | #else | ||
| 219 | __ftrace_trace_function = func; | ||
| 220 | #endif | ||
| 221 | } | ||
| 187 | } | 222 | } |
| 188 | 223 | ||
| 189 | out: | 224 | out: |
| @@ -192,6 +227,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
| 192 | return ret; | 227 | return ret; |
| 193 | } | 228 | } |
| 194 | 229 | ||
| 230 | static void ftrace_update_pid_func(void) | ||
| 231 | { | ||
| 232 | ftrace_func_t func; | ||
| 233 | |||
| 234 | /* should not be called from interrupt context */ | ||
| 235 | spin_lock(&ftrace_lock); | ||
| 236 | |||
| 237 | if (ftrace_trace_function == ftrace_stub) | ||
| 238 | goto out; | ||
| 239 | |||
| 240 | func = ftrace_trace_function; | ||
| 241 | |||
| 242 | if (ftrace_pid_trace >= 0) { | ||
| 243 | set_ftrace_pid_function(func); | ||
| 244 | func = ftrace_pid_func; | ||
| 245 | } else { | ||
| 246 | if (func != ftrace_pid_func) | ||
| 247 | goto out; | ||
| 248 | |||
| 249 | set_ftrace_pid_function(func); | ||
| 250 | } | ||
| 251 | |||
| 252 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
| 253 | ftrace_trace_function = func; | ||
| 254 | #else | ||
| 255 | __ftrace_trace_function = func; | ||
| 256 | #endif | ||
| 257 | |||
| 258 | out: | ||
| 259 | spin_unlock(&ftrace_lock); | ||
| 260 | } | ||
| 261 | |||
| 195 | #ifdef CONFIG_DYNAMIC_FTRACE | 262 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 196 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 263 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
| 197 | # error Dynamic ftrace depends on MCOUNT_RECORD | 264 | # error Dynamic ftrace depends on MCOUNT_RECORD |
| @@ -211,6 +278,8 @@ enum { | |||
| 211 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | 278 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), |
| 212 | FTRACE_ENABLE_MCOUNT = (1 << 3), | 279 | FTRACE_ENABLE_MCOUNT = (1 << 3), |
| 213 | FTRACE_DISABLE_MCOUNT = (1 << 4), | 280 | FTRACE_DISABLE_MCOUNT = (1 << 4), |
| 281 | FTRACE_START_FUNC_RET = (1 << 5), | ||
| 282 | FTRACE_STOP_FUNC_RET = (1 << 6), | ||
| 214 | }; | 283 | }; |
| 215 | 284 | ||
| 216 | static int ftrace_filtered; | 285 | static int ftrace_filtered; |
| @@ -395,14 +464,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
| 395 | unsigned long ip, fl; | 464 | unsigned long ip, fl; |
| 396 | unsigned long ftrace_addr; | 465 | unsigned long ftrace_addr; |
| 397 | 466 | ||
| 398 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
| 399 | if (ftrace_tracing_type == FTRACE_TYPE_ENTER) | ||
| 400 | ftrace_addr = (unsigned long)ftrace_caller; | ||
| 401 | else | ||
| 402 | ftrace_addr = (unsigned long)ftrace_return_caller; | ||
| 403 | #else | ||
| 404 | ftrace_addr = (unsigned long)ftrace_caller; | 467 | ftrace_addr = (unsigned long)ftrace_caller; |
| 405 | #endif | ||
| 406 | 468 | ||
| 407 | ip = rec->ip; | 469 | ip = rec->ip; |
| 408 | 470 | ||
| @@ -535,6 +597,11 @@ static int __ftrace_modify_code(void *data) | |||
| 535 | if (*command & FTRACE_UPDATE_TRACE_FUNC) | 597 | if (*command & FTRACE_UPDATE_TRACE_FUNC) |
| 536 | ftrace_update_ftrace_func(ftrace_trace_function); | 598 | ftrace_update_ftrace_func(ftrace_trace_function); |
| 537 | 599 | ||
| 600 | if (*command & FTRACE_START_FUNC_RET) | ||
| 601 | ftrace_enable_ftrace_graph_caller(); | ||
| 602 | else if (*command & FTRACE_STOP_FUNC_RET) | ||
| 603 | ftrace_disable_ftrace_graph_caller(); | ||
| 604 | |||
| 538 | return 0; | 605 | return 0; |
| 539 | } | 606 | } |
| 540 | 607 | ||
| @@ -545,12 +612,22 @@ static void ftrace_run_update_code(int command) | |||
| 545 | 612 | ||
| 546 | static ftrace_func_t saved_ftrace_func; | 613 | static ftrace_func_t saved_ftrace_func; |
| 547 | static int ftrace_start_up; | 614 | static int ftrace_start_up; |
| 548 | static DEFINE_MUTEX(ftrace_start_lock); | ||
| 549 | 615 | ||
| 550 | static void ftrace_startup(void) | 616 | static void ftrace_startup_enable(int command) |
| 551 | { | 617 | { |
| 552 | int command = 0; | 618 | if (saved_ftrace_func != ftrace_trace_function) { |
| 619 | saved_ftrace_func = ftrace_trace_function; | ||
| 620 | command |= FTRACE_UPDATE_TRACE_FUNC; | ||
| 621 | } | ||
| 622 | |||
| 623 | if (!command || !ftrace_enabled) | ||
| 624 | return; | ||
| 625 | |||
| 626 | ftrace_run_update_code(command); | ||
| 627 | } | ||
| 553 | 628 | ||
| 629 | static void ftrace_startup(int command) | ||
| 630 | { | ||
| 554 | if (unlikely(ftrace_disabled)) | 631 | if (unlikely(ftrace_disabled)) |
| 555 | return; | 632 | return; |
| 556 | 633 | ||
| @@ -558,23 +635,13 @@ static void ftrace_startup(void) | |||
| 558 | ftrace_start_up++; | 635 | ftrace_start_up++; |
| 559 | command |= FTRACE_ENABLE_CALLS; | 636 | command |= FTRACE_ENABLE_CALLS; |
| 560 | 637 | ||
| 561 | if (saved_ftrace_func != ftrace_trace_function) { | 638 | ftrace_startup_enable(command); |
| 562 | saved_ftrace_func = ftrace_trace_function; | ||
| 563 | command |= FTRACE_UPDATE_TRACE_FUNC; | ||
| 564 | } | ||
| 565 | 639 | ||
| 566 | if (!command || !ftrace_enabled) | ||
| 567 | goto out; | ||
| 568 | |||
| 569 | ftrace_run_update_code(command); | ||
| 570 | out: | ||
| 571 | mutex_unlock(&ftrace_start_lock); | 640 | mutex_unlock(&ftrace_start_lock); |
| 572 | } | 641 | } |
| 573 | 642 | ||
| 574 | static void ftrace_shutdown(void) | 643 | static void ftrace_shutdown(int command) |
| 575 | { | 644 | { |
| 576 | int command = 0; | ||
| 577 | |||
| 578 | if (unlikely(ftrace_disabled)) | 645 | if (unlikely(ftrace_disabled)) |
| 579 | return; | 646 | return; |
| 580 | 647 | ||
| @@ -1262,13 +1329,10 @@ static struct file_operations ftrace_notrace_fops = { | |||
| 1262 | .release = ftrace_notrace_release, | 1329 | .release = ftrace_notrace_release, |
| 1263 | }; | 1330 | }; |
| 1264 | 1331 | ||
| 1265 | static __init int ftrace_init_debugfs(void) | 1332 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
| 1266 | { | 1333 | { |
| 1267 | struct dentry *d_tracer; | ||
| 1268 | struct dentry *entry; | 1334 | struct dentry *entry; |
| 1269 | 1335 | ||
| 1270 | d_tracer = tracing_init_dentry(); | ||
| 1271 | |||
| 1272 | entry = debugfs_create_file("available_filter_functions", 0444, | 1336 | entry = debugfs_create_file("available_filter_functions", 0444, |
| 1273 | d_tracer, NULL, &ftrace_avail_fops); | 1337 | d_tracer, NULL, &ftrace_avail_fops); |
| 1274 | if (!entry) | 1338 | if (!entry) |
| @@ -1295,8 +1359,6 @@ static __init int ftrace_init_debugfs(void) | |||
| 1295 | return 0; | 1359 | return 0; |
| 1296 | } | 1360 | } |
| 1297 | 1361 | ||
| 1298 | fs_initcall(ftrace_init_debugfs); | ||
| 1299 | |||
| 1300 | static int ftrace_convert_nops(struct module *mod, | 1362 | static int ftrace_convert_nops(struct module *mod, |
| 1301 | unsigned long *start, | 1363 | unsigned long *start, |
| 1302 | unsigned long *end) | 1364 | unsigned long *end) |
| @@ -1382,12 +1444,101 @@ static int __init ftrace_nodyn_init(void) | |||
| 1382 | } | 1444 | } |
| 1383 | device_initcall(ftrace_nodyn_init); | 1445 | device_initcall(ftrace_nodyn_init); |
| 1384 | 1446 | ||
| 1385 | # define ftrace_startup() do { } while (0) | 1447 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
| 1386 | # define ftrace_shutdown() do { } while (0) | 1448 | static inline void ftrace_startup_enable(int command) { } |
| 1449 | /* Keep as macros so we do not need to define the commands */ | ||
| 1450 | # define ftrace_startup(command) do { } while (0) | ||
| 1451 | # define ftrace_shutdown(command) do { } while (0) | ||
| 1387 | # define ftrace_startup_sysctl() do { } while (0) | 1452 | # define ftrace_startup_sysctl() do { } while (0) |
| 1388 | # define ftrace_shutdown_sysctl() do { } while (0) | 1453 | # define ftrace_shutdown_sysctl() do { } while (0) |
| 1389 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1454 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 1390 | 1455 | ||
| 1456 | static ssize_t | ||
| 1457 | ftrace_pid_read(struct file *file, char __user *ubuf, | ||
| 1458 | size_t cnt, loff_t *ppos) | ||
| 1459 | { | ||
| 1460 | char buf[64]; | ||
| 1461 | int r; | ||
| 1462 | |||
| 1463 | if (ftrace_pid_trace >= 0) | ||
| 1464 | r = sprintf(buf, "%u\n", ftrace_pid_trace); | ||
| 1465 | else | ||
| 1466 | r = sprintf(buf, "no pid\n"); | ||
| 1467 | |||
| 1468 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | static ssize_t | ||
| 1472 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | ||
| 1473 | size_t cnt, loff_t *ppos) | ||
| 1474 | { | ||
| 1475 | char buf[64]; | ||
| 1476 | long val; | ||
| 1477 | int ret; | ||
| 1478 | |||
| 1479 | if (cnt >= sizeof(buf)) | ||
| 1480 | return -EINVAL; | ||
| 1481 | |||
| 1482 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 1483 | return -EFAULT; | ||
| 1484 | |||
| 1485 | buf[cnt] = 0; | ||
| 1486 | |||
| 1487 | ret = strict_strtol(buf, 10, &val); | ||
| 1488 | if (ret < 0) | ||
| 1489 | return ret; | ||
| 1490 | |||
| 1491 | mutex_lock(&ftrace_start_lock); | ||
| 1492 | if (val < 0) { | ||
| 1493 | /* disable pid tracing */ | ||
| 1494 | if (ftrace_pid_trace < 0) | ||
| 1495 | goto out; | ||
| 1496 | ftrace_pid_trace = -1; | ||
| 1497 | |||
| 1498 | } else { | ||
| 1499 | |||
| 1500 | if (ftrace_pid_trace == val) | ||
| 1501 | goto out; | ||
| 1502 | |||
| 1503 | ftrace_pid_trace = val; | ||
| 1504 | } | ||
| 1505 | |||
| 1506 | /* update the function call */ | ||
| 1507 | ftrace_update_pid_func(); | ||
| 1508 | ftrace_startup_enable(0); | ||
| 1509 | |||
| 1510 | out: | ||
| 1511 | mutex_unlock(&ftrace_start_lock); | ||
| 1512 | |||
| 1513 | return cnt; | ||
| 1514 | } | ||
| 1515 | |||
| 1516 | static struct file_operations ftrace_pid_fops = { | ||
| 1517 | .read = ftrace_pid_read, | ||
| 1518 | .write = ftrace_pid_write, | ||
| 1519 | }; | ||
| 1520 | |||
| 1521 | static __init int ftrace_init_debugfs(void) | ||
| 1522 | { | ||
| 1523 | struct dentry *d_tracer; | ||
| 1524 | struct dentry *entry; | ||
| 1525 | |||
| 1526 | d_tracer = tracing_init_dentry(); | ||
| 1527 | if (!d_tracer) | ||
| 1528 | return 0; | ||
| 1529 | |||
| 1530 | ftrace_init_dyn_debugfs(d_tracer); | ||
| 1531 | |||
| 1532 | entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer, | ||
| 1533 | NULL, &ftrace_pid_fops); | ||
| 1534 | if (!entry) | ||
| 1535 | pr_warning("Could not create debugfs " | ||
| 1536 | "'set_ftrace_pid' entry\n"); | ||
| 1537 | return 0; | ||
| 1538 | } | ||
| 1539 | |||
| 1540 | fs_initcall(ftrace_init_debugfs); | ||
| 1541 | |||
| 1391 | /** | 1542 | /** |
| 1392 | * ftrace_kill - kill ftrace | 1543 | * ftrace_kill - kill ftrace |
| 1393 | * | 1544 | * |
| @@ -1422,15 +1573,9 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
| 1422 | 1573 | ||
| 1423 | mutex_lock(&ftrace_sysctl_lock); | 1574 | mutex_lock(&ftrace_sysctl_lock); |
| 1424 | 1575 | ||
| 1425 | if (ftrace_tracing_type == FTRACE_TYPE_RETURN) { | ||
| 1426 | ret = -EBUSY; | ||
| 1427 | goto out; | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | ret = __register_ftrace_function(ops); | 1576 | ret = __register_ftrace_function(ops); |
| 1431 | ftrace_startup(); | 1577 | ftrace_startup(0); |
| 1432 | 1578 | ||
| 1433 | out: | ||
| 1434 | mutex_unlock(&ftrace_sysctl_lock); | 1579 | mutex_unlock(&ftrace_sysctl_lock); |
| 1435 | return ret; | 1580 | return ret; |
| 1436 | } | 1581 | } |
| @@ -1447,7 +1592,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
| 1447 | 1592 | ||
| 1448 | mutex_lock(&ftrace_sysctl_lock); | 1593 | mutex_lock(&ftrace_sysctl_lock); |
| 1449 | ret = __unregister_ftrace_function(ops); | 1594 | ret = __unregister_ftrace_function(ops); |
| 1450 | ftrace_shutdown(); | 1595 | ftrace_shutdown(0); |
| 1451 | mutex_unlock(&ftrace_sysctl_lock); | 1596 | mutex_unlock(&ftrace_sysctl_lock); |
| 1452 | 1597 | ||
| 1453 | return ret; | 1598 | return ret; |
| @@ -1496,14 +1641,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
| 1496 | return ret; | 1641 | return ret; |
| 1497 | } | 1642 | } |
| 1498 | 1643 | ||
| 1499 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1644 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 1500 | |||
| 1501 | static atomic_t ftrace_retfunc_active; | ||
| 1502 | 1645 | ||
| 1503 | /* The callback that hooks the return of a function */ | 1646 | static atomic_t ftrace_graph_active; |
| 1504 | trace_function_return_t ftrace_function_return = | ||
| 1505 | (trace_function_return_t)ftrace_stub; | ||
| 1506 | 1647 | ||
| 1648 | /* The callbacks that hook a function */ | ||
| 1649 | trace_func_graph_ret_t ftrace_graph_return = | ||
| 1650 | (trace_func_graph_ret_t)ftrace_stub; | ||
| 1651 | trace_func_graph_ent_t ftrace_graph_entry = | ||
| 1652 | (trace_func_graph_ent_t)ftrace_stub; | ||
| 1507 | 1653 | ||
| 1508 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ | 1654 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ |
| 1509 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | 1655 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) |
| @@ -1549,7 +1695,7 @@ free: | |||
| 1549 | } | 1695 | } |
| 1550 | 1696 | ||
| 1551 | /* Allocate a return stack for each task */ | 1697 | /* Allocate a return stack for each task */ |
| 1552 | static int start_return_tracing(void) | 1698 | static int start_graph_tracing(void) |
| 1553 | { | 1699 | { |
| 1554 | struct ftrace_ret_stack **ret_stack_list; | 1700 | struct ftrace_ret_stack **ret_stack_list; |
| 1555 | int ret; | 1701 | int ret; |
| @@ -1569,52 +1715,46 @@ static int start_return_tracing(void) | |||
| 1569 | return ret; | 1715 | return ret; |
| 1570 | } | 1716 | } |
| 1571 | 1717 | ||
| 1572 | int register_ftrace_return(trace_function_return_t func) | 1718 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
| 1719 | trace_func_graph_ent_t entryfunc) | ||
| 1573 | { | 1720 | { |
| 1574 | int ret = 0; | 1721 | int ret = 0; |
| 1575 | 1722 | ||
| 1576 | mutex_lock(&ftrace_sysctl_lock); | 1723 | mutex_lock(&ftrace_sysctl_lock); |
| 1577 | 1724 | ||
| 1578 | /* | 1725 | atomic_inc(&ftrace_graph_active); |
| 1579 | * Don't launch return tracing if normal function | 1726 | ret = start_graph_tracing(); |
| 1580 | * tracing is already running. | ||
| 1581 | */ | ||
| 1582 | if (ftrace_trace_function != ftrace_stub) { | ||
| 1583 | ret = -EBUSY; | ||
| 1584 | goto out; | ||
| 1585 | } | ||
| 1586 | atomic_inc(&ftrace_retfunc_active); | ||
| 1587 | ret = start_return_tracing(); | ||
| 1588 | if (ret) { | 1727 | if (ret) { |
| 1589 | atomic_dec(&ftrace_retfunc_active); | 1728 | atomic_dec(&ftrace_graph_active); |
| 1590 | goto out; | 1729 | goto out; |
| 1591 | } | 1730 | } |
| 1592 | ftrace_tracing_type = FTRACE_TYPE_RETURN; | 1731 | |
| 1593 | ftrace_function_return = func; | 1732 | ftrace_graph_return = retfunc; |
| 1594 | ftrace_startup(); | 1733 | ftrace_graph_entry = entryfunc; |
| 1734 | |||
| 1735 | ftrace_startup(FTRACE_START_FUNC_RET); | ||
| 1595 | 1736 | ||
| 1596 | out: | 1737 | out: |
| 1597 | mutex_unlock(&ftrace_sysctl_lock); | 1738 | mutex_unlock(&ftrace_sysctl_lock); |
| 1598 | return ret; | 1739 | return ret; |
| 1599 | } | 1740 | } |
| 1600 | 1741 | ||
| 1601 | void unregister_ftrace_return(void) | 1742 | void unregister_ftrace_graph(void) |
| 1602 | { | 1743 | { |
| 1603 | mutex_lock(&ftrace_sysctl_lock); | 1744 | mutex_lock(&ftrace_sysctl_lock); |
| 1604 | 1745 | ||
| 1605 | atomic_dec(&ftrace_retfunc_active); | 1746 | atomic_dec(&ftrace_graph_active); |
| 1606 | ftrace_function_return = (trace_function_return_t)ftrace_stub; | 1747 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
| 1607 | ftrace_shutdown(); | 1748 | ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub; |
| 1608 | /* Restore normal tracing type */ | 1749 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
| 1609 | ftrace_tracing_type = FTRACE_TYPE_ENTER; | ||
| 1610 | 1750 | ||
| 1611 | mutex_unlock(&ftrace_sysctl_lock); | 1751 | mutex_unlock(&ftrace_sysctl_lock); |
| 1612 | } | 1752 | } |
| 1613 | 1753 | ||
| 1614 | /* Allocate a return stack for newly created task */ | 1754 | /* Allocate a return stack for newly created task */ |
| 1615 | void ftrace_retfunc_init_task(struct task_struct *t) | 1755 | void ftrace_graph_init_task(struct task_struct *t) |
| 1616 | { | 1756 | { |
| 1617 | if (atomic_read(&ftrace_retfunc_active)) { | 1757 | if (atomic_read(&ftrace_graph_active)) { |
| 1618 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | 1758 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH |
| 1619 | * sizeof(struct ftrace_ret_stack), | 1759 | * sizeof(struct ftrace_ret_stack), |
| 1620 | GFP_KERNEL); | 1760 | GFP_KERNEL); |
| @@ -1626,7 +1766,7 @@ void ftrace_retfunc_init_task(struct task_struct *t) | |||
| 1626 | t->ret_stack = NULL; | 1766 | t->ret_stack = NULL; |
| 1627 | } | 1767 | } |
| 1628 | 1768 | ||
| 1629 | void ftrace_retfunc_exit_task(struct task_struct *t) | 1769 | void ftrace_graph_exit_task(struct task_struct *t) |
| 1630 | { | 1770 | { |
| 1631 | struct ftrace_ret_stack *ret_stack = t->ret_stack; | 1771 | struct ftrace_ret_stack *ret_stack = t->ret_stack; |
| 1632 | 1772 | ||
| @@ -1638,5 +1778,3 @@ void ftrace_retfunc_exit_task(struct task_struct *t) | |||
| 1638 | } | 1778 | } |
| 1639 | #endif | 1779 | #endif |
| 1640 | 1780 | ||
| 1641 | |||
| 1642 | |||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8df8fdd69c95..5811e0a5f732 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -804,7 +804,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
| 804 | spin_unlock(&trace_cmdline_lock); | 804 | spin_unlock(&trace_cmdline_lock); |
| 805 | } | 805 | } |
| 806 | 806 | ||
| 807 | static char *trace_find_cmdline(int pid) | 807 | char *trace_find_cmdline(int pid) |
| 808 | { | 808 | { |
| 809 | char *cmdline = "<...>"; | 809 | char *cmdline = "<...>"; |
| 810 | unsigned map; | 810 | unsigned map; |
| @@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
| 878 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 878 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
| 879 | } | 879 | } |
| 880 | 880 | ||
| 881 | #ifdef CONFIG_FUNCTION_RET_TRACER | 881 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 882 | static void __trace_function_return(struct trace_array *tr, | 882 | static void __trace_graph_entry(struct trace_array *tr, |
| 883 | struct trace_array_cpu *data, | 883 | struct trace_array_cpu *data, |
| 884 | struct ftrace_retfunc *trace, | 884 | struct ftrace_graph_ent *trace, |
| 885 | unsigned long flags, | 885 | unsigned long flags, |
| 886 | int pc) | 886 | int pc) |
| 887 | { | 887 | { |
| 888 | struct ring_buffer_event *event; | 888 | struct ring_buffer_event *event; |
| 889 | struct ftrace_ret_entry *entry; | 889 | struct ftrace_graph_ent_entry *entry; |
| 890 | unsigned long irq_flags; | 890 | unsigned long irq_flags; |
| 891 | 891 | ||
| 892 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 892 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
| @@ -898,12 +898,32 @@ static void __trace_function_return(struct trace_array *tr, | |||
| 898 | return; | 898 | return; |
| 899 | entry = ring_buffer_event_data(event); | 899 | entry = ring_buffer_event_data(event); |
| 900 | tracing_generic_entry_update(&entry->ent, flags, pc); | 900 | tracing_generic_entry_update(&entry->ent, flags, pc); |
| 901 | entry->ent.type = TRACE_FN_RET; | 901 | entry->ent.type = TRACE_GRAPH_ENT; |
| 902 | entry->ip = trace->func; | 902 | entry->graph_ent = *trace; |
| 903 | entry->parent_ip = trace->ret; | 903 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); |
| 904 | entry->rettime = trace->rettime; | 904 | } |
| 905 | entry->calltime = trace->calltime; | 905 | |
| 906 | entry->overrun = trace->overrun; | 906 | static void __trace_graph_return(struct trace_array *tr, |
| 907 | struct trace_array_cpu *data, | ||
| 908 | struct ftrace_graph_ret *trace, | ||
| 909 | unsigned long flags, | ||
| 910 | int pc) | ||
| 911 | { | ||
| 912 | struct ring_buffer_event *event; | ||
| 913 | struct ftrace_graph_ret_entry *entry; | ||
| 914 | unsigned long irq_flags; | ||
| 915 | |||
| 916 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
| 917 | return; | ||
| 918 | |||
| 919 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
| 920 | &irq_flags); | ||
| 921 | if (!event) | ||
| 922 | return; | ||
| 923 | entry = ring_buffer_event_data(event); | ||
| 924 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
| 925 | entry->ent.type = TRACE_GRAPH_RET; | ||
| 926 | entry->ret = *trace; | ||
| 907 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | 927 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); |
| 908 | } | 928 | } |
| 909 | #endif | 929 | #endif |
| @@ -1177,8 +1197,29 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 1177 | local_irq_restore(flags); | 1197 | local_irq_restore(flags); |
| 1178 | } | 1198 | } |
| 1179 | 1199 | ||
| 1180 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1200 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 1181 | void trace_function_return(struct ftrace_retfunc *trace) | 1201 | void trace_graph_entry(struct ftrace_graph_ent *trace) |
| 1202 | { | ||
| 1203 | struct trace_array *tr = &global_trace; | ||
| 1204 | struct trace_array_cpu *data; | ||
| 1205 | unsigned long flags; | ||
| 1206 | long disabled; | ||
| 1207 | int cpu; | ||
| 1208 | int pc; | ||
| 1209 | |||
| 1210 | raw_local_irq_save(flags); | ||
| 1211 | cpu = raw_smp_processor_id(); | ||
| 1212 | data = tr->data[cpu]; | ||
| 1213 | disabled = atomic_inc_return(&data->disabled); | ||
| 1214 | if (likely(disabled == 1)) { | ||
| 1215 | pc = preempt_count(); | ||
| 1216 | __trace_graph_entry(tr, data, trace, flags, pc); | ||
| 1217 | } | ||
| 1218 | atomic_dec(&data->disabled); | ||
| 1219 | raw_local_irq_restore(flags); | ||
| 1220 | } | ||
| 1221 | |||
| 1222 | void trace_graph_return(struct ftrace_graph_ret *trace) | ||
| 1182 | { | 1223 | { |
| 1183 | struct trace_array *tr = &global_trace; | 1224 | struct trace_array *tr = &global_trace; |
| 1184 | struct trace_array_cpu *data; | 1225 | struct trace_array_cpu *data; |
| @@ -1193,12 +1234,12 @@ void trace_function_return(struct ftrace_retfunc *trace) | |||
| 1193 | disabled = atomic_inc_return(&data->disabled); | 1234 | disabled = atomic_inc_return(&data->disabled); |
| 1194 | if (likely(disabled == 1)) { | 1235 | if (likely(disabled == 1)) { |
| 1195 | pc = preempt_count(); | 1236 | pc = preempt_count(); |
| 1196 | __trace_function_return(tr, data, trace, flags, pc); | 1237 | __trace_graph_return(tr, data, trace, flags, pc); |
| 1197 | } | 1238 | } |
| 1198 | atomic_dec(&data->disabled); | 1239 | atomic_dec(&data->disabled); |
| 1199 | raw_local_irq_restore(flags); | 1240 | raw_local_irq_restore(flags); |
| 1200 | } | 1241 | } |
| 1201 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | 1242 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 1202 | 1243 | ||
| 1203 | static struct ftrace_ops trace_ops __read_mostly = | 1244 | static struct ftrace_ops trace_ops __read_mostly = |
| 1204 | { | 1245 | { |
| @@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
| 2000 | trace_seq_print_cont(s, iter); | 2041 | trace_seq_print_cont(s, iter); |
| 2001 | break; | 2042 | break; |
| 2002 | } | 2043 | } |
| 2003 | case TRACE_FN_RET: { | 2044 | case TRACE_GRAPH_RET: { |
| 2004 | return print_return_function(iter); | 2045 | return print_graph_function(iter); |
| 2005 | break; | 2046 | } |
| 2047 | case TRACE_GRAPH_ENT: { | ||
| 2048 | return print_graph_function(iter); | ||
| 2006 | } | 2049 | } |
| 2007 | case TRACE_BRANCH: { | 2050 | case TRACE_BRANCH: { |
| 2008 | struct trace_branch *field; | 2051 | struct trace_branch *field; |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 3abd645e8af2..f96f4e787ff3 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -25,9 +25,11 @@ enum trace_type { | |||
| 25 | TRACE_BRANCH, | 25 | TRACE_BRANCH, |
| 26 | TRACE_BOOT_CALL, | 26 | TRACE_BOOT_CALL, |
| 27 | TRACE_BOOT_RET, | 27 | TRACE_BOOT_RET, |
| 28 | TRACE_FN_RET, | 28 | TRACE_GRAPH_RET, |
| 29 | TRACE_GRAPH_ENT, | ||
| 29 | TRACE_USER_STACK, | 30 | TRACE_USER_STACK, |
| 30 | TRACE_BTS, | 31 | TRACE_BTS, |
| 32 | TRACE_POWER, | ||
| 31 | 33 | ||
| 32 | __TRACE_LAST_TYPE | 34 | __TRACE_LAST_TYPE |
| 33 | }; | 35 | }; |
| @@ -56,14 +58,16 @@ struct ftrace_entry { | |||
| 56 | unsigned long parent_ip; | 58 | unsigned long parent_ip; |
| 57 | }; | 59 | }; |
| 58 | 60 | ||
| 61 | /* Function call entry */ | ||
| 62 | struct ftrace_graph_ent_entry { | ||
| 63 | struct trace_entry ent; | ||
| 64 | struct ftrace_graph_ent graph_ent; | ||
| 65 | }; | ||
| 66 | |||
| 59 | /* Function return entry */ | 67 | /* Function return entry */ |
| 60 | struct ftrace_ret_entry { | 68 | struct ftrace_graph_ret_entry { |
| 61 | struct trace_entry ent; | 69 | struct trace_entry ent; |
| 62 | unsigned long ip; | 70 | struct ftrace_graph_ret ret; |
| 63 | unsigned long parent_ip; | ||
| 64 | unsigned long long calltime; | ||
| 65 | unsigned long long rettime; | ||
| 66 | unsigned long overrun; | ||
| 67 | }; | 71 | }; |
| 68 | extern struct tracer boot_tracer; | 72 | extern struct tracer boot_tracer; |
| 69 | 73 | ||
| @@ -160,6 +164,11 @@ struct bts_entry { | |||
| 160 | unsigned long to; | 164 | unsigned long to; |
| 161 | }; | 165 | }; |
| 162 | 166 | ||
| 167 | struct trace_power { | ||
| 168 | struct trace_entry ent; | ||
| 169 | struct power_trace state_data; | ||
| 170 | }; | ||
| 171 | |||
| 163 | /* | 172 | /* |
| 164 | * trace_flag_type is an enumeration that holds different | 173 | * trace_flag_type is an enumeration that holds different |
| 165 | * states when a trace occurs. These are: | 174 | * states when a trace occurs. These are: |
| @@ -264,8 +273,12 @@ extern void __ftrace_bad_type(void); | |||
| 264 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ | 273 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ |
| 265 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ | 274 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ |
| 266 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ | 275 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ |
| 267 | IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\ | 276 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ |
| 277 | TRACE_GRAPH_ENT); \ | ||
| 278 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | ||
| 279 | TRACE_GRAPH_RET); \ | ||
| 268 | IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\ | 280 | IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\ |
| 281 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | ||
| 269 | __ftrace_bad_type(); \ | 282 | __ftrace_bad_type(); \ |
| 270 | } while (0) | 283 | } while (0) |
| 271 | 284 | ||
| @@ -397,9 +410,9 @@ void trace_function(struct trace_array *tr, | |||
| 397 | unsigned long ip, | 410 | unsigned long ip, |
| 398 | unsigned long parent_ip, | 411 | unsigned long parent_ip, |
| 399 | unsigned long flags, int pc); | 412 | unsigned long flags, int pc); |
| 400 | void | ||
| 401 | trace_function_return(struct ftrace_retfunc *trace); | ||
| 402 | 413 | ||
| 414 | void trace_graph_return(struct ftrace_graph_ret *trace); | ||
| 415 | void trace_graph_entry(struct ftrace_graph_ent *trace); | ||
| 403 | void trace_bts(struct trace_array *tr, | 416 | void trace_bts(struct trace_array *tr, |
| 404 | unsigned long from, | 417 | unsigned long from, |
| 405 | unsigned long to); | 418 | unsigned long to); |
| @@ -444,6 +457,7 @@ struct tracer_switch_ops { | |||
| 444 | struct tracer_switch_ops *next; | 457 | struct tracer_switch_ops *next; |
| 445 | }; | 458 | }; |
| 446 | 459 | ||
| 460 | char *trace_find_cmdline(int pid); | ||
| 447 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 461 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
| 448 | 462 | ||
| 449 | #ifdef CONFIG_DYNAMIC_FTRACE | 463 | #ifdef CONFIG_DYNAMIC_FTRACE |
| @@ -489,11 +503,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); | |||
| 489 | extern unsigned long trace_flags; | 503 | extern unsigned long trace_flags; |
| 490 | 504 | ||
| 491 | /* Standard output formatting function used for function return traces */ | 505 | /* Standard output formatting function used for function return traces */ |
| 492 | #ifdef CONFIG_FUNCTION_RET_TRACER | 506 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 493 | extern enum print_line_t print_return_function(struct trace_iterator *iter); | 507 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); |
| 494 | #else | 508 | #else |
| 495 | static inline enum print_line_t | 509 | static inline enum print_line_t |
| 496 | print_return_function(struct trace_iterator *iter) | 510 | print_graph_function(struct trace_iterator *iter) |
| 497 | { | 511 | { |
| 498 | return TRACE_TYPE_UNHANDLED; | 512 | return TRACE_TYPE_UNHANDLED; |
| 499 | } | 513 | } |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c new file mode 100644 index 000000000000..d31d695174aa --- /dev/null +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -0,0 +1,175 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * Function graph tracer. | ||
| 4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
| 5 | * Mostly borrowed from function tracer which | ||
| 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | #include <linux/debugfs.h> | ||
| 10 | #include <linux/uaccess.h> | ||
| 11 | #include <linux/ftrace.h> | ||
| 12 | #include <linux/fs.h> | ||
| 13 | |||
| 14 | #include "trace.h" | ||
| 15 | |||
| 16 | #define TRACE_GRAPH_INDENT 2 | ||
| 17 | |||
| 18 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | ||
| 19 | static struct tracer_opt trace_opts[] = { | ||
| 20 | /* Display overruns or not */ | ||
| 21 | { TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | ||
| 22 | { } /* Empty entry */ | ||
| 23 | }; | ||
| 24 | |||
| 25 | static struct tracer_flags tracer_flags = { | ||
| 26 | .val = 0, /* Don't display overruns by default */ | ||
| 27 | .opts = trace_opts | ||
| 28 | }; | ||
| 29 | |||
| 30 | /* pid on the last trace processed */ | ||
| 31 | static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; | ||
| 32 | |||
| 33 | static int graph_trace_init(struct trace_array *tr) | ||
| 34 | { | ||
| 35 | int cpu, ret; | ||
| 36 | |||
| 37 | for_each_online_cpu(cpu) | ||
| 38 | tracing_reset(tr, cpu); | ||
| 39 | |||
| 40 | ret = register_ftrace_graph(&trace_graph_return, | ||
| 41 | &trace_graph_entry); | ||
| 42 | if (ret) | ||
| 43 | return ret; | ||
| 44 | tracing_start_cmdline_record(); | ||
| 45 | |||
| 46 | return 0; | ||
| 47 | } | ||
| 48 | |||
| 49 | static void graph_trace_reset(struct trace_array *tr) | ||
| 50 | { | ||
| 51 | tracing_stop_cmdline_record(); | ||
| 52 | unregister_ftrace_graph(); | ||
| 53 | } | ||
| 54 | |||
| 55 | /* If the pid changed since the last trace, output this event */ | ||
| 56 | static int verif_pid(struct trace_seq *s, pid_t pid, int cpu) | ||
| 57 | { | ||
| 58 | char *comm; | ||
| 59 | |||
| 60 | if (last_pid[cpu] != -1 && last_pid[cpu] == pid) | ||
| 61 | return 1; | ||
| 62 | |||
| 63 | last_pid[cpu] = pid; | ||
| 64 | comm = trace_find_cmdline(pid); | ||
| 65 | |||
| 66 | return trace_seq_printf(s, "\nCPU[%03d]" | ||
| 67 | " ------------8<---------- thread %s-%d" | ||
| 68 | " ------------8<----------\n\n", | ||
| 69 | cpu, comm, pid); | ||
| 70 | } | ||
| 71 | |||
| 72 | static enum print_line_t | ||
| 73 | print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s, | ||
| 74 | struct trace_entry *ent, int cpu) | ||
| 75 | { | ||
| 76 | int i; | ||
| 77 | int ret; | ||
| 78 | |||
| 79 | if (!verif_pid(s, ent->pid, cpu)) | ||
| 80 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 81 | |||
| 82 | ret = trace_seq_printf(s, "CPU[%03d] ", cpu); | ||
| 83 | if (!ret) | ||
| 84 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 85 | |||
| 86 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | ||
| 87 | ret = trace_seq_printf(s, " "); | ||
| 88 | if (!ret) | ||
| 89 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 90 | } | ||
| 91 | |||
| 92 | ret = seq_print_ip_sym(s, call->func, 0); | ||
| 93 | if (!ret) | ||
| 94 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 95 | |||
| 96 | ret = trace_seq_printf(s, "() {\n"); | ||
| 97 | if (!ret) | ||
| 98 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 99 | return TRACE_TYPE_HANDLED; | ||
| 100 | } | ||
| 101 | |||
| 102 | static enum print_line_t | ||
| 103 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | ||
| 104 | struct trace_entry *ent, int cpu) | ||
| 105 | { | ||
| 106 | int i; | ||
| 107 | int ret; | ||
| 108 | |||
| 109 | if (!verif_pid(s, ent->pid, cpu)) | ||
| 110 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 111 | |||
| 112 | ret = trace_seq_printf(s, "CPU[%03d] ", cpu); | ||
| 113 | if (!ret) | ||
| 114 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 115 | |||
| 116 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | ||
| 117 | ret = trace_seq_printf(s, " "); | ||
| 118 | if (!ret) | ||
| 119 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 120 | } | ||
| 121 | |||
| 122 | ret = trace_seq_printf(s, "} "); | ||
| 123 | if (!ret) | ||
| 124 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 125 | |||
| 126 | ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime); | ||
| 127 | if (!ret) | ||
| 128 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 129 | |||
| 130 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { | ||
| 131 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | ||
| 132 | trace->overrun); | ||
| 133 | if (!ret) | ||
| 134 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 135 | } | ||
| 136 | return TRACE_TYPE_HANDLED; | ||
| 137 | } | ||
| 138 | |||
| 139 | enum print_line_t | ||
| 140 | print_graph_function(struct trace_iterator *iter) | ||
| 141 | { | ||
| 142 | struct trace_seq *s = &iter->seq; | ||
| 143 | struct trace_entry *entry = iter->ent; | ||
| 144 | |||
| 145 | switch (entry->type) { | ||
| 146 | case TRACE_GRAPH_ENT: { | ||
| 147 | struct ftrace_graph_ent_entry *field; | ||
| 148 | trace_assign_type(field, entry); | ||
| 149 | return print_graph_entry(&field->graph_ent, s, entry, | ||
| 150 | iter->cpu); | ||
| 151 | } | ||
| 152 | case TRACE_GRAPH_RET: { | ||
| 153 | struct ftrace_graph_ret_entry *field; | ||
| 154 | trace_assign_type(field, entry); | ||
| 155 | return print_graph_return(&field->ret, s, entry, iter->cpu); | ||
| 156 | } | ||
| 157 | default: | ||
| 158 | return TRACE_TYPE_UNHANDLED; | ||
| 159 | } | ||
| 160 | } | ||
| 161 | |||
| 162 | static struct tracer graph_trace __read_mostly = { | ||
| 163 | .name = "function-graph", | ||
| 164 | .init = graph_trace_init, | ||
| 165 | .reset = graph_trace_reset, | ||
| 166 | .print_line = print_graph_function, | ||
| 167 | .flags = &tracer_flags, | ||
| 168 | }; | ||
| 169 | |||
| 170 | static __init int init_graph_trace(void) | ||
| 171 | { | ||
| 172 | return register_tracer(&graph_trace); | ||
| 173 | } | ||
| 174 | |||
| 175 | device_initcall(init_graph_trace); | ||
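Once registered as "function-graph", the tracer is selected like any other (echo function-graph > /debug/tracing/current_tracer). Given print_graph_entry() and print_graph_return() above, the rendered trace is a brace-indented call tree; a hypothetical excerpt (thread, symbols and durations illustrative) would look like:

	CPU[001] ------------8<---------- thread bash-2794 ------------8<----------

	CPU[001] sys_read() {
	CPU[001]   fget_light() {
	CPU[001]   } 426
	CPU[001]   vfs_read() {
	CPU[001]   } 5318
	CPU[001] } 6927

The number after each closing brace is trace->rettime - trace->calltime in nanoseconds, and each nesting level indents by TRACE_GRAPH_INDENT (2) spaces.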
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c deleted file mode 100644 index e00d64509c9c..000000000000 --- a/kernel/trace/trace_functions_return.c +++ /dev/null | |||
| @@ -1,98 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * Function return tracer. | ||
| 4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
| 5 | * Mostly borrowed from function tracer which | ||
| 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | #include <linux/debugfs.h> | ||
| 10 | #include <linux/uaccess.h> | ||
| 11 | #include <linux/ftrace.h> | ||
| 12 | #include <linux/fs.h> | ||
| 13 | |||
| 14 | #include "trace.h" | ||
| 15 | |||
| 16 | |||
| 17 | #define TRACE_RETURN_PRINT_OVERRUN 0x1 | ||
| 18 | static struct tracer_opt trace_opts[] = { | ||
| 19 | /* Display overruns or not */ | ||
| 20 | { TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) }, | ||
| 21 | { } /* Empty entry */ | ||
| 22 | }; | ||
| 23 | |||
| 24 | static struct tracer_flags tracer_flags = { | ||
| 25 | .val = 0, /* Don't display overruns by default */ | ||
| 26 | .opts = trace_opts | ||
| 27 | }; | ||
| 28 | |||
| 29 | |||
| 30 | static int return_trace_init(struct trace_array *tr) | ||
| 31 | { | ||
| 32 | int cpu; | ||
| 33 | for_each_online_cpu(cpu) | ||
| 34 | tracing_reset(tr, cpu); | ||
| 35 | |||
| 36 | return register_ftrace_return(&trace_function_return); | ||
| 37 | } | ||
| 38 | |||
| 39 | static void return_trace_reset(struct trace_array *tr) | ||
| 40 | { | ||
| 41 | unregister_ftrace_return(); | ||
| 42 | } | ||
| 43 | |||
| 44 | |||
| 45 | enum print_line_t | ||
| 46 | print_return_function(struct trace_iterator *iter) | ||
| 47 | { | ||
| 48 | struct trace_seq *s = &iter->seq; | ||
| 49 | struct trace_entry *entry = iter->ent; | ||
| 50 | struct ftrace_ret_entry *field; | ||
| 51 | int ret; | ||
| 52 | |||
| 53 | if (entry->type == TRACE_FN_RET) { | ||
| 54 | trace_assign_type(field, entry); | ||
| 55 | ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip); | ||
| 56 | if (!ret) | ||
| 57 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 58 | |||
| 59 | ret = seq_print_ip_sym(s, field->ip, | ||
| 60 | trace_flags & TRACE_ITER_SYM_MASK); | ||
| 61 | if (!ret) | ||
| 62 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 63 | |||
| 64 | ret = trace_seq_printf(s, " (%llu ns)", | ||
| 65 | field->rettime - field->calltime); | ||
| 66 | if (!ret) | ||
| 67 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 68 | |||
| 69 | if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) { | ||
| 70 | ret = trace_seq_printf(s, " (Overruns: %lu)", | ||
| 71 | field->overrun); | ||
| 72 | if (!ret) | ||
| 73 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 74 | } | ||
| 75 | |||
| 76 | ret = trace_seq_printf(s, "\n"); | ||
| 77 | if (!ret) | ||
| 78 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 79 | |||
| 80 | return TRACE_TYPE_HANDLED; | ||
| 81 | } | ||
| 82 | return TRACE_TYPE_UNHANDLED; | ||
| 83 | } | ||
| 84 | |||
| 85 | static struct tracer return_trace __read_mostly = { | ||
| 86 | .name = "return", | ||
| 87 | .init = return_trace_init, | ||
| 88 | .reset = return_trace_reset, | ||
| 89 | .print_line = print_return_function, | ||
| 90 | .flags = &tracer_flags, | ||
| 91 | }; | ||
| 92 | |||
| 93 | static __init int init_return_trace(void) | ||
| 94 | { | ||
| 95 | return register_tracer(&return_trace); | ||
| 96 | } | ||
| 97 | |||
| 98 | device_initcall(init_return_trace); | ||
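For contrast with the graph output above, the removed print_return_function() emitted one flat line per function return, caller first; an illustrative line in its format (symbols and timing made up):

	sys_read+0x4b/0x80 -> fget_light (426 ns)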
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c new file mode 100644 index 000000000000..a7172a352f62 --- /dev/null +++ b/kernel/trace/trace_power.c | |||
| @@ -0,0 +1,179 @@ | |||
| 1 | /* | ||
| 2 | * ring buffer based C-state tracer | ||
| 3 | * | ||
| 4 | * Arjan van de Ven <arjan@linux.intel.com> | ||
| 5 | * Copyright (C) 2008 Intel Corporation | ||
| 6 | * | ||
| 7 | * Much is borrowed from trace_boot.c which is | ||
| 8 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/debugfs.h> | ||
| 14 | #include <linux/ftrace.h> | ||
| 15 | #include <linux/kallsyms.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | |||
| 18 | #include "trace.h" | ||
| 19 | |||
| 20 | static struct trace_array *power_trace; | ||
| 21 | static int __read_mostly trace_power_enabled; | ||
| 22 | |||
| 23 | |||
| 24 | static void start_power_trace(struct trace_array *tr) | ||
| 25 | { | ||
| 26 | trace_power_enabled = 1; | ||
| 27 | } | ||
| 28 | |||
| 29 | static void stop_power_trace(struct trace_array *tr) | ||
| 30 | { | ||
| 31 | trace_power_enabled = 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | |||
| 35 | static int power_trace_init(struct trace_array *tr) | ||
| 36 | { | ||
| 37 | int cpu; | ||
| 38 | power_trace = tr; | ||
| 39 | |||
| 40 | trace_power_enabled = 1; | ||
| 41 | |||
| 42 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
| 43 | tracing_reset(tr, cpu); | ||
| 44 | return 0; | ||
| 45 | } | ||
| 46 | |||
| 47 | static enum print_line_t power_print_line(struct trace_iterator *iter) | ||
| 48 | { | ||
| 49 | int ret = 0; | ||
| 50 | struct trace_entry *entry = iter->ent; | ||
| 51 | struct trace_power *field; | ||
| 52 | struct power_trace *it; | ||
| 53 | struct trace_seq *s = &iter->seq; | ||
| 54 | struct timespec stamp; | ||
| 55 | struct timespec duration; | ||
| 56 | |||
| 57 | if (entry->type != TRACE_POWER) | ||
| 58 | return TRACE_TYPE_UNHANDLED; | ||
| 59 | | ||
| 60 | trace_assign_type(field, entry); | ||
| 61 | it = &field->state_data; | ||
| 62 | stamp = ktime_to_timespec(it->stamp); | ||
| 63 | duration = ktime_to_timespec(ktime_sub(it->end, it->stamp)); | ||
| 64 | | ||
| 65 | if (it->type == POWER_CSTATE) | ||
| 66 | ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n", | ||
| 67 | stamp.tv_sec, | ||
| 68 | stamp.tv_nsec, | ||
| 69 | it->state, iter->cpu, | ||
| 70 | duration.tv_sec, | ||
| 71 | duration.tv_nsec); | ||
| 72 | if (it->type == POWER_PSTATE) | ||
| 73 | ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n", | ||
| 74 | stamp.tv_sec, | ||
| 75 | stamp.tv_nsec, | ||
| 76 | it->state, iter->cpu); | ||
| 77 | if (!ret) | ||
| 78 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 79 | return TRACE_TYPE_HANDLED; | ||
| 80 | } | ||
| 81 | |||
| 82 | static struct tracer power_tracer __read_mostly = | ||
| 83 | { | ||
| 84 | .name = "power", | ||
| 85 | .init = power_trace_init, | ||
| 86 | .start = start_power_trace, | ||
| 87 | .stop = stop_power_trace, | ||
| 88 | .reset = stop_power_trace, | ||
| 89 | .print_line = power_print_line, | ||
| 90 | }; | ||
| 91 | |||
| 92 | static int init_power_trace(void) | ||
| 93 | { | ||
| 94 | return register_tracer(&power_tracer); | ||
| 95 | } | ||
| 96 | device_initcall(init_power_trace); | ||
| 97 | |||
| 98 | void trace_power_start(struct power_trace *it, unsigned int type, | ||
| 99 | unsigned int level) | ||
| 100 | { | ||
| 101 | if (!trace_power_enabled) | ||
| 102 | return; | ||
| 103 | |||
| 104 | memset(it, 0, sizeof(struct power_trace)); | ||
| 105 | it->state = level; | ||
| 106 | it->type = type; | ||
| 107 | it->stamp = ktime_get(); | ||
| 108 | } | ||
| 109 | EXPORT_SYMBOL_GPL(trace_power_start); | ||
| 110 | |||
| 111 | |||
| 112 | void trace_power_end(struct power_trace *it) | ||
| 113 | { | ||
| 114 | struct ring_buffer_event *event; | ||
| 115 | struct trace_power *entry; | ||
| 116 | struct trace_array_cpu *data; | ||
| 117 | unsigned long irq_flags; | ||
| 118 | struct trace_array *tr = power_trace; | ||
| 119 | |||
| 120 | if (!trace_power_enabled) | ||
| 121 | return; | ||
| 122 | |||
| 123 | preempt_disable(); | ||
| 124 | it->end = ktime_get(); | ||
| 125 | data = tr->data[smp_processor_id()]; | ||
| 126 | |||
| 127 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
| 128 | &irq_flags); | ||
| 129 | if (!event) | ||
| 130 | goto out; | ||
| 131 | entry = ring_buffer_event_data(event); | ||
| 132 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
| 133 | entry->ent.type = TRACE_POWER; | ||
| 134 | entry->state_data = *it; | ||
| 135 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
| 136 | |||
| 137 | trace_wake_up(); | ||
| 138 | |||
| 139 | out: | ||
| 140 | preempt_enable(); | ||
| 141 | } | ||
| 142 | EXPORT_SYMBOL_GPL(trace_power_end); | ||
| 143 | |||
| 144 | void trace_power_mark(struct power_trace *it, unsigned int type, | ||
| 145 | unsigned int level) | ||
| 146 | { | ||
| 147 | struct ring_buffer_event *event; | ||
| 148 | struct trace_power *entry; | ||
| 149 | struct trace_array_cpu *data; | ||
| 150 | unsigned long irq_flags; | ||
| 151 | struct trace_array *tr = power_trace; | ||
| 152 | |||
| 153 | if (!trace_power_enabled) | ||
| 154 | return; | ||
| 155 | |||
| 156 | memset(it, 0, sizeof(struct power_trace)); | ||
| 157 | it->state = level; | ||
| 158 | it->type = type; | ||
| 159 | it->stamp = ktime_get(); | ||
| 160 | preempt_disable(); | ||
| 161 | it->end = it->stamp; | ||
| 162 | data = tr->data[smp_processor_id()]; | ||
| 163 | |||
| 164 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
| 165 | &irq_flags); | ||
| 166 | if (!event) | ||
| 167 | goto out; | ||
| 168 | entry = ring_buffer_event_data(event); | ||
| 169 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
| 170 | entry->ent.type = TRACE_POWER; | ||
| 171 | entry->state_data = *it; | ||
| 172 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
| 173 | |||
| 174 | trace_wake_up(); | ||
| 175 | |||
| 176 | out: | ||
| 177 | preempt_enable(); | ||
| 178 | } | ||
| 179 | EXPORT_SYMBOL_GPL(trace_power_mark); | ||
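trace_power_start()/trace_power_end() bracket a power transition, while trace_power_mark() logs a point event (used for the PSTATE lines above). A sketch of a call site; the function name and sleep primitive here are hypothetical, the real hooks belong in the arch idle and cpufreq paths, and the declarations presumably sit alongside the other ftrace hooks in <linux/ftrace.h>:

	static void my_enter_cstate(int state)	/* hypothetical caller */
	{
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, state);	/* stamps the start */
		safe_halt();		/* stand-in for the actual sleep */
		trace_power_end(&it);	/* stamps the end, logs the interval */
	}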
diff --git a/mm/bounce.c b/mm/bounce.c index 06722c403058..bf0cf7c8387b 100644 --- a/mm/bounce.c +++ b/mm/bounce.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/hash.h> | 14 | #include <linux/hash.h> |
| 15 | #include <linux/highmem.h> | 15 | #include <linux/highmem.h> |
| 16 | #include <linux/blktrace_api.h> | 16 | #include <linux/blktrace_api.h> |
| 17 | #include <trace/block.h> | ||
| 17 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
| 18 | 19 | ||
| 19 | #define POOL_SIZE 64 | 20 | #define POOL_SIZE 64 |
| @@ -21,6 +22,8 @@ | |||
| 21 | 22 | ||
| 22 | static mempool_t *page_pool, *isa_page_pool; | 23 | static mempool_t *page_pool, *isa_page_pool; |
| 23 | 24 | ||
| 25 | DEFINE_TRACE(block_bio_bounce); | ||
| 26 | |||
| 24 | #ifdef CONFIG_HIGHMEM | 27 | #ifdef CONFIG_HIGHMEM |
| 25 | static __init int init_emergency_pool(void) | 28 | static __init int init_emergency_pool(void) |
| 26 | { | 29 | { |
| @@ -222,7 +225,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, | |||
| 222 | if (!bio) | 225 | if (!bio) |
| 223 | return; | 226 | return; |
| 224 | 227 | ||
| 225 | blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE); | 228 | trace_block_bio_bounce(q, *bio_orig); |
| 226 | 229 | ||
| 227 | /* | 230 | /* |
| 228 | * at least one page was bounced, fill in possible non-highmem | 231 | * at least one page was bounced, fill in possible non-highmem |
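DEFINE_TRACE(block_bio_bounce) instantiates the tracepoint declared in <trace/block.h>, and trace_block_bio_bounce(q, *bio_orig) replaces the hardwired blk_add_trace_bio() call, so any subsystem can attach a probe. A minimal sketch, assuming the usual register_trace_<name>() helpers generated by DECLARE_TRACE() (probe body hypothetical):

	static void probe_bio_bounce(struct request_queue *q, struct bio *bio)
	{
		/* e.g. count bounced bios per queue */
	}

	/* attach / detach, typically from a module's init/exit */
	register_trace_block_bio_bounce(probe_bio_bounce);
	unregister_trace_block_bio_bounce(probe_bio_bounce);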
diff --git a/scripts/trace/power.pl b/scripts/trace/power.pl new file mode 100644 index 000000000000..4f729b3501e0 --- /dev/null +++ b/scripts/trace/power.pl | |||
| @@ -0,0 +1,108 @@ | |||
| 1 | #!/usr/bin/perl | ||
| 2 | |||
| 3 | # Copyright 2008, Intel Corporation | ||
| 4 | # | ||
| 5 | # This file is part of the Linux kernel | ||
| 6 | # | ||
| 7 | # This program file is free software; you can redistribute it and/or modify it | ||
| 8 | # under the terms of the GNU General Public License as published by the | ||
| 9 | # Free Software Foundation; version 2 of the License. | ||
| 10 | # | ||
| 11 | # This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 12 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 13 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 14 | # for more details. | ||
| 15 | # | ||
| 16 | # You should have received a copy of the GNU General Public License | ||
| 17 | # along with this program in a file named COPYING; if not, write to the | ||
| 18 | # Free Software Foundation, Inc., | ||
| 19 | # 51 Franklin Street, Fifth Floor, | ||
| 20 | # Boston, MA 02110-1301 USA | ||
| 21 | # | ||
| 22 | # Authors: | ||
| 23 | # Arjan van de Ven <arjan@linux.intel.com> | ||
| 24 | |||
| 25 | |||
| 26 | # | ||
| 27 | # This script turns a cstate ftrace output into a SVG graphic that shows | ||
| 28 | # historic C-state information | ||
| 29 | # | ||
| 30 | # | ||
| 31 | # cat /sys/kernel/debug/tracing/trace | perl power.pl > out.svg | ||
| 32 | # | ||
| 33 | |||
| 34 | my @styles; | ||
| 35 | my $base = 0; | ||
| 36 | |||
| 37 | my @pstate_last; | ||
| 38 | my @pstate_state; | ||
| 39 | |||
| 40 | $styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 41 | $styles[1] = "fill:rgb(0,255,0);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 42 | $styles[2] = "fill:rgb(255,0,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 43 | $styles[3] = "fill:rgb(255,255,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 44 | $styles[4] = "fill:rgb(255,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 45 | $styles[5] = "fill:rgb(0,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 46 | $styles[6] = "fill:rgb(0,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 47 | $styles[7] = "fill:rgb(0,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 48 | $styles[8] = "fill:rgb(0,25,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; | ||
| 49 | |||
| 50 | |||
| 51 | print "<?xml version=\"1.0\" standalone=\"no\"?> \n"; | ||
| 52 | print "<svg width=\"10000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n"; | ||
| 53 | |||
| 54 | my $scale = 30000.0; | ||
| 55 | while (<>) { | ||
| 56 | my $line = $_; | ||
| 57 | if ($line =~ /([0-9\.]+)\] CSTATE: Going to C([0-9]) on cpu ([0-9]+) for ([0-9\.]+)/) { | ||
| 58 | if ($base == 0) { | ||
| 59 | $base = $1; | ||
| 60 | } | ||
| 61 | my $time = $1 - $base; | ||
| 62 | $time = $time * $scale; | ||
| 63 | my $C = $2; | ||
| 64 | my $cpu = $3; | ||
| 65 | my $y = 400 * $cpu; | ||
| 66 | my $duration = $4 * $scale; | ||
| 67 | my $msec = int($4 * 100000)/100.0; | ||
| 68 | my $height = $C * 20; | ||
| 69 | my $style = $styles[$C]; | ||
| 70 | |||
| 71 | $y = $y + 140 - $height; | ||
| 72 | |||
| 73 | my $x2 = $time + 4; | ||
| 74 | my $y2 = $y + 4; | ||
| 75 | |||
| 76 | |||
| 77 | print "<rect x=\"$time\" width=\"$duration\" y=\"$y\" height=\"$height\" style=\"$style\"/>\n"; | ||
| 78 | print "<text transform=\"translate($x2,$y2) rotate(90)\">C$C $msec</text>\n"; | ||
| 79 | } | ||
| 80 | if ($line =~ /([0-9\.]+)\] PSTATE: Going to P([0-9]) on cpu ([0-9]+)/) { | ||
| 81 | my $time = $1 - $base; | ||
| 82 | my $state = $2; | ||
| 83 | my $cpu = $3; | ||
| 84 | |||
| 85 | if (defined($pstate_last[$cpu])) { | ||
| 86 | my $from = $pstate_last[$cpu]; | ||
| 87 | my $oldstate = $pstate_state[$cpu]; | ||
| 88 | my $duration = ($time-$from) * $scale; | ||
| 89 | |||
| 90 | $from = $from * $scale; | ||
| 91 | my $to = $from + $duration; | ||
| 92 | my $height = 140 - ($oldstate * (140/8)); | ||
| 93 | |||
| 94 | my $y = 400 * $cpu + 200 + $height; | ||
| 95 | my $y2 = $y+4; | ||
| 96 | my $style = $styles[8]; | ||
| 97 | |||
| 98 | print "<rect x=\"$from\" y=\"$y\" width=\"$duration\" height=\"5\" style=\"$style\"/>\n"; | ||
| 99 | print "<text transform=\"translate($from,$y2)\">P$oldstate (cpu $cpu)</text>\n"; | ||
| 100 | }; | ||
| 101 | |||
| 102 | $pstate_last[$cpu] = $time; | ||
| 103 | $pstate_state[$cpu] = $state; | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | |||
| 108 | print "</svg>\n"; | ||
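To make the geometry concrete: with $scale = 30000 (SVG units per second), a trace line such as [ 5.000000000] CSTATE: Going to C3 on cpu 0 for 0.002000000 (values illustrative), seen when $base is already 4.0, gives $time = 1.0 * 30000 = 30000, $duration = 0.002 * 30000 = 60, $height = 3 * 20 = 60 and $y = 400 * 0 + 140 - 60 = 80, so the script emits:

	<rect x="30000" width="60" y="80" height="60" style="..."/>
	<text transform="translate(30004,84) rotate(90)">C3 2</text>

Each CPU gets its own 400-unit-high band, with deeper C-states drawn as taller bars.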
