-rw-r--r--   kernel/trace/Kconfig        7
-rw-r--r--   kernel/trace/ftrace.c       5
-rw-r--r--   kernel/trace/trace.c       65
-rw-r--r--   kernel/trace/trace_boot.c  11
4 files changed, 70 insertions, 18 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index dde1d46f77e5..28f2644484d9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -164,9 +164,8 @@ config BOOT_TRACER
           representation of the delays during initcalls - but the raw
           /debug/tracing/trace text output is readable too.
 
-          ( Note that tracing self tests can't be enabled if this tracer is
-          selected, because the self-tests are an initcall as well and that
-          would invalidate the boot trace. )
+          You must pass in ftrace=initcall to the kernel command line
+          to enable this on bootup.
 
 config TRACE_BRANCH_PROFILING
         bool "Trace likely/unlikely profiler"
@@ -326,7 +325,7 @@ config FTRACE_SELFTEST
 
 config FTRACE_STARTUP_TEST
         bool "Perform a startup test on ftrace"
-        depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
+        depends on TRACING && DEBUG_KERNEL
         select FTRACE_SELFTEST
         help
           This option performs a series of startup tests on ftrace. On bootup
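
With this change the boot tracer no longer starts unconditionally; per the new help text above, it is requested with ftrace=initcall on the kernel command line. A purely illustrative boot-loader entry (kernel image path and root device are placeholders, not taken from this patch):

    linux /boot/vmlinuz root=/dev/sda1 ro ftrace=initcall
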
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7e9a20b69939..68610031780b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1729,9 +1729,12 @@ static void clear_ftrace_pid(struct pid *pid)
 {
         struct task_struct *p;
 
+        rcu_read_lock();
         do_each_pid_task(pid, PIDTYPE_PID, p) {
                 clear_tsk_trace_trace(p);
         } while_each_pid_task(pid, PIDTYPE_PID, p);
+        rcu_read_unlock();
+
         put_pid(pid);
 }
 
@@ -1739,9 +1742,11 @@ static void set_ftrace_pid(struct pid *pid)
 {
         struct task_struct *p;
 
+        rcu_read_lock();
         do_each_pid_task(pid, PIDTYPE_PID, p) {
                 set_tsk_trace_trace(p);
         } while_each_pid_task(pid, PIDTYPE_PID, p);
+        rcu_read_unlock();
 }
 
 static void clear_ftrace_pid_task(struct pid **pid)
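
The two hunks above wrap the pid walk in an RCU read-side critical section: do_each_pid_task()/while_each_pid_task() follow RCU-protected links from the struct pid to its tasks, so the caller must hold rcu_read_lock() (or tasklist_lock) across the loop. A minimal sketch of the same idiom, where the helper name and the counting action are invented for illustration:

    #include <linux/pid.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* Hypothetical helper: count the tasks currently attached to @pid. */
    static int count_tasks_for_pid(struct pid *pid)
    {
            struct task_struct *p;
            int count = 0;

            rcu_read_lock();        /* protects the pid -> task links */
            do_each_pid_task(pid, PIDTYPE_PID, p) {
                    count++;
            } while_each_pid_task(pid, PIDTYPE_PID, p);
            rcu_read_unlock();

            return count;
    }
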
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 152d0969adf8..bbdfaa2cbdb9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -53,6 +53,11 @@ unsigned long __read_mostly tracing_thresh;
  */
 static bool __read_mostly tracing_selftest_running;
 
+/*
+ * If a tracer is running, we do not want to run SELFTEST.
+ */
+static bool __read_mostly tracing_selftest_disabled;
+
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
         { }
@@ -110,14 +115,19 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  */
 int ftrace_dump_on_oops;
 
-static int tracing_set_tracer(char *buf);
+static int tracing_set_tracer(const char *buf);
+
+#define BOOTUP_TRACER_SIZE             100
+static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-        tracing_set_tracer(str);
+        strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+        default_bootup_tracer = bootup_tracer_buf;
         return 1;
 }
-__setup("ftrace", set_ftrace);
+__setup("ftrace=", set_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -469,7 +479,7 @@ int register_tracer(struct tracer *type)
                 type->flags->opts = dummy_tracer_opt;
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
-        if (type->selftest) {
+        if (type->selftest && !tracing_selftest_disabled) {
                 struct tracer *saved_tracer = current_trace;
                 struct trace_array *tr = &global_trace;
                 int i;
@@ -511,8 +521,25 @@ int register_tracer(struct tracer *type)
  out:
         tracing_selftest_running = false;
         mutex_unlock(&trace_types_lock);
-        lock_kernel();
 
+        if (!ret && default_bootup_tracer) {
+                if (!strncmp(default_bootup_tracer, type->name,
+                             BOOTUP_TRACER_SIZE)) {
+                        printk(KERN_INFO "Starting tracer '%s'\n",
+                               type->name);
+                        /* Do we want this tracer to start on bootup? */
+                        tracing_set_tracer(type->name);
+                        default_bootup_tracer = NULL;
+                        /* disable other selftests, since this will break it. */
+                        tracing_selftest_disabled = 1;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+                        printk(KERN_INFO "Disabling FTRACE selftests due"
+                               " to running tracer '%s'\n", type->name);
+#endif
+                }
+        }
+
+        lock_kernel();
         return ret;
 }
 
@@ -2166,7 +2193,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static int tracing_set_tracer(char *buf)
+static int tracing_set_tracer(const char *buf)
 {
         struct trace_array *tr = &global_trace;
         struct tracer *t;
@@ -3061,12 +3088,9 @@ __init static int tracer_alloc_buffers(void)
         trace_init_cmdlines();
 
         register_tracer(&nop_trace);
+        current_trace = &nop_trace;
 #ifdef CONFIG_BOOT_TRACER
         register_tracer(&boot_tracer);
-        current_trace = &boot_tracer;
-        current_trace->init(&global_trace);
-#else
-        current_trace = &nop_trace;
 #endif
         /* All seems OK, enable tracing */
         tracing_disabled = 0;
@@ -3084,5 +3108,26 @@ out_free_buffer_mask:
  out:
         return ret;
 }
+
+__init static int clear_boot_tracer(void)
+{
+        /*
+         * The default tracer at boot buffer is an init section.
+         * This function is called in lateinit. If we did not
+         * find the boot tracer, then clear it out, to prevent
+         * later registration from accessing the buffer that is
+         * about to be freed.
+         */
+        if (!default_bootup_tracer)
+                return 0;
+
+        printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
+               default_bootup_tracer);
+        default_bootup_tracer = NULL;
+
+        return 0;
+}
+
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
+late_initcall(clear_boot_tracer);
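
Taken together, the trace.c hunks turn "ftrace=" into a deferred request: set_ftrace() only records the tracer name in an __initdata buffer, register_tracer() starts the tracer (and disables the startup selftests) once a tracer with that name is registered, and clear_boot_tracer() drops the stale pointer at late_initcall time because the buffer lives in an init section. A rough, self-contained userspace sketch of that handshake, with all names invented for illustration:

    #include <stdio.h>
    #include <string.h>

    #define NAME_SIZE 100

    static char requested_buf[NAME_SIZE];
    static char *requested;          /* set by the "boot parameter", cleared once handled */
    static int selftests_disabled;   /* stands in for tracing_selftest_disabled */

    /* boot-parameter stage: only remember the string, nothing is started yet */
    static void set_requested(const char *str)
    {
            strncpy(requested_buf, str, NAME_SIZE - 1);
            requested = requested_buf;
    }

    /* registration stage: start the tracer only if it is the one asked for */
    static void register_provider(const char *name)
    {
            if (requested && !strncmp(requested, name, NAME_SIZE)) {
                    printf("Starting tracer '%s'\n", name);
                    requested = NULL;
                    selftests_disabled = 1;
            }
    }

    /* late stage: complain if the requested tracer never showed up */
    static void clear_requested(void)
    {
            if (!requested)
                    return;
            printf("bootup tracer '%s' not registered.\n", requested);
            requested = NULL;
    }

    int main(void)
    {
            set_requested("initcall");      /* ftrace=initcall on the command line */
            register_provider("nop");       /* not the requested tracer: ignored   */
            register_provider("initcall");  /* match: "started", selftests off     */
            clear_requested();              /* nothing left to report              */
            return selftests_disabled ? 0 : 1;
    }
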
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 0e94b3d091f7..1f07895977a0 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -28,13 +28,13 @@ void start_boot_trace(void)
 
 void enable_boot_trace(void)
 {
-        if (pre_initcalls_finished)
+        if (boot_trace && pre_initcalls_finished)
                 tracing_start_sched_switch_record();
 }
 
 void disable_boot_trace(void)
 {
-        if (pre_initcalls_finished)
+        if (boot_trace && pre_initcalls_finished)
                 tracing_stop_sched_switch_record();
 }
 
@@ -43,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr)
         int cpu;
         boot_trace = tr;
 
+        if (!tr)
+                return 0;
+
         for_each_cpu(cpu, cpu_possible_mask)
                 tracing_reset(tr, cpu);
 
@@ -132,7 +135,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
         unsigned long irq_flags;
         struct trace_array *tr = boot_trace;
 
-        if (!pre_initcalls_finished)
+        if (!tr || !pre_initcalls_finished)
                 return;
 
         /* Get its name now since this function could
@@ -164,7 +167,7 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
         unsigned long irq_flags;
         struct trace_array *tr = boot_trace;
 
-        if (!pre_initcalls_finished)
+        if (!tr || !pre_initcalls_finished)
                 return;
 
         sprint_symbol(bt->func, (unsigned long)fn);
