author		Steven Rostedt (Red Hat) <srostedt@redhat.com>	2013-03-07 22:48:09 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2013-03-15 00:35:53 -0400
commit		55034cd6e648155393b0d665eef76b38d49ad6bf (patch)
tree		3d51bc6a1f8c7bdf7f728113b3853f4c20441be9 /kernel/trace/trace.c
parent		f4e781c0a89d5810729772290441ac7d61f321ec (diff)
tracing: Add alloc_snapshot kernel command line parameter
If debugging the kernel, and the developer wants to use
tracing_snapshot() in places where tracing_snapshot_alloc() may
be difficult (or more likely, the developer is lazy and doesn't
want to bother with tracing_snapshot_alloc() at all), then adding

  alloc_snapshot

to the kernel command line will tell ftrace to allocate the
snapshot buffer (if configured) when it allocates the main
tracing buffer.
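
For illustration, a minimal sketch of the intended use; the debug hook
below is hypothetical, only the alloc_snapshot parameter and
tracing_snapshot() come from this commit:

/*
 * Boot with "alloc_snapshot" (and, say, "ftrace=function") so the
 * snapshot buffer already exists by the time this hook runs.
 */
#include <linux/kernel.h>	/* declares tracing_snapshot() */

static void my_debug_hook(void)	/* hypothetical call site */
{
	/*
	 * Swap the live ring buffer into the snapshot buffer.  Without
	 * alloc_snapshot on the command line, this would first require
	 * a call to tracing_snapshot_alloc().
	 */
	tracing_snapshot();
}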
I also noticed that ring_buffer_expanded and tracing_selftest_disabled
mixed the booleans "true" and "false" with "0" and "1". I cleaned
that up too.
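
In sketch form, that cleanup is purely mechanical (lifted from the
diff below):

/* before: boolean state carried in an int */
int ring_buffer_expanded;
ring_buffer_expanded = 1;

/* after: the type matches the intent */
bool ring_buffer_expanded;
ring_buffer_expanded = true;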
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	81
1 file changed, 48 insertions, 33 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 57b4220d96a9..4021a5e66412 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -47,7 +47,7 @@
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
  */
-int ring_buffer_expanded;
+bool ring_buffer_expanded;
 
 /*
  * We need to change this state when a selftest is running.
@@ -121,12 +121,14 @@ static int tracing_set_tracer(const char *buf);
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
+static bool allocate_snapshot;
+
 static int __init set_cmdline_ftrace(char *str)
 {
 	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
-	ring_buffer_expanded = 1;
+	ring_buffer_expanded = true;
 	return 1;
 }
 __setup("ftrace=", set_cmdline_ftrace);
@@ -147,6 +149,15 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+static int __init alloc_snapshot(char *str)
+{
+	allocate_snapshot = true;
+	/* We also need the main ring buffer expanded */
+	ring_buffer_expanded = true;
+	return 1;
+}
+__setup("alloc_snapshot", alloc_snapshot);
+
 
 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 static char *trace_boot_options __initdata;
@@ -951,7 +962,7 @@ int register_tracer(struct tracer *type)
 	tracing_set_tracer(type->name);
 	default_bootup_tracer = NULL;
 	/* disable other selftests, since this will break it. */
-	tracing_selftest_disabled = 1;
+	tracing_selftest_disabled = true;
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
 	       type->name);
@@ -3318,7 +3329,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 	 * we use the size that was given, and we can forget about
 	 * expanding it later.
 	 */
-	ring_buffer_expanded = 1;
+	ring_buffer_expanded = true;
 
 	/* May be called before buffers are initialized */
 	if (!tr->trace_buffer.buffer)
@@ -5396,53 +5407,57 @@ static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
 	}
 }
 
-static int allocate_trace_buffers(struct trace_array *tr, int size)
+static int
+allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
 {
 	enum ring_buffer_flags rb_flags;
 
 	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 
-	tr->trace_buffer.buffer = ring_buffer_alloc(size, rb_flags);
-	if (!tr->trace_buffer.buffer)
-		goto out_free;
+	buf->buffer = ring_buffer_alloc(size, rb_flags);
+	if (!buf->buffer)
+		return -ENOMEM;
 
-	tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!tr->trace_buffer.data)
-		goto out_free;
+	buf->data = alloc_percpu(struct trace_array_cpu);
+	if (!buf->data) {
+		ring_buffer_free(buf->buffer);
+		return -ENOMEM;
+	}
 
-	init_trace_buffers(tr, &tr->trace_buffer);
+	init_trace_buffers(tr, buf);
 
 	/* Allocate the first page for all buffers */
 	set_buffer_entries(&tr->trace_buffer,
 			   ring_buffer_size(tr->trace_buffer.buffer, 0));
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-
-	tr->max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
-	if (!tr->max_buffer.buffer)
-		goto out_free;
-
-	tr->max_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!tr->max_buffer.data)
-		goto out_free;
+	return 0;
+}
 
-	init_trace_buffers(tr, &tr->max_buffer);
+static int allocate_trace_buffers(struct trace_array *tr, int size)
+{
+	int ret;
 
-	set_buffer_entries(&tr->max_buffer, 1);
-#endif
-	return 0;
+	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+	if (ret)
+		return ret;
 
-out_free:
-	if (tr->trace_buffer.buffer)
+#ifdef CONFIG_TRACER_MAX_TRACE
+	ret = allocate_trace_buffer(tr, &tr->max_buffer,
+				    allocate_snapshot ? size : 1);
+	if (WARN_ON(ret)) {
 		ring_buffer_free(tr->trace_buffer.buffer);
 		free_percpu(tr->trace_buffer.data);
+		return -ENOMEM;
+	}
+	tr->allocated_snapshot = allocate_snapshot;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->max_buffer.buffer)
-		ring_buffer_free(tr->max_buffer.buffer);
-	free_percpu(tr->max_buffer.data);
+	/*
+	 * Only the top level trace array gets its snapshot allocated
+	 * from the kernel command line.
+	 */
+	allocate_snapshot = false;
 #endif
-	return -ENOMEM;
+	return 0;
 }
 
 static int new_instance_create(const char *name)
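
As a side note on the mechanism: __setup() from <linux/init.h> is how
flag-style boot parameters such as alloc_snapshot are registered. A
minimal standalone sketch (my_flag is illustrative, not from this
commit):

#include <linux/init.h>
#include <linux/types.h>

static bool my_flag;	/* stands in for allocate_snapshot */

/* Runs during early boot when "my_flag" appears on the kernel
 * command line; returning 1 marks the option as handled. */
static int __init my_flag_setup(char *str)
{
	my_flag = true;
	return 1;
}
__setup("my_flag", my_flag_setup);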