diff options
author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>  2013-03-12 11:17:54 -0400
committer Steven Rostedt <rostedt@goodmis.org>  2013-03-15 00:36:00 -0400
commit    3209cff4490bee55fd2bc1d087cb8ecf2a686a88 (patch)
tree      0fd63cd5faa888c44b5d2a20ffd5048fcbd8dccf /kernel/trace/trace.c
parent    e1df4cb682ab2c3c2981c8efa4aec044e61f4e06 (diff)
tracing: Add alloc/free_snapshot() to replace duplicate code
Add alloc_snapshot() and free_snapshot() to allocate and free the
snapshot buffer respectively, and use these to remove duplicate
code.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 79 |
1 files changed, 42 insertions, 37 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c53e4092269..906049c0af90 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -149,14 +149,14 @@ static int __init set_ftrace_dump_on_oops(char *str)
149 | } | 149 | } |
150 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | 150 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); |
151 | 151 | ||
152 | static int __init alloc_snapshot(char *str) | 152 | static int __init boot_alloc_snapshot(char *str) |
153 | { | 153 | { |
154 | allocate_snapshot = true; | 154 | allocate_snapshot = true; |
155 | /* We also need the main ring buffer expanded */ | 155 | /* We also need the main ring buffer expanded */ |
156 | ring_buffer_expanded = true; | 156 | ring_buffer_expanded = true; |
157 | return 1; | 157 | return 1; |
158 | } | 158 | } |
159 | __setup("alloc_snapshot", alloc_snapshot); | 159 | __setup("alloc_snapshot", boot_alloc_snapshot); |
160 | 160 | ||
161 | 161 | ||
162 | static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; | 162 | static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; |
@@ -470,6 +470,38 @@ EXPORT_SYMBOL_GPL(tracing_snapshot);
470 | 470 | ||
471 | static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, | 471 | static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, |
472 | struct trace_buffer *size_buf, int cpu_id); | 472 | struct trace_buffer *size_buf, int cpu_id); |
473 | static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); | ||
474 | |||
475 | static int alloc_snapshot(struct trace_array *tr) | ||
476 | { | ||
477 | int ret; | ||
478 | |||
479 | if (!tr->allocated_snapshot) { | ||
480 | |||
481 | /* allocate spare buffer */ | ||
482 | ret = resize_buffer_duplicate_size(&tr->max_buffer, | ||
483 | &tr->trace_buffer, RING_BUFFER_ALL_CPUS); | ||
484 | if (ret < 0) | ||
485 | return ret; | ||
486 | |||
487 | tr->allocated_snapshot = true; | ||
488 | } | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | void free_snapshot(struct trace_array *tr) | ||
494 | { | ||
495 | /* | ||
496 | * We don't free the ring buffer. instead, resize it because | ||
497 | * The max_tr ring buffer has some state (e.g. ring->clock) and | ||
498 | * we want preserve it. | ||
499 | */ | ||
500 | ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); | ||
501 | set_buffer_entries(&tr->max_buffer, 1); | ||
502 | tracing_reset_online_cpus(&tr->max_buffer); | ||
503 | tr->allocated_snapshot = false; | ||
504 | } | ||
473 | 505 | ||
474 | /** | 506 | /** |
475 | * trace_snapshot_alloc - allocate and take a snapshot of the current buffer. | 507 | * trace_snapshot_alloc - allocate and take a snapshot of the current buffer. |
@@ -487,16 +519,9 @@ void tracing_snapshot_alloc(void)
487 | struct trace_array *tr = &global_trace; | 519 | struct trace_array *tr = &global_trace; |
488 | int ret; | 520 | int ret; |
489 | 521 | ||
490 | if (!tr->allocated_snapshot) { | 522 | ret = alloc_snapshot(tr); |
491 | 523 | if (WARN_ON(ret < 0)) | |
492 | /* allocate spare buffer */ | 524 | return; |
493 | ret = resize_buffer_duplicate_size(&tr->max_buffer, | ||
494 | &tr->trace_buffer, RING_BUFFER_ALL_CPUS); | ||
495 | if (WARN_ON(ret < 0)) | ||
496 | return; | ||
497 | |||
498 | tr->allocated_snapshot = true; | ||
499 | } | ||
500 | 525 | ||
501 | tracing_snapshot(); | 526 | tracing_snapshot(); |
502 | } | 527 | } |
@@ -3581,15 +3606,7 @@ static int tracing_set_tracer(const char *buf)
3581 | * so a synchronized_sched() is sufficient. | 3606 | * so a synchronized_sched() is sufficient. |
3582 | */ | 3607 | */ |
3583 | synchronize_sched(); | 3608 | synchronize_sched(); |
3584 | /* | 3609 | free_snapshot(tr); |
3585 | * We don't free the ring buffer. instead, resize it because | ||
3586 | * The max_tr ring buffer has some state (e.g. ring->clock) and | ||
3587 | * we want preserve it. | ||
3588 | */ | ||
3589 | ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); | ||
3590 | set_buffer_entries(&tr->max_buffer, 1); | ||
3591 | tracing_reset_online_cpus(&tr->max_buffer); | ||
3592 | tr->allocated_snapshot = false; | ||
3593 | } | 3610 | } |
3594 | #endif | 3611 | #endif |
3595 | destroy_trace_option_files(topts); | 3612 | destroy_trace_option_files(topts); |
@@ -3598,12 +3615,9 @@ static int tracing_set_tracer(const char *buf)
3598 | 3615 | ||
3599 | #ifdef CONFIG_TRACER_MAX_TRACE | 3616 | #ifdef CONFIG_TRACER_MAX_TRACE |
3600 | if (t->use_max_tr && !had_max_tr) { | 3617 | if (t->use_max_tr && !had_max_tr) { |
3601 | /* we need to make per cpu buffer sizes equivalent */ | 3618 | ret = alloc_snapshot(tr); |
3602 | ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer, | ||
3603 | RING_BUFFER_ALL_CPUS); | ||
3604 | if (ret < 0) | 3619 | if (ret < 0) |
3605 | goto out; | 3620 | goto out; |
3606 | tr->allocated_snapshot = true; | ||
3607 | } | 3621 | } |
3608 | #endif | 3622 | #endif |
3609 | 3623 | ||
@@ -4475,14 +4489,8 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4475 | ret = -EINVAL; | 4489 | ret = -EINVAL; |
4476 | break; | 4490 | break; |
4477 | } | 4491 | } |
4478 | if (tr->allocated_snapshot) { | 4492 | if (tr->allocated_snapshot) |
4479 | /* free spare buffer */ | 4493 | free_snapshot(tr); |
4480 | ring_buffer_resize(tr->max_buffer.buffer, 1, | ||
4481 | RING_BUFFER_ALL_CPUS); | ||
4482 | set_buffer_entries(&tr->max_buffer, 1); | ||
4483 | tracing_reset_online_cpus(&tr->max_buffer); | ||
4484 | tr->allocated_snapshot = false; | ||
4485 | } | ||
4486 | break; | 4494 | break; |
4487 | case 1: | 4495 | case 1: |
4488 | /* Only allow per-cpu swap if the ring buffer supports it */ | 4496 | /* Only allow per-cpu swap if the ring buffer supports it */ |
@@ -4493,12 +4501,9 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4493 | } | 4501 | } |
4494 | #endif | 4502 | #endif |
4495 | if (!tr->allocated_snapshot) { | 4503 | if (!tr->allocated_snapshot) { |
4496 | /* allocate spare buffer */ | 4504 | ret = alloc_snapshot(tr); |
4497 | ret = resize_buffer_duplicate_size(&tr->max_buffer, | ||
4498 | &tr->trace_buffer, RING_BUFFER_ALL_CPUS); | ||
4499 | if (ret < 0) | 4505 | if (ret < 0) |
4500 | break; | 4506 | break; |
4501 | tr->allocated_snapshot = true; | ||
4502 | } | 4507 | } |
4503 | local_irq_disable(); | 4508 | local_irq_disable(); |
4504 | /* Now, we're going to swap */ | 4509 | /* Now, we're going to swap */ |