author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2010-07-01 01:34:35 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-07-21 10:20:17 -0400
commit	ef710e100c1068d3dd5774d2b34c5485219e06ce (patch)
tree	ed295053a31de472d4ed4338679c00ac8e8437c7 /kernel/trace/trace.c
parent	bc289ae98b75d93228d24f521ef02a076e506e94 (diff)
tracing: Shrink max latency ringbuffer if unnecessary
Documentation/trace/ftrace.txt says:

  buffer_size_kb: This sets or displays the number of kilobytes each CPU buffer can hold. The tracer buffers are the same size for each CPU. The displayed number is the size of the CPU buffer and not the total size of all buffers. The trace buffers are allocated in pages (blocks of memory that the kernel uses for allocation, usually 4 KB in size). If the last page allocated has room for more bytes than requested, the rest of the page will be used, making the actual allocation bigger than requested. (Note, the size may not be a multiple of the page size due to buffer management overhead.) This can only be updated when the current_tracer is set to "nop".

But this is incorrect: the total memory consumption is actually 'buffer_size_kb x CPUs x 2'. Why the factor of two? Because ftrace implicitly allocates a second buffer of the same size for the max latency trace.

That gives a sad result when an admin wants a large buffer for full logging and detailed analysis. For example, on a 24-CPU machine with buffer_size_kb set to 200MB, the system consumes ~10GB of memory (200MB x 24 x 2). Wasting ~5GB of memory is usually unacceptable.

Fortunately, almost all users do not use the max latency feature, so its buffer can easily be disabled. This patch shrinks the max latency ring buffer when it is not needed.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
LKML-Reference: <20100701104554.DA2D.A69D9226@jp.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
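To make the memory arithmetic above concrete, here is a minimal standalone sketch (not part of the patch, and not kernel code) that just reproduces the 200MB x 24 CPUs x 2 example; the variable names mirror buffer_size_kb from the commit message and are otherwise illustrative:

	/*
	 * Standalone illustration of the memory math from the commit message.
	 * Reproduces the "200MB per CPU x 24 CPUs x 2 buffers" example.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long buffer_size_kb = 200UL * 1024;	/* 200 MB per CPU, as in the example */
		unsigned long nr_cpus = 24;

		/* Before the patch: every CPU buffer is duplicated for the max latency trace. */
		unsigned long before_kb = buffer_size_kb * nr_cpus * 2;

		/* After the patch: max_tr stays tiny until a latency tracer actually needs it. */
		unsigned long after_kb = buffer_size_kb * nr_cpus;

		printf("before: ~%lu MB, after: ~%lu MB\n",
		       before_kb / 1024, after_kb / 1024);
		return 0;
	}

This prints roughly 9600 MB before versus 4800 MB after, matching the ~10GB consumption and ~5GB waste described above.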
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	38
1 file changed, 32 insertions(+), 6 deletions(-)
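The hunks below all revolve around one pattern: a tracer advertises whether it needs the max latency snapshot buffer via a use_max_tr flag, and trace.c keeps max_tr shrunk to a single entry until such a tracer is selected, growing it back on demand instead of freeing it. The following is a simplified userspace sketch of that pattern under stand-in names (mock_trace_array, mock_tracer, mock_resize, set_tracer are all hypothetical, not the kernel API):

	/*
	 * Simplified sketch of the use_max_tr gating introduced by this patch.
	 * All types and the resize call are stand-ins, not kernel interfaces.
	 */
	#include <stdio.h>

	struct mock_trace_array {
		unsigned long entries;		/* stand-in for max_tr.entries */
	};

	struct mock_tracer {
		const char *name;
		int use_max_tr;			/* does this tracer need the max latency buffer? */
	};

	static struct mock_trace_array max_tr = { .entries = 1 };	/* allocated tiny at boot */
	static unsigned long global_entries = 200 * 1024;		/* main buffer size */

	/* Stand-in for ring_buffer_resize(): just records the new size. */
	static void mock_resize(struct mock_trace_array *tr, unsigned long size)
	{
		tr->entries = size;
	}

	static void set_tracer(struct mock_tracer *old, struct mock_tracer *new)
	{
		/* Old tracer used max_tr: shrink it to one entry rather than freeing it. */
		if (old && old->use_max_tr)
			mock_resize(&max_tr, 1);

		/* New tracer needs max_tr: grow it back to match the main buffer. */
		if (new->use_max_tr)
			mock_resize(&max_tr, global_entries);

		printf("switched %s -> %s, max_tr.entries = %lu\n",
		       old ? old->name : "none", new->name, max_tr.entries);
	}

	int main(void)
	{
		struct mock_tracer nop = { "nop", 0 };
		struct mock_tracer irqsoff = { "irqsoff", 1 };

		set_tracer(NULL, &irqsoff);	/* latency tracer: max_tr grows */
		set_tracer(&irqsoff, &nop);	/* back to nop: max_tr shrinks to 1 entry */
		return 0;
	}

The real patch additionally resizes (rather than frees) the buffer to preserve ring buffer state such as its clock, as the in-diff comment below notes.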
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index af9042977c08..f7488f44d26b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -660,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -686,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
@@ -2801,6 +2810,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
 	if (ret < 0)
 		return ret;
 
+	if (!current_trace->use_max_tr)
+		goto out;
+
 	ret = ring_buffer_resize(max_tr.buffer, size);
 	if (ret < 0) {
 		int r;
@@ -2828,11 +2840,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
+	max_tr.entries = size;
+ out:
 	global_trace.entries = size;
 
 	return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
  *
@@ -2893,12 +2908,26 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-
+	if (current_trace && current_trace->use_max_tr) {
+		/*
+		 * We don't free the ring buffer. instead, resize it because
+		 * The max_tr ring buffer has some state (e.g. ring->clock) and
+		 * we want preserve it.
+		 */
+		ring_buffer_resize(max_tr.buffer, 1);
+		max_tr.entries = 1;
+	}
 	destroy_trace_option_files(topts);
 
 	current_trace = t;
 
 	topts = create_trace_option_files(current_trace);
+	if (current_trace->use_max_tr) {
+		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+		if (ret < 0)
+			goto out;
+		max_tr.entries = global_trace.entries;
+	}
 
 	if (t->init) {
 		ret = tracer_init(t, tr);
@@ -3480,7 +3509,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	}
 
 	tracing_start();
-	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
 	return cnt;
@@ -4578,16 +4606,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-					  TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = ring_buffer_size(max_tr.buffer);
-	WARN_ON(max_tr.entries != global_trace.entries);
+	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */