author    Steven Rostedt <srostedt@redhat.com>    2009-03-11 13:42:01 -0400
committer Steven Rostedt <srostedt@redhat.com>    2009-03-11 22:15:22 -0400
commit    73c5162aa362a543793f4a957c6c536dcbaa89ce (patch)
tree      ca318ef3da72aac3f809583967185c0b34998c94 /kernel/trace/trace.c
parent    80370cb758e7ca2692cd9fb5e413d970b1f4b2b2 (diff)
tracing: keep ring buffer to minimum size till used
Impact: less memory impact on systems not using the tracer

When a kernel with tracing configured boots up, it allocates the default
size of the ring buffer. This is currently 1.4MB per possible CPU, which
is a lot of wasted memory if the system never uses the tracer.

The solution is to keep the ring buffers at a minimum size until the user
uses them. Once a tracer is written into current_tracer, the ring buffer
is expanded to the default size. If the user changes the size of the ring
buffer, the size given by the user takes effect immediately.

If the user adds "ftrace=" to the kernel command line, the ring buffers
are set to the default size on initialization.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
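The mechanism can be summed up in a short, self-contained sketch (user-space C, not the kernel code itself: buf_resize() and set_tracer() are hypothetical stand-ins for tracing_resize_ring_buffer() and tracing_set_tracer(), and 1441792 is the 2.6.29-era default trace_buf_size, the ~1.4MB cited above):

#include <stdio.h>

#define DEFAULT_SIZE 1441792UL      /* default buffer size (~1.4MB) */

static int expanded;                /* mirrors ring_buffer_expanded */
static unsigned long cur_size = 1;  /* minimum size kept at boot */

/* Stand-in for tracing_resize_ring_buffer(). */
static int buf_resize(unsigned long size)
{
        expanded = 1;       /* any explicit resize counts as expansion */
        cur_size = size;
        return 0;
}

/* Stand-in for tracing_set_tracer(): expand on first real use. */
static int set_tracer(const char *name)
{
        if (!expanded && buf_resize(DEFAULT_SIZE) < 0)
                return -1;
        printf("tracer %s, buffer %lu bytes per CPU\n", name, cur_size);
        return 0;
}

int main(void)
{
        printf("boot: buffer %lu bytes per CPU\n", cur_size);
        return set_tracer("function");  /* one-time expansion here */
}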
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  79
1 file changed, 59 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4c97947650ae..0c1dc1850858 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -45,6 +45,12 @@ unsigned long __read_mostly tracing_max_latency;
 unsigned long __read_mostly tracing_thresh;
 
 /*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
+/*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
@@ -128,6 +134,8 @@ static int __init set_ftrace(char *str)
 {
        strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
+       /* We are using ftrace early, expand it */
+       ring_buffer_expanded = 1;
        return 1;
 }
 __setup("ftrace=", set_ftrace);
@@ -2315,6 +2323,40 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
        return t->init(tr);
 }
 
+static int tracing_resize_ring_buffer(unsigned long size)
+{
+       int ret;
+
+       /*
+        * If the kernel or user changes the size of the ring buffer,
+        * the expansion is considered done.
+        */
+       ring_buffer_expanded = 1;
+
+       ret = ring_buffer_resize(global_trace.buffer, size);
+       if (ret < 0)
+               return ret;
+
+       ret = ring_buffer_resize(max_tr.buffer, size);
+       if (ret < 0) {
+               int r;
+
+               r = ring_buffer_resize(global_trace.buffer,
+                                      global_trace.entries);
+               if (r < 0) {
+                       /* AARGH! We are left with different
+                        * size max buffer!!!! */
+                       WARN_ON(1);
+                       tracing_disabled = 1;
+               }
+               return ret;
+       }
+
+       global_trace.entries = size;
+
+       return ret;
+}
+
 struct trace_option_dentry;
 
 static struct trace_option_dentry *
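tracing_resize_ring_buffer() above must keep global_trace.buffer and max_tr.buffer the same size: if resizing the max buffer fails, it shrinks the main buffer back, and if even that fails it disables tracing outright. A distilled sketch of that rollback pattern, where struct buf and resize() are hypothetical stand-ins for the ring buffer and ring_buffer_resize():

#include <stdio.h>

struct buf { unsigned long size; int fail; };

/* Stand-in for ring_buffer_resize(); the real call can fail with -ENOMEM. */
static int resize(struct buf *b, unsigned long size)
{
        if (b->fail)
                return -1;
        b->size = size;
        return 0;
}

/* Resize two buffers in lockstep, or roll back to the old size. */
static int resize_pair(struct buf *main_buf, struct buf *max_buf,
                       unsigned long new_size, unsigned long old_size)
{
        int ret = resize(main_buf, new_size);

        if (ret < 0)
                return ret;     /* first resize failed: nothing changed */

        ret = resize(max_buf, new_size);
        if (ret < 0) {
                /* Shrink the main buffer back so the pair stays equal. */
                if (resize(main_buf, old_size) < 0) {
                        /* Both failed: the sizes now diverge; the kernel
                         * WARNs and sets tracing_disabled at this point. */
                        return -1;
                }
        }
        return ret;
}

int main(void)
{
        struct buf main_buf = { 1, 0 };
        struct buf max_buf  = { 1, 1 };  /* refuses to resize */
        int ret = resize_pair(&main_buf, &max_buf, 1024, 1);

        /* max_buf failed, so main_buf was rolled back to its old size. */
        printf("ret=%d, main size=%lu\n", ret, main_buf.size);
        return 0;
}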
@@ -2330,6 +2372,13 @@ static int tracing_set_tracer(const char *buf)
        struct tracer *t;
        int ret = 0;
 
+       if (!ring_buffer_expanded) {
+               ret = tracing_resize_ring_buffer(trace_buf_size);
+               if (ret < 0)
+                       return ret;
+               ret = 0;
+       }
+
        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)
@@ -2903,28 +2952,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        val <<= 10;
 
        if (val != global_trace.entries) {
-               ret = ring_buffer_resize(global_trace.buffer, val);
+               ret = tracing_resize_ring_buffer(val);
                if (ret < 0) {
                        cnt = ret;
                        goto out;
                }
-
-               ret = ring_buffer_resize(max_tr.buffer, val);
-               if (ret < 0) {
-                       int r;
-                       cnt = ret;
-                       r = ring_buffer_resize(global_trace.buffer,
-                                              global_trace.entries);
-                       if (r < 0) {
-                               /* AARGH! We are left with different
-                                * size max buffer!!!! */
-                               WARN_ON(1);
-                               tracing_disabled = 1;
-                       }
-                       goto out;
-               }
-
-               global_trace.entries = val;
        }
 
        filp->f_pos += cnt;
@@ -3916,6 +3948,7 @@ void ftrace_dump(void)
 __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
+       int ring_buf_size;
        int i;
        int ret = -ENOMEM;
 
@@ -3928,12 +3961,18 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
                goto out_free_tracing_cpumask;
 
+       /* To save memory, keep the ring buffer size to its minimum */
+       if (ring_buffer_expanded)
+               ring_buf_size = trace_buf_size;
+       else
+               ring_buf_size = 1;
+
        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
        cpumask_clear(tracing_reader_cpumask);
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
-       global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+       global_trace.buffer = ring_buffer_alloc(ring_buf_size,
                                                TRACE_BUFFER_FLAGS);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
@@ -3944,7 +3983,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+       max_tr.buffer = ring_buffer_alloc(ring_buf_size,
                                          TRACE_BUFFER_FLAGS);
        if (!max_tr.buffer) {
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
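Taken together, the boot path now behaves roughly like the following self-contained sketch (user-space C; parse_cmdline() and boot_buffer_size() are hypothetical stand-ins for the ftrace= __setup handler and the size selection in tracer_alloc_buffers(), and 1441792 is assumed to be the era's default trace_buf_size):

#include <stdio.h>
#include <string.h>

#define TRACE_BUF_SIZE 1441792UL   /* default size once tracing is used */

static int ring_buffer_expanded;

/* Stand-in for the ftrace= __setup handler: expand at boot if asked. */
static void parse_cmdline(const char *cmdline)
{
        if (strncmp(cmdline, "ftrace=", 7) == 0)
                ring_buffer_expanded = 1;
}

/* Stand-in for the size selection in tracer_alloc_buffers(). */
static unsigned long boot_buffer_size(void)
{
        return ring_buffer_expanded ? TRACE_BUF_SIZE : 1;
}

int main(void)
{
        parse_cmdline("quiet");              /* tracing unused: minimum */
        printf("size = %lu\n", boot_buffer_size());

        parse_cmdline("ftrace=function");    /* tracing at boot: default */
        printf("size = %lu\n", boot_buffer_size());
        return 0;
}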