aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2013-02-27 20:23:57 -0500
committerSteven Rostedt <rostedt@goodmis.org>2013-03-15 00:34:46 -0400
commitd1a291437f75f6c841819b7855d95a21958cc822 (patch)
tree73566370a7150d05f14d2f5ebb705064d4d2a0f9 /kernel/trace
parent772482216f170ddc62fa92a3cc3271cdd1993525 (diff)
tracing: Use kmem_cache_alloc instead of kmalloc in trace_events.c
The event structures used by the trace events are mostly persistent,
but they are also allocated by kmalloc, which is not the best at
allocating space for what is used. By converting these kmallocs
into kmem_cache_allocs, we can save over 50K of space that is
permanently allocated.

After boot we have:

 slab name            active  allocated  size
 ---------            ------  ---------  ----
 ftrace_event_file      979      1005     56    67    1
 ftrace_event_field    2301      2310     48    77    1

The ftrace_event_file has at boot up 979 active objects out of
1005 allocated in the slabs. Each object is 56 bytes. In a normal
kmalloc, that would allocate 64 bytes for each object.

 1005 - 979 = 26 objects not used
 26 * 56    = 1456 bytes wasted

But if we used kmalloc:

 64 - 56 = 8 bytes unused per allocation
 8 * 979 = 7832 bytes wasted

 7832 - 1456 = 6376 bytes in savings

Doing the same for ftrace_event_field where there are 2301 objects
allocated in a slab that can hold 2310 with 48 bytes each we have:

 2310 - 2301 = 9 objects not used
 9 * 48      = 432 bytes wasted

A kmalloc would also use 64 bytes per object:

 64 - 48   = 16 bytes unused per allocation
 16 * 2301 = 36816 bytes wasted!

 36816 - 432 = 36384 bytes in savings

This change gives us a total of 42760 bytes in savings. At least
on my machine, but as there's a lot of these persistent objects
for all configurations that use trace points, this is a net win.

Thanks to Ezequiel Garcia for his trace_analyze presentation which
pointed out the wasted space in my code.

Cc: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace_events.c27
1 file changed, 20 insertions, 7 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 21fe83b4106a..5d8845d36fa8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -36,6 +36,11 @@ EXPORT_SYMBOL_GPL(event_storage);
36LIST_HEAD(ftrace_events); 36LIST_HEAD(ftrace_events);
37LIST_HEAD(ftrace_common_fields); 37LIST_HEAD(ftrace_common_fields);
38 38
39#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40
41static struct kmem_cache *field_cachep;
42static struct kmem_cache *file_cachep;
43
39/* Double loops, do not use break, only goto's work */ 44/* Double loops, do not use break, only goto's work */
40#define do_for_each_event_file(tr, file) \ 45#define do_for_each_event_file(tr, file) \
41 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 46 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
@@ -63,7 +68,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
63{ 68{
64 struct ftrace_event_field *field; 69 struct ftrace_event_field *field;
65 70
66 field = kzalloc(sizeof(*field), GFP_KERNEL); 71 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
67 if (!field) 72 if (!field)
68 goto err; 73 goto err;
69 74
@@ -91,7 +96,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
91err: 96err:
92 if (field) 97 if (field)
93 kfree(field->name); 98 kfree(field->name);
94 kfree(field); 99 kmem_cache_free(field_cachep, field);
95 100
96 return -ENOMEM; 101 return -ENOMEM;
97} 102}
@@ -143,7 +148,7 @@ void trace_destroy_fields(struct ftrace_event_call *call)
143 list_del(&field->link); 148 list_del(&field->link);
144 kfree(field->type); 149 kfree(field->type);
145 kfree(field->name); 150 kfree(field->name);
146 kfree(field); 151 kmem_cache_free(field_cachep, field);
147 } 152 }
148} 153}
149 154
@@ -1383,7 +1388,7 @@ static void remove_event_from_tracers(struct ftrace_event_call *call)
1383 list_del(&file->list); 1388 list_del(&file->list);
1384 debugfs_remove_recursive(file->dir); 1389 debugfs_remove_recursive(file->dir);
1385 remove_subsystem(file->system); 1390 remove_subsystem(file->system);
1386 kfree(file); 1391 kmem_cache_free(file_cachep, file);
1387 1392
1388 /* 1393 /*
1389 * The do_for_each_event_file_safe() is 1394 * The do_for_each_event_file_safe() is
@@ -1462,7 +1467,7 @@ __trace_add_new_event(struct ftrace_event_call *call,
1462{ 1467{
1463 struct ftrace_event_file *file; 1468 struct ftrace_event_file *file;
1464 1469
1465 file = kzalloc(sizeof(*file), GFP_KERNEL); 1470 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1466 if (!file) 1471 if (!file)
1467 return -ENOMEM; 1472 return -ENOMEM;
1468 1473
@@ -1484,7 +1489,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
1484{ 1489{
1485 struct ftrace_event_file *file; 1490 struct ftrace_event_file *file;
1486 1491
1487 file = kzalloc(sizeof(*file), GFP_KERNEL); 1492 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1488 if (!file) 1493 if (!file)
1489 return -ENOMEM; 1494 return -ENOMEM;
1490 1495
@@ -1791,7 +1796,7 @@ __trace_remove_event_dirs(struct trace_array *tr)
1791 list_del(&file->list); 1796 list_del(&file->list);
1792 debugfs_remove_recursive(file->dir); 1797 debugfs_remove_recursive(file->dir);
1793 remove_subsystem(file->system); 1798 remove_subsystem(file->system);
1794 kfree(file); 1799 kmem_cache_free(file_cachep, file);
1795 } 1800 }
1796} 1801}
1797 1802
@@ -1947,6 +1952,13 @@ int event_trace_del_tracer(struct trace_array *tr)
1947 return 0; 1952 return 0;
1948} 1953}
1949 1954
1955static __init int event_trace_memsetup(void)
1956{
1957 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
1958 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
1959 return 0;
1960}
1961
1950static __init int event_trace_enable(void) 1962static __init int event_trace_enable(void)
1951{ 1963{
1952 struct trace_array *tr = top_trace_array(); 1964 struct trace_array *tr = top_trace_array();
@@ -2021,6 +2033,7 @@ static __init int event_trace_init(void)
2021 2033
2022 return 0; 2034 return 0;
2023} 2035}
2036early_initcall(event_trace_memsetup);
2024core_initcall(event_trace_enable); 2037core_initcall(event_trace_enable);
2025fs_initcall(event_trace_init); 2038fs_initcall(event_trace_init);
2026 2039