author     Frederic Weisbecker <fweisbec@gmail.com>   2008-12-29 16:42:23 -0500
committer  Ingo Molnar <mingo@elte.hu>                2008-12-30 03:36:13 -0500
commit     36994e58a48fb8f9651c7dc845a6de298aba5bfc
tree       fac4b8a1c30560e91460651487f3a57ef7f2b5cc
parent     7a51cffbd10886c0557677dd916c090097c691ef

tracing/kmemtrace: normalize the raw tracer event to the unified tracing API
Impact: new tracer plugin
This patch adapts kmemtrace's raw event tracing to the unified tracing API.
To enable and use this tracer, just do the following:
echo kmemtrace > /debugfs/tracing/current_tracer
cat /debugfs/tracing/trace
You will have the following output:
# tracer: kmemtrace
#
#
# ALLOC TYPE REQ GIVEN FLAGS POINTER NODE CALLER
# FREE | | | | | | | |
# |
type_id 1 call_site 18446744071565527833 ptr 18446612134395152256
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 0 call_site 18446744071565636711 ptr 18446612134345164672 bytes_req 240 bytes_alloc 240 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 0 call_site 18446744071565636711 ptr 18446612134345164912 bytes_req 240 bytes_alloc 240 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 0 call_site 18446744071565636711 ptr 18446612134345165152 bytes_req 240 bytes_alloc 240 gfp_flags 208 node -1
type_id 0 call_site 18446744071566144042 ptr 18446612134346191680 bytes_req 1304 bytes_alloc 1312 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
That was done to stay backward compatible with the format previously produced
by the trace_mark() calls in linux/kmemtrace.h.
This is the default output, but note that I also tried something else.
If you set an option:
echo kmem_minimalistic > /debugfs/tracing/trace_options
and then cat /debugfs/tracing/trace, you will get the following output:
# tracer: kmemtrace
#
#
# ALLOC TYPE REQ GIVEN FLAGS POINTER NODE CALLER
# FREE | | | | | | | |
# |
- C 0xffff88007c088780 file_free_rcu
+ K 4096 4096 000000d0 0xffff88007cad6000 -1 getname
- C 0xffff88007cad6000 putname
+ K 4096 4096 000000d0 0xffff88007cad6000 -1 getname
+ K 240 240 000000d0 0xffff8800790dc780 -1 d_alloc
- C 0xffff88007cad6000 putname
+ K 4096 4096 000000d0 0xffff88007cad6000 -1 getname
+ K 240 240 000000d0 0xffff8800790dc870 -1 d_alloc
- C 0xffff88007cad6000 putname
+ K 4096 4096 000000d0 0xffff88007cad6000 -1 getname
+ K 240 240 000000d0 0xffff8800790dc960 -1 d_alloc
+ K 1304 1312 000000d0 0xffff8800791d7340 -1 reiserfs_alloc_inode
- C 0xffff88007cad6000 putname
+ K 4096 4096 000000d0 0xffff88007cad6000 -1 getname
- C 0xffff88007cad6000 putname
+ K 992 1000 000000d0 0xffff880079045b58 -1 alloc_inode
+ K 768 1024 000080d0 0xffff88007c096400 -1 alloc_pipe_info
+ K 240 240 000000d0 0xffff8800790dca50 -1 d_alloc
+ K 272 320 000080d0 0xffff88007c088780 -1 get_empty_filp
+ K 272 320 000080d0 0xffff88007c088000 -1 get_empty_filp
I confess that kmem_minimalistic should probably be named kmem_alternative.
In any case, I find it more readable, but that is a personal opinion of course.
We can drop it if you want.
In the ALLOC/FREE column, + means an allocation and - a free.
In the TYPE column, K = kmalloc, C = cache, P = page.
I would like the flags to be printed as GFP_* strings, but it is not easy to do
that without breaking the column alignment.
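For illustration only, here is a minimal userspace sketch (hypothetical, not
part of this patch) that decodes a raw gfp_flags value such as the 000000d0
above into its __GFP_* bits, assuming the 2.6.28-era flag values from
include/linux/gfp.h; 0xd0 is __GFP_WAIT | __GFP_IO | __GFP_FS, i.e. GFP_KERNEL:

/*
 * Hypothetical example, not part of this patch: decode a raw gfp_flags
 * value into __GFP_* names, assuming 2.6.28-era flag values.
 */
#include <stdio.h>

static const struct {
	unsigned long bit;
	const char *name;
} gfp_bits[] = {
	{ 0x01ul,  "__GFP_DMA" },
	{ 0x02ul,  "__GFP_HIGHMEM" },
	{ 0x04ul,  "__GFP_DMA32" },
	{ 0x10ul,  "__GFP_WAIT" },
	{ 0x20ul,  "__GFP_HIGH" },
	{ 0x40ul,  "__GFP_IO" },
	{ 0x80ul,  "__GFP_FS" },
	{ 0x100ul, "__GFP_COLD" },
	{ 0x200ul, "__GFP_NOWARN" },
};

static void print_gfp_flags(unsigned long flags)
{
	unsigned int i;
	int first = 1;

	for (i = 0; i < sizeof(gfp_bits) / sizeof(gfp_bits[0]); i++) {
		if (flags & gfp_bits[i].bit) {
			printf("%s%s", first ? "" : "|", gfp_bits[i].name);
			first = 0;
		}
	}
	printf("\n");
}

int main(void)
{
	/* 0xd0 == __GFP_WAIT | __GFP_IO | __GFP_FS == GFP_KERNEL */
	print_gfp_flags(0xd0ul);
	return 0;
}

The tracer could use the same kind of table-driven lookup to feed
trace_seq_printf(), at the cost of variable-width columns.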
About the node: it seems to always be -1. I don't know why, but that shouldn't
be difficult to find out.
I also moved linux/kmemtrace.h to trace/kmemtrace.h. I think the tracer headers
are easier to find if they are all in a common directory.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/slab_def.h                                             2
-rw-r--r--  include/linux/slub_def.h                                             2
-rw-r--r--  include/trace/kmemtrace.h (renamed from include/linux/kmemtrace.h)  19
-rw-r--r--  init/main.c                                                          2
-rw-r--r--  kernel/trace/Kconfig                                                22
-rw-r--r--  kernel/trace/Makefile                                                1
-rw-r--r--  kernel/trace/kmemtrace.c                                           343
-rw-r--r--  kernel/trace/trace.h                                                25
-rw-r--r--  lib/Kconfig.debug                                                   20
-rw-r--r--  mm/kmemtrace.c                                                       2
-rw-r--r--  mm/slob.c                                                            2
-rw-r--r--  mm/slub.c                                                            2

12 files changed, 401 insertions, 41 deletions
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 7555ce99f6d2..455f9affea9a 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,7 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index dc28432b5b9a..6b657f7dcb2b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,7 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
diff --git a/include/linux/kmemtrace.h b/include/trace/kmemtrace.h
index 5bea8ead6a6b..ad8b7857855a 100644
--- a/include/linux/kmemtrace.h
+++ b/include/trace/kmemtrace.h
@@ -22,28 +22,17 @@ enum kmemtrace_type_id {
 
 extern void kmemtrace_init(void);
 
-static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 					unsigned long call_site,
 					const void *ptr,
 					size_t bytes_req,
 					size_t bytes_alloc,
 					gfp_t gfp_flags,
-					int node)
-{
-	trace_mark(kmemtrace_alloc, "type_id %d call_site %lu ptr %lu "
-		   "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d",
-		   type_id, call_site, (unsigned long) ptr,
-		   (unsigned long) bytes_req, (unsigned long) bytes_alloc,
-		   (unsigned long) gfp_flags, node);
-}
+					int node);
 
-static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
 				unsigned long call_site,
-				const void *ptr)
-{
-	trace_mark(kmemtrace_free, "type_id %d call_site %lu ptr %lu",
-		   type_id, call_site, (unsigned long) ptr);
-}
+				const void *ptr);
 
 #else /* CONFIG_KMEMTRACE */
 
diff --git a/init/main.c b/init/main.c
index 9711586aa7c9..beca7aaddb22 100644
--- a/init/main.c
+++ b/init/main.c
@@ -70,7 +70,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/smp.h>
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..27fb74b06b3c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -264,6 +264,28 @@ config HW_BRANCH_TRACER
 	  This tracer records all branches on the system in a circular
 	  buffer giving access to the last N branches for each cpu.
 
+config KMEMTRACE
+	bool "Trace SLAB allocations"
+	select TRACING
+	depends on RELAY
+	help
+	  kmemtrace provides tracing for slab allocator functions, such as
+	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
+	  data is then fed to the userspace application in order to analyse
+	  allocation hotspots, internal fragmentation and so on, making it
+	  possible to see how well an allocator performs, as well as debug
+	  and profile kernel code.
+
+	  This requires an userspace application to use. See
+	  Documentation/vm/kmemtrace.txt for more information.
+
+	  Saying Y will make the kernel somewhat larger and slower. However,
+	  if you disable kmemtrace at run-time or boot-time, the performance
+	  impact is minimal (depending on the arch the kernel is built for).
+
+	  If unsure, say N.
+
+
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
 	depends on FUNCTION_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 349d5a93653f..513dc86b5dfa 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -33,5 +33,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
 obj-$(CONFIG_POWER_TRACER) += trace_power.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
new file mode 100644
index 000000000000..d69cbe3c2a4b
--- /dev/null
+++ b/kernel/trace/kmemtrace.c
@@ -0,0 +1,343 @@
+/*
+ * Memory allocator tracing
+ *
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ */
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
+
+#include "trace.h"
+#include "trace_output.h"
+
+/* Select an alternative, minimalistic output than the original one */
+#define TRACE_KMEM_OPT_MINIMAL	0x1
+
+static struct tracer_opt kmem_opts[] = {
+	/* Default disable the minimalistic output */
+	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
+	{ }
+};
+
+static struct tracer_flags kmem_tracer_flags = {
+	.val = 0,
+	.opts = kmem_opts
+};
+
+
+static bool kmem_tracing_enabled __read_mostly;
+static struct trace_array *kmemtrace_array;
+
+static int kmem_trace_init(struct trace_array *tr)
+{
+	int cpu;
+	kmemtrace_array = tr;
+
+	for_each_cpu_mask(cpu, cpu_possible_map)
+		tracing_reset(tr, cpu);
+
+	kmem_tracing_enabled = true;
+
+	return 0;
+}
+
+static void kmem_trace_reset(struct trace_array *tr)
+{
+	kmem_tracing_enabled = false;
+}
+
+static void kmemtrace_headers(struct seq_file *s)
+{
+	/* Don't need headers for the original kmemtrace output */
+	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
+		return;
+
+	seq_printf(s, "#\n");
+	seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS "
+			" POINTER NODE CALLER\n");
+	seq_printf(s, "# FREE | | | | "
+			" | | | |\n");
+	seq_printf(s, "# |\n\n");
+}
+
+/*
+ * The two following functions give the original output from kmemtrace,
+ * or something close to....perhaps they need some missing things
+ */
+static enum print_line_t
+kmemtrace_print_alloc_original(struct trace_iterator *iter,
+				struct kmemtrace_alloc_entry *entry)
+{
+	struct trace_seq *s = &iter->seq;
+	int ret;
+
+	/* Taken from the old linux/kmemtrace.h */
+	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
+		"bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
+		entry->type_id, entry->call_site, (unsigned long) entry->ptr,
+		(unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
+		(unsigned long) entry->gfp_flags, entry->node);
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_original(struct trace_iterator *iter,
+				struct kmemtrace_free_entry *entry)
+{
+	struct trace_seq *s = &iter->seq;
+	int ret;
+
+	/* Taken from the old linux/kmemtrace.h */
+	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
+		entry->type_id, entry->call_site, (unsigned long) entry->ptr);
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+
+/* The two other following provide a more minimalistic output */
+static enum print_line_t
+kmemtrace_print_alloc_compress(struct trace_iterator *iter,
+					struct kmemtrace_alloc_entry *entry)
+{
+	struct trace_seq *s = &iter->seq;
+	int ret;
+
+	/* Alloc entry */
+	ret = trace_seq_printf(s, " + ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Type */
+	switch (entry->type_id) {
+	case KMEMTRACE_TYPE_KMALLOC:
+		ret = trace_seq_printf(s, "K ");
+		break;
+	case KMEMTRACE_TYPE_CACHE:
+		ret = trace_seq_printf(s, "C ");
+		break;
+	case KMEMTRACE_TYPE_PAGES:
+		ret = trace_seq_printf(s, "P ");
+		break;
+	default:
+		ret = trace_seq_printf(s, "? ");
+	}
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Requested */
+	ret = trace_seq_printf(s, "%4d ", entry->bytes_req);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Allocated */
+	ret = trace_seq_printf(s, "%4d ", entry->bytes_alloc);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Flags
+	 * TODO: would be better to see the name of the GFP flag names
+	 */
+	ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Pointer to allocated */
+	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Node */
+	ret = trace_seq_printf(s, "%4d ", entry->node);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Call site */
+	ret = seq_print_ip_sym(s, entry->call_site, 0);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!trace_seq_printf(s, "\n"))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_compress(struct trace_iterator *iter,
+				struct kmemtrace_free_entry *entry)
+{
+	struct trace_seq *s = &iter->seq;
+	int ret;
+
+	/* Free entry */
+	ret = trace_seq_printf(s, " - ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Type */
+	switch (entry->type_id) {
+	case KMEMTRACE_TYPE_KMALLOC:
+		ret = trace_seq_printf(s, "K ");
+		break;
+	case KMEMTRACE_TYPE_CACHE:
+		ret = trace_seq_printf(s, "C ");
+		break;
+	case KMEMTRACE_TYPE_PAGES:
+		ret = trace_seq_printf(s, "P ");
+		break;
+	default:
+		ret = trace_seq_printf(s, "? ");
+	}
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Skip requested/allocated/flags */
+	ret = trace_seq_printf(s, " ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Pointer to allocated */
+	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Skip node */
+	ret = trace_seq_printf(s, " ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Call site */
+	ret = seq_print_ip_sym(s, entry->call_site, 0);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!trace_seq_printf(s, "\n"))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
+{
+	struct trace_entry *entry = iter->ent;
+
+	switch (entry->type) {
+	case TRACE_KMEM_ALLOC: {
+		struct kmemtrace_alloc_entry *field;
+		trace_assign_type(field, entry);
+		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+			return kmemtrace_print_alloc_compress(iter, field);
+		else
+			return kmemtrace_print_alloc_original(iter, field);
+	}
+
+	case TRACE_KMEM_FREE: {
+		struct kmemtrace_free_entry *field;
+		trace_assign_type(field, entry);
+		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+			return kmemtrace_print_free_compress(iter, field);
+		else
+			return kmemtrace_print_free_original(iter, field);
+	}
+
+	default:
+		return TRACE_TYPE_UNHANDLED;
+	}
+}
+
+/* Trace allocations */
+void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+				unsigned long call_site,
+				const void *ptr,
+				size_t bytes_req,
+				size_t bytes_alloc,
+				gfp_t gfp_flags,
+				int node)
+{
+	struct ring_buffer_event *event;
+	struct kmemtrace_alloc_entry *entry;
+	struct trace_array *tr = kmemtrace_array;
+	unsigned long irq_flags;
+
+	if (!kmem_tracing_enabled)
+		return;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, 0);
+
+	entry->ent.type = TRACE_KMEM_ALLOC;
+	entry->call_site = call_site;
+	entry->ptr = ptr;
+	entry->bytes_req = bytes_req;
+	entry->bytes_alloc = bytes_alloc;
+	entry->gfp_flags = gfp_flags;
+	entry->node = node;
+
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+	trace_wake_up();
+}
+
+void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+			unsigned long call_site,
+			const void *ptr)
+{
+	struct ring_buffer_event *event;
+	struct kmemtrace_free_entry *entry;
+	struct trace_array *tr = kmemtrace_array;
+	unsigned long irq_flags;
+
+	if (!kmem_tracing_enabled)
+		return;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, 0);
+
+	entry->ent.type = TRACE_KMEM_FREE;
+	entry->type_id = type_id;
+	entry->call_site = call_site;
+	entry->ptr = ptr;
+
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+	trace_wake_up();
+}
+
+static struct tracer kmem_tracer __read_mostly = {
+	.name = "kmemtrace",
+	.init = kmem_trace_init,
+	.reset = kmem_trace_reset,
+	.print_line = kmemtrace_print_line,
+	.print_header = kmemtrace_headers,
+	.flags = &kmem_tracer_flags
+};
+
+static int __init init_kmem_tracer(void)
+{
+	return register_tracer(&kmem_tracer);
+}
+
+device_initcall(init_kmem_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cc7a4f864036..534505bb39b0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,6 +9,7 @@
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
 #include <trace/boot.h>
+#include <trace/kmemtrace.h>
 
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
@@ -29,6 +30,8 @@ enum trace_type {
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_HW_BRANCHES,
+	TRACE_KMEM_ALLOC,
+	TRACE_KMEM_FREE,
 	TRACE_POWER,
 
 	__TRACE_LAST_TYPE
@@ -170,6 +173,24 @@ struct trace_power {
 	struct power_trace state_data;
 };
 
+struct kmemtrace_alloc_entry {
+	struct trace_entry ent;
+	enum kmemtrace_type_id type_id;
+	unsigned long call_site;
+	const void *ptr;
+	size_t bytes_req;
+	size_t bytes_alloc;
+	gfp_t gfp_flags;
+	int node;
+};
+
+struct kmemtrace_free_entry {
+	struct trace_entry ent;
+	enum kmemtrace_type_id type_id;
+	unsigned long call_site;
+	const void *ptr;
+};
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
@@ -280,6 +301,10 @@ extern void __ftrace_bad_type(void);
 			  TRACE_GRAPH_RET);		\
 		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
 		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
+		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
+			  TRACE_KMEM_ALLOC);	\
+		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
+			  TRACE_KMEM_FREE);	\
 		__ftrace_bad_type();					\
 	} while (0)
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b5417e23ba94..b0f239e443bc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -803,26 +803,6 @@ config FIREWIRE_OHCI_REMOTE_DMA
 
 	  If unsure, say N.
 
-config KMEMTRACE
-	bool "Kernel memory tracer (kmemtrace)"
-	depends on RELAY && DEBUG_FS && MARKERS
-	help
-	  kmemtrace provides tracing for slab allocator functions, such as
-	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
-	  data is then fed to the userspace application in order to analyse
-	  allocation hotspots, internal fragmentation and so on, making it
-	  possible to see how well an allocator performs, as well as debug
-	  and profile kernel code.
-
-	  This requires an userspace application to use. See
-	  Documentation/vm/kmemtrace.txt for more information.
-
-	  Saying Y will make the kernel somewhat larger and slower. However,
-	  if you disable kmemtrace at run-time or boot-time, the performance
-	  impact is minimal (depending on the arch the kernel is built for).
-
-	  If unsure, say N.
-
 menuconfig BUILD_DOCSRC
 	bool "Build targets in Documentation/ tree"
 	depends on HEADERS_CHECK
diff --git a/mm/kmemtrace.c b/mm/kmemtrace.c
index 2a70a805027c..0573b5080cc4 100644
--- a/mm/kmemtrace.c
+++ b/mm/kmemtrace.c
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/marker.h>
 #include <linux/gfp.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 #define KMEMTRACE_SUBBUF_SIZE	524288
 #define KMEMTRACE_DEF_N_SUBBUFS	20
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,7 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,7 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>