Diffstat (limited to 'kernel/trace/kmemtrace.c')

 kernel/trace/kmemtrace.c | 529 ---------------------------------------------
 1 file changed, 0 insertions(+), 529 deletions(-)
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
deleted file mode 100644
index bbfc1bb1660b..000000000000
--- a/kernel/trace/kmemtrace.c
+++ /dev/null
@@ -1,529 +0,0 @@
/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val	= 0,
	.opts	= kmem_opts
};

static struct trace_array *kmemtrace_array;

/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ftrace_event_call *call = &event_kmem_alloc;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_alloc_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type = TRACE_KMEM_ALLOC;
	entry->type_id = type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;
	entry->bytes_req = bytes_req;
	entry->bytes_alloc = bytes_alloc;
	entry->gfp_flags = gfp_flags;
	entry->node = node;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
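
/*
 * Reviewer note: the sequence above is the usual ftrace buffer
 * protocol -- reserve space in the ring buffer, fill the reserved
 * entry in place, then either discard the event (when the event
 * filter rejects it) or commit it so readers can see it.
 * trace_wake_up() wakes any readers blocked on the trace pipe.
 */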

static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ftrace_event_call *call = &event_kmem_free;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type = TRACE_KMEM_FREE;
	entry->type_id = type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}

static void kmemtrace_kmalloc(void *ignore,
			      unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(void *ignore,
				       unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(void *ignore,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(void *ignore,
					    unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void
kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(void *ignore,
				      unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}

static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree, NULL);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);

	return err;
}
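
/*
 * Reviewer note: if one of the later registrations fails, the probes
 * registered before it stay in place, and kmem_trace_init() below
 * ignores the return value anyway. A stricter variant would unwind
 * with the matching unregister_trace_*() calls before returning.
 */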
179 | |||
180 | static void kmemtrace_stop_probes(void) | ||
181 | { | ||
182 | unregister_trace_kmalloc(kmemtrace_kmalloc, NULL); | ||
183 | unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); | ||
184 | unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); | ||
185 | unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); | ||
186 | unregister_trace_kfree(kmemtrace_kfree, NULL); | ||
187 | unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); | ||
188 | } | ||
189 | |||
190 | static int kmem_trace_init(struct trace_array *tr) | ||
191 | { | ||
192 | kmemtrace_array = tr; | ||
193 | |||
194 | tracing_reset_online_cpus(tr); | ||
195 | |||
196 | kmemtrace_start_probes(); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static void kmem_trace_reset(struct trace_array *tr) | ||
202 | { | ||
203 | kmemtrace_stop_probes(); | ||
204 | } | ||
205 | |||
206 | static void kmemtrace_headers(struct seq_file *s) | ||
207 | { | ||
208 | /* Don't need headers for the original kmemtrace output */ | ||
209 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
210 | return; | ||
211 | |||
212 | seq_printf(s, "#\n"); | ||
213 | seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " | ||
214 | " POINTER NODE CALLER\n"); | ||
215 | seq_printf(s, "# FREE | | | | " | ||
216 | " | | | |\n"); | ||
217 | seq_printf(s, "# |\n\n"); | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * The following functions give the original output from kmemtrace, | ||
222 | * plus the origin CPU, since reordering occurs in-kernel now. | ||
223 | */ | ||
224 | |||
225 | #define KMEMTRACE_USER_ALLOC 0 | ||
226 | #define KMEMTRACE_USER_FREE 1 | ||
227 | |||
228 | struct kmemtrace_user_event { | ||
229 | u8 event_id; | ||
230 | u8 type_id; | ||
231 | u16 event_size; | ||
232 | u32 cpu; | ||
233 | u64 timestamp; | ||
234 | unsigned long call_site; | ||
235 | unsigned long ptr; | ||
236 | }; | ||
237 | |||
238 | struct kmemtrace_user_event_alloc { | ||
239 | size_t bytes_req; | ||
240 | size_t bytes_alloc; | ||
241 | unsigned gfp_flags; | ||
242 | int node; | ||
243 | }; | ||
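
/*
 * Reviewer sketch, not part of the original file: a minimal userspace
 * view of the binary records emitted by the ->binary callbacks below,
 * assuming the packed stream has already been read into a byte buffer.
 * The struct mirrors kmemtrace_user_event with fixed-width types (the
 * unsigned long fields make the layout arch-dependent), and event_size
 * is used to step over the optional alloc payload. All user_* names
 * and the KMEMTRACE_USER_SKETCH guard are hypothetical; the guard
 * keeps this illustration from ever building in-kernel.
 */
#ifdef KMEMTRACE_USER_SKETCH
#include <stdint.h>
#include <stddef.h>

struct user_kmem_event {
	uint8_t		event_id;	/* KMEMTRACE_USER_ALLOC/FREE */
	uint8_t		type_id;
	uint16_t	event_size;	/* header + optional alloc payload */
	uint32_t	cpu;
	uint64_t	timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

static const unsigned char *
user_kmem_next(const unsigned char *p, const unsigned char *end)
{
	const struct user_kmem_event *ev = (const void *)p;

	if ((size_t)(end - p) < sizeof(*ev) ||
	    (size_t)(end - p) < ev->event_size)
		return NULL;			/* truncated stream */
	return p + ev->event_size;		/* skip to the next event */
}
#endif /* KMEMTRACE_USER_SKETCH */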

static enum print_line_t
kmemtrace_print_alloc(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
			"bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
			entry->type_id, (void *)entry->call_site,
			(unsigned long)entry->ptr,
			(unsigned long)entry->bytes_req,
			(unsigned long)entry->bytes_alloc,
			(unsigned long)entry->gfp_flags, entry->node);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free(struct trace_iterator *iter, int flags,
		     struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
			       entry->type_id, (void *)entry->call_site,
			       (unsigned long)entry->ptr);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	struct kmemtrace_user_event *ev;
	struct kmemtrace_user_event_alloc *ev_alloc;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id = KMEMTRACE_USER_ALLOC;
	ev->type_id = entry->type_id;
	ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu = iter->cpu;
	ev->timestamp = iter->ts;
	ev->call_site = entry->call_site;
	ev->ptr = (unsigned long)entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;

	ev_alloc->bytes_req = entry->bytes_req;
	ev_alloc->bytes_alloc = entry->bytes_alloc;
	ev_alloc->gfp_flags = entry->gfp_flags;
	ev_alloc->node = entry->node;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags,
			  struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	struct kmemtrace_user_event *ev;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id = KMEMTRACE_USER_FREE;
	ev->type_id = entry->type_id;
	ev->event_size = sizeof(*ev);
	ev->cpu = iter->cpu;
	ev->timestamp = iter->ts;
	ev->call_site = entry->call_site;
	ev->ptr = (unsigned long)entry->ptr;

	return TRACE_TYPE_HANDLED;
}

/* The two following functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
	struct kmemtrace_alloc_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Alloc entry */
	ret = trace_seq_printf(s, " + ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Flags
	 * TODO: would be better to print the names of the GFP flags
	 */
	ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node and call site */
	ret = trace_seq_printf(s, "%4d %pf\n", entry->node,
			       (void *)entry->call_site);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter)
{
	struct kmemtrace_free_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Free entry */
	ret = trace_seq_printf(s, " - ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node and print call site */
	ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return TRACE_TYPE_UNHANDLED;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC:
		return kmemtrace_print_alloc_compress(iter);
	case TRACE_KMEM_FREE:
		return kmemtrace_print_free_compress(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
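
/*
 * Reviewer note: returning TRACE_TYPE_UNHANDLED above makes the
 * tracing core fall back to the trace_event callbacks registered
 * below, i.e. the verbose kmemtrace_print_alloc()/kmemtrace_print_free()
 * text output, or their binary counterparts when the trace is read in
 * binary mode.
 */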

static struct trace_event_functions kmem_trace_alloc_funcs = {
	.trace			= kmemtrace_print_alloc,
	.binary			= kmemtrace_print_alloc_user,
};

static struct trace_event kmem_trace_alloc = {
	.type			= TRACE_KMEM_ALLOC,
	.funcs			= &kmem_trace_alloc_funcs,
};

static struct trace_event_functions kmem_trace_free_funcs = {
	.trace			= kmemtrace_print_free,
	.binary			= kmemtrace_print_free_user,
};

static struct trace_event kmem_trace_free = {
	.type			= TRACE_KMEM_FREE,
	.funcs			= &kmem_trace_free_funcs,
};

static struct tracer kmem_tracer __read_mostly = {
	.name			= "kmemtrace",
	.init			= kmem_trace_init,
	.reset			= kmem_trace_reset,
	.print_line		= kmemtrace_print_line,
	.print_header		= kmemtrace_headers,
	.flags			= &kmem_tracer_flags
};

void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	if (!register_ftrace_event(&kmem_trace_alloc)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (!register_ftrace_event(&kmem_trace_free)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (register_tracer(&kmem_tracer) != 0) {
		pr_warning("Warning: could not register the kmem tracer\n");
		return 1;
	}

	return 0;
}
device_initcall(init_kmem_tracer);
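
For context, a hedged note on the historical workflow rather than part of
this diff: once registered, the tracer was selected through the standard
ftrace interface, e.g. by writing kmemtrace to
/sys/kernel/debug/tracing/current_tracer, and the kmem_minimalistic flag
defined at the top of the file then appeared as a toggle in the
trace_options file, switching between the verbose and compressed output
shown above.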