 kernel/trace/kmemtrace.c | 128 ++++++++++++++++++++++++++++++-------------------------
 1 file changed, 67 insertions(+), 61 deletions(-)
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index d8c2d0c91b4c..5011f4d91e37 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -6,15 +6,16 @@
  * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
-#include <linux/dcache.h>
+#include <linux/tracepoint.h>
+#include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/dcache.h>
 #include <linux/fs.h>
-#include <linux/seq_file.h>
-#include <linux/tracepoint.h>
+
 #include <trace/kmemtrace.h>
 
-#include "trace.h"
 #include "trace_output.h"
+#include "trace.h"
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_KMEM_OPT_MINIMAL 0x1
@@ -26,8 +27,8 @@ static struct tracer_opt kmem_opts[] = {
 };
 
 static struct tracer_flags kmem_tracer_flags = {
-        .val = 0,
-        .opts = kmem_opts
+        .val                    = 0,
+        .opts                   = kmem_opts
 };
 
 static struct trace_array *kmemtrace_array;
@@ -41,24 +42,25 @@ static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                    gfp_t gfp_flags,
                                    int node)
 {
-        struct ring_buffer_event *event;
-        struct kmemtrace_alloc_entry *entry;
         struct trace_array *tr = kmemtrace_array;
+        struct kmemtrace_alloc_entry *entry;
+        struct ring_buffer_event *event;
 
         event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
         if (!event)
                 return;
-        entry = ring_buffer_event_data(event);
+
+        entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, 0);
 
-        entry->ent.type = TRACE_KMEM_ALLOC;
-        entry->type_id = type_id;
-        entry->call_site = call_site;
-        entry->ptr = ptr;
-        entry->bytes_req = bytes_req;
-        entry->bytes_alloc = bytes_alloc;
-        entry->gfp_flags = gfp_flags;
-        entry->node = node;
+        entry->ent.type         = TRACE_KMEM_ALLOC;
+        entry->type_id          = type_id;
+        entry->call_site        = call_site;
+        entry->ptr              = ptr;
+        entry->bytes_req        = bytes_req;
+        entry->bytes_alloc      = bytes_alloc;
+        entry->gfp_flags        = gfp_flags;
+        entry->node             = node;
 
         ring_buffer_unlock_commit(tr->buffer, event);
 
@@ -69,9 +71,9 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr)
 {
-        struct ring_buffer_event *event;
-        struct kmemtrace_free_entry *entry;
         struct trace_array *tr = kmemtrace_array;
+        struct kmemtrace_free_entry *entry;
+        struct ring_buffer_event *event;
 
         event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
         if (!event)
@@ -79,10 +81,10 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
         entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, 0);
 
-        entry->ent.type = TRACE_KMEM_FREE;
-        entry->type_id = type_id;
-        entry->call_site = call_site;
-        entry->ptr = ptr;
+        entry->ent.type         = TRACE_KMEM_FREE;
+        entry->type_id          = type_id;
+        entry->call_site        = call_site;
+        entry->ptr              = ptr;
 
         ring_buffer_unlock_commit(tr->buffer, event);
 
@@ -216,48 +218,50 @@ static void kmemtrace_headers(struct seq_file *s)
 #define KMEMTRACE_USER_FREE 1
 
 struct kmemtrace_user_event {
-        u8 event_id;
-        u8 type_id;
-        u16 event_size;
-        u32 cpu;
-        u64 timestamp;
-        unsigned long call_site;
-        unsigned long ptr;
+        u8                      event_id;
+        u8                      type_id;
+        u16                     event_size;
+        u32                     cpu;
+        u64                     timestamp;
+        unsigned long           call_site;
+        unsigned long           ptr;
 };
 
 struct kmemtrace_user_event_alloc {
-        size_t bytes_req;
-        size_t bytes_alloc;
-        unsigned gfp_flags;
-        int node;
+        size_t                  bytes_req;
+        size_t                  bytes_alloc;
+        unsigned                gfp_flags;
+        int                     node;
 };
 
 static enum print_line_t
 kmemtrace_print_alloc_user(struct trace_iterator *iter,
                            struct kmemtrace_alloc_entry *entry)
 {
+        struct kmemtrace_user_event_alloc *ev_alloc;
         struct trace_seq *s = &iter->seq;
         struct kmemtrace_user_event *ev;
-        struct kmemtrace_user_event_alloc *ev_alloc;
 
         ev = trace_seq_reserve(s, sizeof(*ev));
         if (!ev)
                 return TRACE_TYPE_PARTIAL_LINE;
-        ev->event_id = KMEMTRACE_USER_ALLOC;
-        ev->type_id = entry->type_id;
-        ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
-        ev->cpu = iter->cpu;
-        ev->timestamp = iter->ts;
-        ev->call_site = entry->call_site;
-        ev->ptr = (unsigned long) entry->ptr;
+
+        ev->event_id            = KMEMTRACE_USER_ALLOC;
+        ev->type_id             = entry->type_id;
+        ev->event_size          = sizeof(*ev) + sizeof(*ev_alloc);
+        ev->cpu                 = iter->cpu;
+        ev->timestamp           = iter->ts;
+        ev->call_site           = entry->call_site;
+        ev->ptr                 = (unsigned long)entry->ptr;
 
         ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
         if (!ev_alloc)
                 return TRACE_TYPE_PARTIAL_LINE;
-        ev_alloc->bytes_req = entry->bytes_req;
-        ev_alloc->bytes_alloc = entry->bytes_alloc;
-        ev_alloc->gfp_flags = entry->gfp_flags;
-        ev_alloc->node = entry->node;
+
+        ev_alloc->bytes_req     = entry->bytes_req;
+        ev_alloc->bytes_alloc   = entry->bytes_alloc;
+        ev_alloc->gfp_flags     = entry->gfp_flags;
+        ev_alloc->node          = entry->node;
 
         return TRACE_TYPE_HANDLED;
 }
@@ -272,13 +276,14 @@ kmemtrace_print_free_user(struct trace_iterator *iter,
         ev = trace_seq_reserve(s, sizeof(*ev));
         if (!ev)
                 return TRACE_TYPE_PARTIAL_LINE;
-        ev->event_id = KMEMTRACE_USER_FREE;
-        ev->type_id = entry->type_id;
-        ev->event_size = sizeof(*ev);
-        ev->cpu = iter->cpu;
-        ev->timestamp = iter->ts;
-        ev->call_site = entry->call_site;
-        ev->ptr = (unsigned long) entry->ptr;
+
+        ev->event_id            = KMEMTRACE_USER_FREE;
+        ev->type_id             = entry->type_id;
+        ev->event_size          = sizeof(*ev);
+        ev->cpu                 = iter->cpu;
+        ev->timestamp           = iter->ts;
+        ev->call_site           = entry->call_site;
+        ev->ptr                 = (unsigned long)entry->ptr;
 
         return TRACE_TYPE_HANDLED;
 }
@@ -354,7 +359,7 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter,
 
 static enum print_line_t
 kmemtrace_print_free_compress(struct trace_iterator *iter,
-                               struct kmemtrace_free_entry *entry)
+                              struct kmemtrace_free_entry *entry)
 {
         struct trace_seq *s = &iter->seq;
         int ret;
@@ -415,6 +420,7 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
         switch (entry->type) {
         case TRACE_KMEM_ALLOC: {
                 struct kmemtrace_alloc_entry *field;
+
                 trace_assign_type(field, entry);
                 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                         return kmemtrace_print_alloc_compress(iter, field);
@@ -424,6 +430,7 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
 
         case TRACE_KMEM_FREE: {
                 struct kmemtrace_free_entry *field;
+
                 trace_assign_type(field, entry);
                 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                         return kmemtrace_print_free_compress(iter, field);
@@ -437,12 +444,12 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
 }
 
 static struct tracer kmem_tracer __read_mostly = {
-        .name = "kmemtrace",
-        .init = kmem_trace_init,
-        .reset = kmem_trace_reset,
-        .print_line = kmemtrace_print_line,
-        .print_header = kmemtrace_headers,
-        .flags = &kmem_tracer_flags
+        .name           = "kmemtrace",
+        .init           = kmem_trace_init,
+        .reset          = kmem_trace_reset,
+        .print_line     = kmemtrace_print_line,
+        .print_header   = kmemtrace_headers,
+        .flags          = &kmem_tracer_flags
 };
 
 void kmemtrace_init(void)
@@ -454,5 +461,4 @@ static int __init init_kmem_tracer(void)
 {
         return register_tracer(&kmem_tracer);
 }
-
 device_initcall(init_kmem_tracer);
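
For readers following the declaration reordering above: both kmemtrace_alloc() and kmemtrace_free() use the same ftrace ring-buffer logging pattern of reserve, fill, commit, built from the calls visible in the hunks (ring_buffer_lock_reserve(), ring_buffer_event_data(), tracing_generic_entry_update(), ring_buffer_unlock_commit()). Below is a minimal sketch of that pattern with the steps annotated; the wrapper name kmemtrace_log_free() is hypothetical and the snippet is illustrative only, not part of the patch.

/*
 * Illustrative sketch, not part of the patch: the reserve/fill/commit
 * ring-buffer pattern used by kmemtrace_alloc() and kmemtrace_free().
 * The wrapper name kmemtrace_log_free() is hypothetical.
 */
static void kmemtrace_log_free(struct trace_array *tr,
                               enum kmemtrace_type_id type_id,
                               unsigned long call_site,
                               const void *ptr)
{
        struct kmemtrace_free_entry *entry;
        struct ring_buffer_event *event;

        /* Reserve room for one record; NULL means no space could be
         * reserved, in which case the event is silently dropped. */
        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        /* Fill in the reserved record. */
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type         = TRACE_KMEM_FREE;
        entry->type_id          = type_id;
        entry->call_site        = call_site;
        entry->ptr              = ptr;

        /* Commit, making the record visible to trace readers. */
        ring_buffer_unlock_commit(tr->buffer, event);
}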