author     Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>   2009-03-23 09:12:26 -0400
committer  Ingo Molnar <mingo@elte.hu>                               2009-04-03 06:23:08 -0400
commit     42af9054c0eeed09ec58d13ec8bf52d225ebcfcc
tree       6ae45d50bc1fb2b663b57cb0499374b018d7e204 /kernel/trace
parent     da2635a9854423b4aa3a5f0e4e6efcc39ac99004
kmemtrace: restore original tracing data binary format, improve ABI
When kmemtrace was ported to ftrace, the marker strings were taken as
an indication of how the traced data was being exposed to userspace.
However, the actual format had been binary, not text.
This restores the original binary format, while also adding an origin CPU
field (since ftrace doesn't expose the data per-CPU to userspace) and
re-adding the timestamp field. It also drops arch-independent field
sizing where it didn't make sense, so pointers won't always be 64 bits
wide as they used to be.
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
LKML-Reference: <161be9ca8a27b432c4a6ab79f47788c4521652ae.1237813499.git.eduard.munteanu@linux360.ro>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/kmemtrace.c   82
1 file changed, 58 insertions(+), 24 deletions(-)
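For reference, here is a minimal sketch of what a userspace consumer of the restored binary stream could look like. It is not part of the patch: the struct definitions merely mirror the kernel-side layout introduced below, the debugfs path is an assumption about where ftrace is mounted, and the reader must be built with the same word size and byte order as the traced kernel, since call_site, ptr and the size_t fields are now native-width. The event_size field is what lets such a reader skip records it does not understand.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1

/* Mirrors the kernel-side layout added by this patch. */
struct kmemtrace_user_event {
	uint8_t		event_id;
	uint8_t		type_id;
	uint16_t	event_size;
	uint32_t	cpu;
	uint64_t	timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

struct kmemtrace_user_event_alloc {
	size_t		bytes_req;
	size_t		bytes_alloc;
	unsigned	gfp_flags;
	int		node;
};

/* Consume payload bytes belonging to an event we do not understand. */
static int skip_bytes(FILE *f, size_t count)
{
	char buf[256];

	while (count) {
		size_t n = count < sizeof(buf) ? count : sizeof(buf);

		if (fread(buf, 1, n, f) != n)
			return -1;
		count -= n;
	}
	return 0;
}

int main(void)
{
	/* Assumed debugfs mount point; adjust for your setup. */
	FILE *f = fopen("/sys/kernel/debug/tracing/trace_pipe", "rb");
	struct kmemtrace_user_event ev;
	struct kmemtrace_user_event_alloc ev_alloc;

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	while (fread(&ev, sizeof(ev), 1, f) == 1) {
		if (ev.event_size < sizeof(ev))
			break;	/* malformed record or mismatched ABI */

		printf("cpu=%u ts=%llu call_site=%#lx ptr=%#lx ",
		       ev.cpu, (unsigned long long) ev.timestamp,
		       ev.call_site, ev.ptr);

		if (ev.event_id == KMEMTRACE_USER_ALLOC &&
		    ev.event_size == sizeof(ev) + sizeof(ev_alloc)) {
			if (fread(&ev_alloc, sizeof(ev_alloc), 1, f) != 1)
				break;
			printf("alloc req=%zu alloc=%zu node=%d\n",
			       ev_alloc.bytes_req, ev_alloc.bytes_alloc,
			       ev_alloc.node);
		} else if (ev.event_id == KMEMTRACE_USER_FREE &&
			   ev.event_size == sizeof(ev)) {
			printf("free\n");
		} else {
			/* Unknown or extended event: use event_size to skip it. */
			if (skip_bytes(f, ev.event_size - sizeof(ev)))
				break;
			printf("event_id=%u (skipped)\n", ev.event_id);
		}
	}

	fclose(f);
	return EXIT_SUCCESS;
}

This sketch assumes the records appear back-to-back in the read stream, which is how the print functions below emit them via trace_seq_reserve().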
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index ae259f04ee39..d8c2d0c91b4c 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -208,47 +208,81 @@ static void kmemtrace_headers(struct seq_file *s)
 }
 
 /*
- * The two following functions give the original output from kmemtrace,
- * or something close to....perhaps they need some missing things
+ * The following functions give the original output from kmemtrace,
+ * plus the origin CPU, since reordering occurs in-kernel now.
  */
+
+#define KMEMTRACE_USER_ALLOC	0
+#define KMEMTRACE_USER_FREE	1
+
+struct kmemtrace_user_event {
+	u8		event_id;
+	u8		type_id;
+	u16		event_size;
+	u32		cpu;
+	u64		timestamp;
+	unsigned long	call_site;
+	unsigned long	ptr;
+};
+
+struct kmemtrace_user_event_alloc {
+	size_t		bytes_req;
+	size_t		bytes_alloc;
+	unsigned	gfp_flags;
+	int		node;
+};
+
 static enum print_line_t
-kmemtrace_print_alloc_original(struct trace_iterator *iter,
-				struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc_user(struct trace_iterator *iter,
+			   struct kmemtrace_alloc_entry *entry)
 {
 	struct trace_seq *s = &iter->seq;
-	int ret;
+	struct kmemtrace_user_event *ev;
+	struct kmemtrace_user_event_alloc *ev_alloc;
 
-	/* Taken from the old linux/kmemtrace.h */
-	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
-		"bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
-		entry->type_id, entry->call_site, (unsigned long) entry->ptr,
-		(unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
-		(unsigned long) entry->gfp_flags, entry->node);
-
-	if (!ret)
+	ev = trace_seq_reserve(s, sizeof(*ev));
+	if (!ev)
+		return TRACE_TYPE_PARTIAL_LINE;
+	ev->event_id = KMEMTRACE_USER_ALLOC;
+	ev->type_id = entry->type_id;
+	ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
+	ev->cpu = iter->cpu;
+	ev->timestamp = iter->ts;
+	ev->call_site = entry->call_site;
+	ev->ptr = (unsigned long) entry->ptr;
+
+	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
+	if (!ev_alloc)
 		return TRACE_TYPE_PARTIAL_LINE;
+	ev_alloc->bytes_req = entry->bytes_req;
+	ev_alloc->bytes_alloc = entry->bytes_alloc;
+	ev_alloc->gfp_flags = entry->gfp_flags;
+	ev_alloc->node = entry->node;
 
 	return TRACE_TYPE_HANDLED;
 }
 
 static enum print_line_t
-kmemtrace_print_free_original(struct trace_iterator *iter,
-				struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_user(struct trace_iterator *iter,
+			  struct kmemtrace_free_entry *entry)
 {
 	struct trace_seq *s = &iter->seq;
-	int ret;
+	struct kmemtrace_user_event *ev;
 
-	/* Taken from the old linux/kmemtrace.h */
-	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
-		entry->type_id, entry->call_site, (unsigned long) entry->ptr);
-
-	if (!ret)
+	ev = trace_seq_reserve(s, sizeof(*ev));
+	if (!ev)
 		return TRACE_TYPE_PARTIAL_LINE;
+	ev->event_id = KMEMTRACE_USER_FREE;
+	ev->type_id = entry->type_id;
+	ev->event_size = sizeof(*ev);
+	ev->cpu = iter->cpu;
+	ev->timestamp = iter->ts;
+	ev->call_site = entry->call_site;
+	ev->ptr = (unsigned long) entry->ptr;
 
 	return TRACE_TYPE_HANDLED;
 }
 
-
 /* The two other following provide a more minimalistic output */
 static enum print_line_t
 kmemtrace_print_alloc_compress(struct trace_iterator *iter,
@@ -385,7 +419,7 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
 		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
 			return kmemtrace_print_alloc_compress(iter, field);
 		else
-			return kmemtrace_print_alloc_original(iter, field);
+			return kmemtrace_print_alloc_user(iter, field);
 	}
 
 	case TRACE_KMEM_FREE: {
@@ -394,7 +428,7 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
 		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
 			return kmemtrace_print_free_compress(iter, field);
 		else
-			return kmemtrace_print_free_original(iter, field);
+			return kmemtrace_print_free_user(iter, field);
 	}
 
 	default: