author		Li Zefan <lizf@cn.fujitsu.com>		2010-05-24 04:22:49 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-06-28 17:12:46 -0400
commit		8728fe501ed562c1b46dde3c195eadec77bca033
tree		c911b4a6af85817ba6bdf3342cbae4838c14b1bb /kernel/trace/trace_events.c
parent		c9642c49aae1272d7c24008a40ae614470b957a6
tracing: Don't allocate common fields for every trace events
Every event has the same common fields, so it's a big waste of
memory to have a copy of those fields for every event.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
LKML-Reference: <4BFA3759.30105@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
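
To illustrate the idea, here is a minimal user-space sketch (an editor's illustration, not the kernel code; the field_def struct and the sample field names are invented): the common fields live in one shared list that is built once, each event keeps only its own fields, and a format listing is assembled from both lists. This mirrors what the new ftrace_common_fields list and the per-event lists returned by trace_get_fields() do in the patch below.

/*
 * Sketch only: one shared list for the common fields, a per-event list
 * for the rest.  The format output is composed from both, so nothing is
 * lost even though the common fields are stored only once.
 */
#include <stdio.h>

struct field_def {
	const char *type;
	const char *name;
	const struct field_def *next;
};

/* Shared common fields, defined once instead of once per event. */
static const struct field_def common_pid  = { "int", "common_pid", NULL };
static const struct field_def common_type = { "unsigned short", "common_type", &common_pid };
static const struct field_def *common_fields = &common_type;

/* Event-specific fields of one hypothetical event. */
static const struct field_def wakeup_prio = { "int", "prio", NULL };
static const struct field_def wakeup_pid  = { "pid_t", "pid", &wakeup_prio };
static const struct field_def *wakeup_fields = &wakeup_pid;

static void print_fields(const struct field_def *f)
{
	for (; f; f = f->next)
		printf("\tfield:%s %s;\n", f->type, f->name);
}

int main(void)
{
	printf("format:\n");
	print_fields(common_fields);	/* common fields, stored once   */
	printf("\n");
	print_fields(wakeup_fields);	/* fields of this event only    */
	return 0;
}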
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--  kernel/trace/trace_events.c | 113
1 file changed, 64 insertions(+), 49 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index a594f9a7ee3d..d3b4bdf00b39 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -28,6 +28,7 @@
 DEFINE_MUTEX(event_mutex);
 
 LIST_HEAD(ftrace_events);
+LIST_HEAD(ftrace_common_fields);
 
 struct list_head *
 trace_get_fields(struct ftrace_event_call *event_call)
@@ -37,15 +38,11 @@ trace_get_fields(struct ftrace_event_call *event_call)
 	return event_call->class->get_fields(event_call);
 }
 
-int trace_define_field(struct ftrace_event_call *call, const char *type,
-		       const char *name, int offset, int size, int is_signed,
-		       int filter_type)
+static int __trace_define_field(struct list_head *head, const char *type,
+				const char *name, int offset, int size,
+				int is_signed, int filter_type)
 {
 	struct ftrace_event_field *field;
-	struct list_head *head;
-
-	if (WARN_ON(!call->class))
-		return 0;
 
 	field = kzalloc(sizeof(*field), GFP_KERNEL);
 	if (!field)
@@ -68,7 +65,6 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
 	field->size = size;
 	field->is_signed = is_signed;
 
-	head = trace_get_fields(call);
 	list_add(&field->link, head);
 
 	return 0;
@@ -80,17 +76,32 @@ err:
 
 	return -ENOMEM;
 }
+
+int trace_define_field(struct ftrace_event_call *call, const char *type,
+		       const char *name, int offset, int size, int is_signed,
+		       int filter_type)
+{
+	struct list_head *head;
+
+	if (WARN_ON(!call->class))
+		return 0;
+
+	head = trace_get_fields(call);
+	return __trace_define_field(head, type, name, offset, size,
+				    is_signed, filter_type);
+}
 EXPORT_SYMBOL_GPL(trace_define_field);
 
 #define __common_field(type, item)					\
-	ret = trace_define_field(call, #type, "common_" #item,		\
-				 offsetof(typeof(ent), item),		\
-				 sizeof(ent.item),			\
-				 is_signed_type(type), FILTER_OTHER);	\
+	ret = __trace_define_field(&ftrace_common_fields, #type,	\
+				   "common_" #item,			\
+				   offsetof(typeof(ent), item),		\
+				   sizeof(ent.item),			\
+				   is_signed_type(type), FILTER_OTHER);	\
 	if (ret)							\
 		return ret;
 
-static int trace_define_common_fields(struct ftrace_event_call *call)
+static int trace_define_common_fields(void)
 {
 	int ret;
 	struct trace_entry ent;
@@ -544,32 +555,10 @@ out:
 	return ret;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-		  loff_t *ppos)
+static void print_event_fields(struct trace_seq *s, struct list_head *head)
 {
-	struct ftrace_event_call *call = filp->private_data;
 	struct ftrace_event_field *field;
-	struct list_head *head;
-	struct trace_seq *s;
-	int common_field_count = 5;
-	char *buf;
-	int r = 0;
-
-	if (*ppos)
-		return 0;
-
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
-	if (!s)
-		return -ENOMEM;
-
-	trace_seq_init(s);
 
-	trace_seq_printf(s, "name: %s\n", call->name);
-	trace_seq_printf(s, "ID: %d\n", call->event.type);
-	trace_seq_printf(s, "format:\n");
-
-	head = trace_get_fields(call);
 	list_for_each_entry_reverse(field, head, link) {
 		/*
 		 * Smartly shows the array type(except dynamic array).
@@ -584,29 +573,54 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 			array_descriptor = NULL;
 
 		if (!array_descriptor) {
-			r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-					"\tsize:%u;\tsigned:%d;\n",
-					field->type, field->name, field->offset,
-					field->size, !!field->is_signed);
+			trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
+					"\tsize:%u;\tsigned:%d;\n",
+					field->type, field->name, field->offset,
+					field->size, !!field->is_signed);
 		} else {
-			r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-					"\tsize:%u;\tsigned:%d;\n",
-					(int)(array_descriptor - field->type),
-					field->type, field->name,
-					array_descriptor, field->offset,
-					field->size, !!field->is_signed);
+			trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
+					"\tsize:%u;\tsigned:%d;\n",
+					(int)(array_descriptor - field->type),
+					field->type, field->name,
+					array_descriptor, field->offset,
+					field->size, !!field->is_signed);
 		}
+	}
+}
 
-		if (--common_field_count == 0)
-			r = trace_seq_printf(s, "\n");
+static ssize_t
+event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
+		  loff_t *ppos)
+{
+	struct ftrace_event_call *call = filp->private_data;
+	struct list_head *head;
+	struct trace_seq *s;
+	char *buf;
+	int r;
 
-		if (!r)
-			break;
-	}
+	if (*ppos)
+		return 0;
 
-	if (r)
-		r = trace_seq_printf(s, "\nprint fmt: %s\n",
-				call->print_fmt);
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	trace_seq_init(s);
+
+	trace_seq_printf(s, "name: %s\n", call->name);
+	trace_seq_printf(s, "ID: %d\n", call->event.type);
+	trace_seq_printf(s, "format:\n");
+
+	/* print common fields */
+	print_event_fields(s, &ftrace_common_fields);
+
+	trace_seq_putc(s, '\n');
+
+	/* print event specific fields */
+	head = trace_get_fields(call);
+	print_event_fields(s, head);
+
+	r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
 
 	if (!r) {
 		/*
@@ -980,9 +994,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	 */
 	head = trace_get_fields(call);
 	if (list_empty(head)) {
-		ret = trace_define_common_fields(call);
-		if (!ret)
-			ret = call->class->define_fields(call);
+		ret = call->class->define_fields(call);
 		if (ret < 0) {
 			pr_warning("Could not initialize trace point"
 				   " events/%s\n", call->name);
@@ -1319,6 +1331,9 @@ static __init int event_trace_init(void)
 	trace_create_file("enable", 0644, d_events,
 			  NULL, &ftrace_system_enable_fops);
 
+	if (trace_define_common_fields())
+		pr_warning("tracing: Failed to allocate common fields");
+
 	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
 		/* The linker may leave blanks */
 		if (!call->name)