author     Ingo Molnar <mingo@elte.hu>    2009-03-10 17:55:31 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-03-10 17:55:31 -0400
commit     e2b8b2808538a91444e78c7db5a30519cadd09b2 (patch)
tree       60c45581817db746bd61670f5c95b19c2661daa0 /kernel
parent     4dd163a0512eb91bbcf4e66d2f65b8e4042561b3 (diff)
parent     ef18012b248b47ec9a12c3a83ca5e99782d39c5d (diff)
Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
Diffstat (limited to 'kernel')
 kernel/trace/blktrace.c              | 10
 kernel/trace/trace.c                 |  2
 kernel/trace/trace_branch.c          |  2
 kernel/trace/trace_events.c          | 45
 kernel/trace/trace_events_stage_1.h  |  8
 kernel/trace/trace_events_stage_2.h  | 13
 kernel/trace/trace_events_stage_3.h  | 98
 kernel/trace/trace_export.c          |  2
 kernel/trace/trace_functions_graph.c |  6
 kernel/trace/trace_output.c          | 14
 kernel/trace/trace_workqueue.c       |  6
 11 files changed, 116 insertions, 90 deletions
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d24a10b8411a..1f32e4edf490 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -33,7 +33,7 @@ static struct trace_array *blk_tr;
 static int __read_mostly blk_tracer_enabled;

 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1

 static struct tracer_opt blk_tracer_opts[] = {
 	/* Default disable the minimalistic output */
@@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(blk_trace_startstop);
 /**
  * blk_trace_ioctl: - handle the ioctls associated with tracing
  * @bdev: the block device
  * @cmd: the ioctl cmd
  * @arg: the argument data, if any
  *
  **/
@@ -1128,9 +1128,9 @@ static void blk_tracer_reset(struct trace_array *tr)

 static struct {
 	const char *act[2];
 	int (*print)(struct trace_seq *s, const struct trace_entry *ent);
 } what2act[] __read_mostly = {
 	[__BLK_TA_QUEUE]	= {{ "Q", "queue" },	  blk_log_generic },
 	[__BLK_TA_BACKMERGE]	= {{ "M", "backmerge" },  blk_log_generic },
 	[__BLK_TA_FRONTMERGE]	= {{ "F", "frontmerge" }, blk_log_generic },
 	[__BLK_TA_GETRQ]	= {{ "G", "getrq" },	  blk_log_generic },
@@ -1229,7 +1229,7 @@ static struct tracer blk_tracer __read_mostly = {
 };

 static struct trace_event trace_blk_event = {
 	.type = TRACE_BLK,
 	.trace = blk_trace_event_print,
 	.binary = blk_trace_event_print_binary,
 };
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5b56199e5e0..a941d257b619 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -799,7 +799,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,

 	entry->preempt_count = pc & 0xff;
 	entry->pid = (tsk) ? tsk->pid : 0;
 	entry->tgid = (tsk) ? tsk->tgid : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index aaa0755268b9..ad8c22efff41 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -157,7 +157,7 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,


 static struct trace_event trace_branch_event = {
 	.type = TRACE_BRANCH,
 	.trace = trace_branch_print,
 };

diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 1880a6438097..769dfd00fc85 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -102,7 +102,7 @@ static int ftrace_set_clr_event(char *buf, int set)
 	mutex_lock(&event_mutex);
 	events_for_each(call) {

-		if (!call->name)
+		if (!call->name || !call->regfunc)
 			continue;

 		if (match &&
@@ -207,8 +207,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos)

 	(*pos)++;

-	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
-		return NULL;
+	for (;;) {
+		if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
+			return NULL;
+
+		/*
+		 * The ftrace subsystem is for showing formats only.
+		 * They can not be enabled or disabled via the event files.
+		 */
+		if (call->regfunc)
+			break;
+
+		call++;
+		next = call;
+	}

 	m->private = ++next;

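A note on the t_next() change above: the event calls live in an array that the build collects into the "_ftrace_events" linker section, bounded by __stop_ftrace_events, and the loop now skips entries without a regfunc (format-only entries that cannot be enabled or disabled). The following is only a rough userspace sketch of the same section-array pattern, not kernel code; all names are invented, and it assumes GNU ld, which defines __start_<section>/__stop_<section> symbols for sections whose names are valid C identifiers.

#include <stdio.h>

struct item {
	const char	*name;
	int		(*regfunc)(void);
};

/* Place every instance in one named section so the linker lays them
 * out back to back, like the kernel's "_ftrace_events" section. */
#define DEFINE_ITEM(n, reg)						\
	static struct item item_##n					\
	__attribute__((used, aligned(__alignof__(struct item)),	\
		       section("my_items"))) = {			\
		.name = #n,						\
		.regfunc = reg,						\
	}

static int reg_alpha(void) { return 0; }

DEFINE_ITEM(alpha, reg_alpha);
DEFINE_ITEM(beta, NULL);		/* no regfunc: skipped below */

/* Section bounds provided automatically by GNU ld. */
extern struct item __start_my_items[];
extern struct item __stop_my_items[];

int main(void)
{
	struct item *it;

	for (it = __start_my_items; it < __stop_my_items; it++) {
		if (!it->regfunc)	/* same skip rule as t_next() */
			continue;
		printf("%s\n", it->name);
	}
	return 0;
}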
@@ -338,8 +350,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,

 #undef FIELD
 #define FIELD(type, name) \
-	#type, #name, (unsigned int)offsetof(typeof(field), name), \
-		(unsigned int)sizeof(field.name)
+	#type, #name, offsetof(typeof(field), name), sizeof(field.name)

 static int trace_write_header(struct trace_seq *s)
 {
@@ -347,11 +358,11 @@ static int trace_write_header(struct trace_seq *s)

 	/* struct trace_entry */
 	return trace_seq_printf(s,
-			"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
-			"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
-			"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
-			"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
-			"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
 			"\n",
 			FIELD(unsigned char, type),
 			FIELD(unsigned char, flags),
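The hunk above drops the (unsigned int) casts from FIELD() and switches the header format to %zu, because offsetof() and sizeof evaluate to size_t. A minimal userspace sketch of the same idea, with a made-up struct standing in for struct trace_entry:

#include <stdio.h>
#include <stddef.h>

struct sample_entry {
	unsigned char	type;
	unsigned char	flags;
	int		pid;
};

/* Expands to the four arguments consumed by one "field:" line. */
#define FIELD(type, name)						\
	#type, #name, offsetof(struct sample_entry, name),		\
	sizeof(((struct sample_entry *)0)->name)

int main(void)
{
	/* offsetof() and sizeof yield size_t, hence %zu. */
	printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
	       FIELD(unsigned char, type));
	printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
	       FIELD(unsigned char, flags));
	printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
	       FIELD(int, pid));
	return 0;
}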
@@ -417,6 +428,13 @@ static const struct seq_operations show_set_event_seq_ops = {
 	.stop = t_stop,
 };

+static const struct file_operations ftrace_avail_fops = {
+	.open = ftrace_event_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
 static const struct file_operations ftrace_set_event_fops = {
 	.open = ftrace_event_seq_open,
 	.read = seq_read,
@@ -558,6 +576,13 @@ static __init int event_trace_init(void)
 	if (!d_tracer)
 		return 0;

+	entry = debugfs_create_file("available_events", 0444, d_tracer,
+				    (void *)&show_event_seq_ops,
+				    &ftrace_avail_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'available_events' entry\n");
+
 	entry = debugfs_create_file("set_event", 0644, d_tracer,
 				    (void *)&show_set_event_seq_ops,
 				    &ftrace_set_event_fops);
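The two hunks above add a read-only available_events file next to set_event. Assuming debugfs is mounted at /sys/kernel/debug (the usual location, but an assumption here), the new file could be read from userspace with something like this sketch:

#include <stdio.h>

int main(void)
{
	/* Path assumes debugfs is mounted at /sys/kernel/debug. */
	const char *path = "/sys/kernel/debug/tracing/available_events";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one event name per line */
	fclose(f);
	return 0;
}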
diff --git a/kernel/trace/trace_events_stage_1.h b/kernel/trace/trace_events_stage_1.h
index 15e9bf965a18..38985f9b379c 100644
--- a/kernel/trace/trace_events_stage_1.h
+++ b/kernel/trace/trace_events_stage_1.h
@@ -6,11 +6,13 @@
  * struct ftrace_raw_<call> {
  *	struct trace_entry	ent;
  *	<type>			<item>;
+ *	<type2>			<item2>[<len>];
  *	[...]
  * };
  *
- * The <type> <item> is created by the TRACE_FIELD(type, item, assign)
- * macro. We simply do "type item;", and that will create the fields
+ * The <type> <item> is created by the __field(type, item) macro or
+ * the __array(type2, item2, len) macro.
+ * We simply do "type item;", and that will create the fields
  * in the structure.
  */

@@ -27,7 +29,7 @@
 #define TP_STRUCT__entry(args...) args

 #undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, args, tstruct, print, assign) \
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
 	struct ftrace_raw_##name { \
 		struct trace_entry ent; \
 		tstruct \
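Stage 1 turns the TP_STRUCT__entry() description of an event into a struct ftrace_raw_<call> by (re)defining the __field()/__array() helpers. The following self-contained userspace sketch mimics that macro trick with an invented event and an approximation of struct trace_entry; it is only an illustration, not the kernel's actual expansion.

#include <stdio.h>
#include <stddef.h>

/* Rough stand-in for the kernel's trace_entry header. */
struct trace_entry {
	unsigned char	type;
	unsigned char	flags;
	unsigned char	preempt_count;
	int		pid;
	int		tgid;
};

/* The stage 1 trick: the same description expands into struct fields. */
#define __field(type, item)		type	item;
#define __array(type, item, len)	type	item[len];
#define TP_STRUCT__entry(args...)	args

#define DEFINE_RAW_STRUCT(name, tstruct)				\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
	};

/* A made-up event with one plain field and one array field. */
DEFINE_RAW_STRUCT(my_event,
	TP_STRUCT__entry(
		__field(	int,	id		)
		__array(	char,	msg,	32	)
	)
)

int main(void)
{
	printf("id  at offset %zu, size %zu\n",
	       offsetof(struct ftrace_raw_my_event, id),
	       sizeof(((struct ftrace_raw_my_event *)0)->id));
	printf("msg at offset %zu, size %zu\n",
	       offsetof(struct ftrace_raw_my_event, msg),
	       sizeof(((struct ftrace_raw_my_event *)0)->msg));
	return 0;
}

Compiling and running this prints the field offsets and sizes the same way the later stages report them in the per-event format output.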
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
index d91bf4c56661..ca347afd6aa0 100644
--- a/kernel/trace/trace_events_stage_2.h
+++ b/kernel/trace/trace_events_stage_2.h
@@ -20,7 +20,7 @@
  *
  *	field = (typeof(field))entry;
  *
- *	ret = trace_seq_printf(s, <TP_RAW_FMT> "%s", <ARGS> "\n");
+ *	ret = trace_seq_printf(s, <TP_printk> "\n");
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
  *
@@ -39,7 +39,7 @@
 #define TP_printk(fmt, args...) fmt "\n", args

 #undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, print, assign) \
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
 enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 { \
@@ -76,10 +76,9 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
  *	int ret;
  *
  *	ret = trace_seq_printf(s, #type " " #item ";"
- *			       " size:%d; offset:%d;\n",
- *			       sizeof(field.type),
- *			       offsetof(struct ftrace_raw_##call,
- *					item));
+ *			       " offset:%u; size:%u;\n",
+ *			       offsetof(struct ftrace_raw_##call, item),
+ *			       sizeof(field.type));
  *
  * }
  */
@@ -115,7 +114,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 #define TP_fast_assign(args...) args

 #undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, print, func) \
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
 static int \
 ftrace_format_##call(struct trace_seq *s) \
 { \
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index 3ba55d4ab073..ae2e323df0c7 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -5,23 +5,23 @@
  *
  * static void ftrace_event_<call>(proto)
  * {
  *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
  * }
  *
  * static int ftrace_reg_event_<call>(void)
  * {
  *	int ret;
  *
  *	ret = register_trace_<call>(ftrace_event_<call>);
  *	if (!ret)
  *		pr_info("event trace: Could not activate trace point "
  *			"probe to <call>");
  *	return ret;
  * }
  *
  * static void ftrace_unreg_event_<call>(void)
  * {
  *	unregister_trace_<call>(ftrace_event_<call>);
  * }
  *
  * For those macros defined with TRACE_FORMAT:
@@ -29,9 +29,9 @@
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
  *	.name = "<call>",
  *	.regfunc = ftrace_reg_event_<call>,
  *	.unregfunc = ftrace_unreg_event_<call>,
  * }
  *
  *
@@ -41,66 +41,66 @@
  *
  * static void ftrace_raw_event_<call>(proto)
  * {
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	unsigned long irq_flags;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
  *	event = trace_current_buffer_lock_reserve(event_<call>.id,
  *				  sizeof(struct ftrace_raw_<call>),
  *				  irq_flags, pc);
  *	if (!event)
  *		return;
  *	entry = ring_buffer_event_data(event);
  *
- *	<tstruct>; <-- Here we assign the entries by the TRACE_FIELD.
+ *	<assign>; <-- Here we assign the entries by the __field and
+ *		      __array macros.
  *
  *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(void)
  * {
  *	int ret;
  *
  *	ret = register_trace_<call>(ftrace_raw_event_<call>);
  *	if (!ret)
  *		pr_info("event trace: Could not activate trace point "
  *			"probe to <call>");
  *	return ret;
  * }
  *
  * static void ftrace_unreg_event_<call>(void)
  * {
  *	unregister_trace_<call>(ftrace_raw_event_<call>);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
  *	.trace = ftrace_raw_output_<call>, <-- stage 2
  * };
  *
  * static int ftrace_raw_init_event_<call>(void)
  * {
  *	int id;
  *
  *	id = register_ftrace_event(&ftrace_event_type_<call>);
  *	if (!id)
  *		return -ENODEV;
  *	event_<call>.id = id;
  *	return 0;
  * }
  *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
  *	.name = "<call>",
- *	.regfunc = ftrace_reg_event_<call>,
- *	.unregfunc = ftrace_unreg_event_<call>,
- *	.raw_init = ftrace_raw_init_event_<call>,
- *	.raw_reg = ftrace_raw_reg_event_<call>,
- *	.raw_unreg = ftrace_raw_unreg_event_<call>,
+ *	.system = "<system>",
+ *	.raw_init = ftrace_raw_init_event_<call>,
+ *	.regfunc = ftrace_reg_event_<call>,
+ *	.unregfunc = ftrace_unreg_event_<call>,
  *	.show_format = ftrace_format_<call>,
  * }
  *
@@ -138,7 +138,7 @@ _TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
 static struct ftrace_event_call __used \
 __attribute__((__aligned__(4))) \
 __attribute__((section("_ftrace_events"))) event_##call = { \
 	.name = #call, \
 	.system = __stringify(TRACE_SYSTEM), \
 	.regfunc = ftrace_reg_event_##call, \
 	.unregfunc = ftrace_unreg_event_##call, \
@@ -148,7 +148,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #define __entry entry

 #undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, print, assign) \
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
 \
 static struct ftrace_event_call event_##call; \
 \
@@ -163,7 +163,7 @@ static void ftrace_raw_event_##call(proto) \
 	pc = preempt_count(); \
 \
 	event = trace_current_buffer_lock_reserve(event_##call.id, \
 				  sizeof(struct ftrace_raw_##call), \
 				  irq_flags, pc); \
 	if (!event) \
 		return; \
@@ -208,7 +208,7 @@ static int ftrace_raw_init_event_##call(void) \
 static struct ftrace_event_call __used \
 __attribute__((__aligned__(4))) \
 __attribute__((section("_ftrace_events"))) event_##call = { \
 	.name = #call, \
 	.system = __stringify(TRACE_SYSTEM), \
 	.raw_init = ftrace_raw_init_event_##call, \
 	.regfunc = ftrace_raw_reg_event_##call, \
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 23ae78430d58..4d9952d3df50 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -94,7 +94,7 @@ ftrace_format_##call(struct trace_seq *s) \
 static struct ftrace_event_call __used \
 __attribute__((__aligned__(4))) \
 __attribute__((section("_ftrace_events"))) event_##call = { \
 	.name = #call, \
 	.id = proto, \
 	.system = __stringify(TRACE_SYSTEM), \
 	.show_format = ftrace_format_##call, \
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 35257be6a9d6..8566c14b3e9a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -841,12 +841,12 @@ static void graph_trace_close(struct trace_iterator *iter)
 }

 static struct tracer graph_trace __read_mostly = {
 	.name = "function_graph",
 	.open = graph_trace_open,
 	.close = graph_trace_close,
 	.wait_pipe = poll_wait_pipe,
 	.init = graph_trace_init,
 	.reset = graph_trace_reset,
 	.print_line = print_graph_function,
 	.print_header = print_graph_headers,
 	.flags = &tracer_flags,
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ef8fd661b217..491832af9ba1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -565,7 +565,7 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
 }

 static struct trace_event trace_fn_event = {
 	.type = TRACE_FN,
 	.trace = trace_fn_trace,
 	.raw = trace_fn_raw,
 	.hex = trace_fn_hex,
@@ -696,7 +696,7 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
 }

 static struct trace_event trace_ctx_event = {
 	.type = TRACE_CTX,
 	.trace = trace_ctx_print,
 	.raw = trace_ctx_raw,
 	.hex = trace_ctx_hex,
@@ -704,7 +704,7 @@ static struct trace_event trace_ctx_event = {
 };

 static struct trace_event trace_wake_event = {
 	.type = TRACE_WAKE,
 	.trace = trace_wake_print,
 	.raw = trace_wake_raw,
 	.hex = trace_wake_hex,
@@ -759,7 +759,7 @@ static enum print_line_t trace_special_bin(struct trace_iterator *iter,
 }

 static struct trace_event trace_special_event = {
 	.type = TRACE_SPECIAL,
 	.trace = trace_special_print,
 	.raw = trace_special_print,
 	.hex = trace_special_hex,
@@ -796,7 +796,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 }

 static struct trace_event trace_stack_event = {
 	.type = TRACE_STACK,
 	.trace = trace_stack_print,
 	.raw = trace_special_print,
 	.hex = trace_special_hex,
@@ -825,7 +825,7 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 }

 static struct trace_event trace_user_stack_event = {
 	.type = TRACE_USER_STACK,
 	.trace = trace_user_stack_print,
 	.raw = trace_special_print,
 	.hex = trace_special_hex,
@@ -879,7 +879,7 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)


 static struct trace_event trace_print_event = {
 	.type = TRACE_PRINT,
 	.trace = trace_print_print,
 	.raw = trace_print_raw,
 };
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 739fdacf873b..fb5ccac8bbc0 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -19,14 +19,14 @@ struct cpu_workqueue_stats {
 /* Useful to know if we print the cpu headers */
 	bool first_entry;
 	int cpu;
 	pid_t pid;
 /* Can be inserted from interrupt or user context, need to be atomic */
 	atomic_t inserted;
 	/*
 	 * Don't need to be atomic, works are serialized in a single workqueue thread
 	 * on a single CPU.
 	 */
 	unsigned int executed;
 };

 /* List of workqueue threads on one cpu */