Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace.c                    |  93
-rw-r--r--   kernel/trace/trace.h                    |  27
-rw-r--r--   kernel/trace/trace_functions_return.c   |  38
-rw-r--r--   kernel/trace/trace_nop.c                |  52
-rw-r--r--   kernel/trace/trace_stack.c              |  24
5 files changed, 213 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5653c6b07ba1..4ee6f0375222 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,6 +43,20 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
+/* For tracers that don't implement custom flags */
+static struct tracer_opt dummy_tracer_opt[] = {
+	{ }
+};
+
+static struct tracer_flags dummy_tracer_flags = {
+	.val = 0,
+	.opts = dummy_tracer_opt
+};
+
+static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+{
+	return 0;
+}
 
 /*
  * Kill all tracing for good (never come back).
@@ -537,6 +551,14 @@ int register_tracer(struct tracer *type)
 		}
 	}
 
+	if (!type->set_flag)
+		type->set_flag = &dummy_set_flag;
+	if (!type->flags)
+		type->flags = &dummy_tracer_flags;
+	else
+		if (!type->flags->opts)
+			type->flags->opts = dummy_tracer_opt;
+
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	if (type->selftest) {
 		struct tracer *saved_tracer = current_trace;
@@ -840,6 +862,7 @@ static void __trace_function_return(struct trace_array *tr,
 	entry->parent_ip = trace->ret;
 	entry->rettime = trace->rettime;
 	entry->calltime = trace->calltime;
+	entry->overrun = trace->overrun;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -2436,10 +2459,13 @@ static ssize_t
 tracing_trace_options_read(struct file *filp, char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
+	int i;
 	char *buf;
 	int r = 0;
 	int len = 0;
-	int i;
+	u32 tracer_flags = current_trace->flags->val;
+	struct tracer_opt *trace_opts = current_trace->flags->opts;
+

 	/* calulate max size */
 	for (i = 0; trace_options[i]; i++) {
@@ -2447,6 +2473,15 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 		len += 3; /* "no" and space */
 	}
 
+	/*
+	 * Increase the size with names of options specific
+	 * to the current tracer.
+	 */
+	for (i = 0; trace_opts[i].name; i++) {
+		len += strlen(trace_opts[i].name);
+		len += 3; /* "no" and space */
+	}
+
 	/* +2 for \n and \0 */
 	buf = kmalloc(len + 2, GFP_KERNEL);
 	if (!buf)
@@ -2459,6 +2494,15 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 			r += sprintf(buf + r, "no%s ", trace_options[i]);
 	}
 
+	for (i = 0; trace_opts[i].name; i++) {
+		if (tracer_flags & trace_opts[i].bit)
+			r += sprintf(buf + r, "%s ",
+				trace_opts[i].name);
+		else
+			r += sprintf(buf + r, "no%s ",
+				trace_opts[i].name);
+	}
+
 	r += sprintf(buf + r, "\n");
 	WARN_ON(r >= len + 2);
 
@@ -2469,6 +2513,40 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 	return r;
 }
 
+/* Try to assign a tracer specific option */
+static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+{
+	struct tracer_flags *trace_flags = trace->flags;
+	struct tracer_opt *opts = NULL;
+	int ret = 0, i = 0;
+	int len;
+
+	for (i = 0; trace_flags->opts[i].name; i++) {
+		opts = &trace_flags->opts[i];
+		len = strlen(opts->name);
+
+		if (strncmp(cmp, opts->name, len) == 0) {
+			ret = trace->set_flag(trace_flags->val,
+				opts->bit, !neg);
+			break;
+		}
+	}
+	/* Not found */
+	if (!trace_flags->opts[i].name)
+		return -EINVAL;
+
+	/* Refused to handle */
+	if (ret)
+		return ret;
+
+	if (neg)
+		trace_flags->val &= ~opts->bit;
+	else
+		trace_flags->val |= opts->bit;
+
+	return 0;
+}
+
 static ssize_t
 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
@@ -2476,6 +2554,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	char buf[64];
 	char *cmp = buf;
 	int neg = 0;
+	int ret;
 	int i;
 
 	if (cnt >= sizeof(buf))
@@ -2502,11 +2581,13 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			break;
 		}
 	}
-	/*
-	 * If no option could be set, return an error:
-	 */
-	if (!trace_options[i])
-		return -EINVAL;
+
+	/* If no option could be set, test the specific tracer options */
+	if (!trace_options[i]) {
+		ret = set_tracer_option(current_trace, cmp, neg);
+		if (ret)
+			return ret;
+	}
 
 	filp->f_pos += cnt;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 37947f6b92bf..2cb12fd98f6b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -60,6 +60,7 @@ struct ftrace_ret_entry {
 	unsigned long parent_ip;
 	unsigned long long calltime;
 	unsigned long long rettime;
+	unsigned long overrun;
 };
 extern struct tracer boot_tracer;
 
@@ -259,6 +260,29 @@ enum print_line_t {
 	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
 };
 
+
+/*
+ * An option specific to a tracer. This is a boolean value.
+ * The bit is the bit index that sets its value in the
+ * flags value of struct tracer_flags.
+ */
+struct tracer_opt {
+	const char *name; /* Will appear in the trace_options file */
+	u32 bit; /* Mask assigned to the val field in tracer_flags */
+};
+
+/*
+ * The set of specific options for a tracer. Your tracer
+ * has to set the initial value of the flags val.
+ */
+struct tracer_flags {
+	u32 val;
+	struct tracer_opt *opts;
+};
+
+/* Makes it easier to define a tracer option */
+#define TRACER_OPT(s, b)	.name = #s, .bit = b
+
 /*
  * A specific tracer, represented by methods that operate on a trace array:
  */
@@ -280,8 +304,11 @@ struct tracer {
 					struct trace_array *tr);
 #endif
 	enum print_line_t (*print_line)(struct trace_iterator *iter);
+	/* If you handled the flag setting, return 0 */
+	int (*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer *next;
 	int print_max;
+	struct tracer_flags *flags;
 };
 
 struct trace_seq {
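Taken together, the trace.c and trace.h hunks above let every tracer expose its own boolean options through the trace_options file, next to the global ones. A rough sketch of what a tracer now provides is shown below; the names my_opts, MY_OPT_VERBOSE, my_set_flag and my_tracer are illustrative only and are not part of this patch (the in-tree users are the return and nop tracers changed further down):

#define MY_OPT_VERBOSE	0x1

static struct tracer_opt my_opts[] = {
	/* One entry per option, terminated by an empty entry */
	{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
	{ }
};

static struct tracer_flags my_flags = {
	.val	= 0,		/* every option starts disabled */
	.opts	= my_opts,
};

/* Return 0 to accept the change; the core then updates my_flags.val */
static int my_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

static struct tracer my_tracer __read_mostly = {
	.name		= "my_tracer",
	.flags		= &my_flags,
	.set_flag	= my_set_flag,
};

If a tracer registers without .flags or .set_flag, register_tracer() now falls back to the dummy flags and the always-accepting dummy_set_flag() from the first hunk.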
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
index a68564af022b..e00d64509c9c 100644
--- a/kernel/trace/trace_functions_return.c
+++ b/kernel/trace/trace_functions_return.c
@@ -14,6 +14,19 @@
 #include "trace.h"
 
 
+#define TRACE_RETURN_PRINT_OVERRUN	0x1
+static struct tracer_opt trace_opts[] = {
+	/* Display overruns or not */
+	{ TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
+	{ } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+	.val = 0, /* Don't display overruns by default */
+	.opts = trace_opts
+};
+
+
 static int return_trace_init(struct trace_array *tr)
 {
 	int cpu;
@@ -42,26 +55,39 @@ print_return_function(struct trace_iterator *iter)
 		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+
 		ret = seq_print_ip_sym(s, field->ip,
 					trace_flags & TRACE_ITER_SYM_MASK);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
-		ret = trace_seq_printf(s, " (%llu ns)\n",
+
+		ret = trace_seq_printf(s, " (%llu ns)",
 					field->rettime - field->calltime);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
-		else
-			return TRACE_TYPE_HANDLED;
+
+		if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
+			ret = trace_seq_printf(s, " (Overruns: %lu)",
+						field->overrun);
+			if (!ret)
+				return TRACE_TYPE_PARTIAL_LINE;
+		}
+
+		ret = trace_seq_printf(s, "\n");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		return TRACE_TYPE_HANDLED;
 	}
 	return TRACE_TYPE_UNHANDLED;
 }
 
-static struct tracer return_trace __read_mostly =
-{
+static struct tracer return_trace __read_mostly = {
 	.name = "return",
 	.init = return_trace_init,
 	.reset = return_trace_reset,
-	.print_line = print_return_function
+	.print_line = print_return_function,
+	.flags = &tracer_flags,
 };
 
 static __init int init_return_trace(void)
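For reference, with the return tracer active and its new overrun option enabled (echo overrun > trace_options in the tracing debugfs directory), a line produced by print_return_function() above would presumably look something like the following; the symbols and numbers are made up for illustration:

	do_sys_open+0x5b/0xd6 -> getname (2103 ns) (Overruns: 0)

Without the option, the trailing "(Overruns: ...)" field is simply omitted, which matches the default .val = 0 in tracer_flags.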
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 0e77415caed3..b9767acd30ac 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -12,6 +12,27 @@
 
 #include "trace.h"
 
+/* Our two options */
+enum {
+	TRACE_NOP_OPT_ACCEPT = 0x1,
+	TRACE_NOP_OPT_REFUSE = 0x2
+};
+
+/* Options for the tracer (see trace_options file) */
+static struct tracer_opt nop_opts[] = {
+	/* Option that will be accepted by set_flag callback */
+	{ TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) },
+	/* Option that will be refused by set_flag callback */
+	{ TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) },
+	{ } /* Always set a last empty entry */
+};
+
+static struct tracer_flags nop_flags = {
+	/* You can check your flags value here when you want. */
+	.val = 0, /* By default: all flags disabled */
+	.opts = nop_opts
+};
+
 static struct trace_array *ctx_trace;
 
 static void start_nop_trace(struct trace_array *tr)
@@ -41,6 +62,35 @@ static void nop_trace_reset(struct trace_array *tr)
 	stop_nop_trace(tr);
 }
 
+/* It only serves as a signal handler and a callback to
+ * accept or refuse the setting of a flag.
+ * If you don't implement it, then the flag setting will be
+ * automatically accepted.
+ */
+static int nop_set_flag(u32 old_flags, u32 bit, int set)
+{
+	/*
+	 * Note that you don't need to update nop_flags.val yourself.
+	 * The tracing API will do it automatically if you return 0.
+	 */
+	if (bit == TRACE_NOP_OPT_ACCEPT) {
+		printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept."
+			" Now cat trace_options to see the result\n",
+			set);
+		return 0;
+	}
+
+	if (bit == TRACE_NOP_OPT_REFUSE) {
+		printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
+			" Now cat trace_options to see the result\n",
+			set);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
 struct tracer nop_trace __read_mostly =
 {
 	.name = "nop",
@@ -49,5 +99,7 @@ struct tracer nop_trace __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_nop,
 #endif
+	.flags = &nop_flags,
+	.set_flag = nop_set_flag
 };
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d39e8b7de6a2..fde3be15c642 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -180,11 +180,16 @@ static struct file_operations stack_max_size_fops = {
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	long i = (long)m->private;
+	long i;
 
 	(*pos)++;
 
-	i++;
+	if (v == SEQ_START_TOKEN)
+		i = 0;
+	else {
+		i = *(long *)v;
+		i++;
+	}
 
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
@@ -197,12 +202,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	void *t = &m->private;
+	void *t = SEQ_START_TOKEN;
 	loff_t l = 0;
 
 	local_irq_disable();
 	__raw_spin_lock(&max_stack_lock);
 
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
 	for (; t && l < *pos; t = t_next(m, t, &l))
 		;
 
@@ -231,10 +239,10 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 
 static int t_show(struct seq_file *m, void *v)
 {
-	long i = *(long *)v;
+	long i;
 	int size;
 
-	if (i < 0) {
+	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
@@ -242,6 +250,8 @@ static int t_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
+	i = *(long *)v;
+
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
 		return 0;
@@ -271,10 +281,6 @@ static int stack_trace_open(struct inode *inode, struct file *file)
 	int ret;
 
 	ret = seq_open(file, &stack_trace_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = (void *)-1;
-	}
 
 	return ret;
 }
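The trace_stack.c hunks above move the stack tracer's seq_file iterator to the conventional SEQ_START_TOKEN scheme instead of parking -1 in m->private: t_start() returns SEQ_START_TOKEN for *pos == 0 and t_show() prints the column header when it sees that token. A minimal, self-contained sketch of that convention, unrelated to the stack tracer (the demo_* names and the demo_items array are illustrative only), might look like this:

#include <linux/kernel.h>
#include <linux/seq_file.h>

static long demo_items[] = { 10, 20, 30 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;		/* header row */
	if (*pos <= ARRAY_SIZE(demo_items))
		return &demo_items[*pos - 1];
	return NULL;				/* end of sequence */
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return demo_start(m, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "Index  Value\n");
		return 0;
	}
	seq_printf(m, "%5ld  %ld\n",
		   (long)((long *)v - demo_items), *(long *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};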