Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--   kernel/trace/trace.c   96
 1 file changed, 71 insertions, 25 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9ec59f541156..ee9c921d7f21 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,7 +17,6 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
@@ -42,8 +41,6 @@
 #include "trace.h"
 #include "trace_output.h"
 
-#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
@@ -341,7 +338,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -426,6 +423,7 @@ static const char *trace_options[] = {
 	"sleep-time",
 	"graph-time",
 	"record-cmd",
+	"overwrite",
 	NULL
 };
 
@@ -781,6 +779,11 @@ __acquires(kernel_lock)
 	tracing_reset_online_cpus(tr);
 
 	current_trace = type;
+
+	/* If we expanded the buffers, make sure the max is expanded too */
+	if (ring_buffer_expanded && type->use_max_tr)
+		ring_buffer_resize(max_tr.buffer, trace_buf_size);
+
 	/* the test is responsible for initializing and enabling */
 	pr_info("Testing tracer %s: ", type->name);
 	ret = type->selftest(type, tr);
@@ -793,6 +796,10 @@ __acquires(kernel_lock)
 	/* Only reset on passing, to avoid touching corrupted buffers */
 	tracing_reset_online_cpus(tr);
 
+	/* Shrink the max buffer again */
+	if (ring_buffer_expanded && type->use_max_tr)
+		ring_buffer_resize(max_tr.buffer, 1);
+
 	printk(KERN_CONT "PASSED\n");
 	}
 #endif
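The two hunks above expand the snapshot (max_tr) buffer before a tracer selftest runs and shrink it back to a single page afterwards, so the extra buffer only costs memory while the test actually needs it. Below is a minimal user-space sketch of the same grow-run-shrink pattern; the buffer type and helper names are hypothetical stand-ins, not the kernel's ring buffer API.

```c
#include <stdlib.h>

/* Hypothetical resizable buffer standing in for the kernel ring buffer. */
struct buf {
	char *data;
	size_t size;
};

static int buf_resize(struct buf *b, size_t size)
{
	char *p = realloc(b->data, size);

	if (!p && size)
		return -1;
	b->data = p;
	b->size = size;
	return 0;
}

/* Grow the spare buffer only for the duration of the self-test. */
static int run_selftest(struct buf *max_buf, size_t full_size,
			int (*selftest)(void))
{
	int ret;

	if (buf_resize(max_buf, full_size))	/* expand before the test */
		return -1;
	ret = selftest();
	buf_resize(max_buf, 1);			/* shrink back afterwards */
	return ret;
}
```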
@@ -1103,7 +1110,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count	= pc & 0xff;
 	entry->pid		= (tsk) ? tsk->pid : 0;
-	entry->lock_depth	= (tsk) ? tsk->lock_depth : 0;
+	entry->padding		= 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1284,6 +1291,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,10 +1311,20 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
-		return;
+		goto out_drop_count;
 	entry	= ring_buffer_event_data(event);
 
 	entry->tgid		= current->tgid;
@@ -1319,6 +1338,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+ out_drop_count:
+	__this_cpu_dec(user_stack_count);
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
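The guard added to ftrace_trace_userstack() uses a per-CPU counter: preemption is disabled so the task stays on one CPU, and if the counter is already non-zero the function bails out, because capturing a user stack can itself raise events (page faults, for instance) that would re-enter the tracer. Here is a simplified sketch of the same recursion guard using a thread-local counter in place of the kernel's per-CPU variables; the names are illustrative, not the kernel API.

```c
#include <stdio.h>

/* Stand-in for DEFINE_PER_CPU(int, user_stack_count); one slot per thread. */
static __thread int user_stack_count;

static void record_user_stack(void)
{
	/* In the kernel, preempt_disable() pins the task to a CPU first. */
	if (user_stack_count)
		return;		/* already inside: avoid recursing */

	user_stack_count++;

	/*
	 * Work that may indirectly call record_user_stack() again;
	 * the counter turns the nested call into a no-op.
	 */
	printf("capturing user stack\n");

	user_stack_count--;
}
```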
@@ -1733,10 +1757,9 @@ static void print_lat_help_header(struct seq_file *m)
 	seq_puts(m, "#                | / _----=> need-resched    \n");
 	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
 	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
-	seq_puts(m, "#                |||| /_--=> lock-depth       \n");
-	seq_puts(m, "#                |||||/     delay             \n");
-	seq_puts(m, "#  cmd     pid   |||||| time  |   caller      \n");
-	seq_puts(m, "#     \\   /      ||||||   \\   |   /           \n");
+	seq_puts(m, "#                |||| /     delay             \n");
+	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
+	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
 }
 
 static void print_func_help_header(struct seq_file *m)
@@ -1991,9 +2014,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
-	if (iter->lost_events)
-		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
-				 iter->cpu, iter->lost_events);
+	if (iter->lost_events &&
+	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+			      iter->cpu, iter->lost_events))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
@@ -2196,7 +2220,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 
 static int tracing_release(struct inode *inode, struct file *file)
 {
-	struct seq_file *m = (struct seq_file *)file->private_data;
+	struct seq_file *m = file->private_data;
 	struct trace_iterator *iter;
 	int cpu;
 
@@ -2320,11 +2344,19 @@ tracing_write_stub(struct file *filp, const char __user *ubuf,
 	return count;
 }
 
+static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
+{
+	if (file->f_mode & FMODE_READ)
+		return seq_lseek(file, offset, origin);
+	else
+		return 0;
+}
+
 static const struct file_operations tracing_fops = {
 	.open		= tracing_open,
 	.read		= seq_read,
 	.write		= tracing_write_stub,
-	.llseek		= seq_lseek,
+	.llseek		= tracing_seek,
 	.release	= tracing_release,
 };
 
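tracing_seek() only forwards to seq_lseek() when the file was opened readable; a write-only open of this file never set up the seq_file iterator, so seeking it would operate on state that does not exist. A hedged user-space analogue of gating an operation on the open mode follows; the struct and constant names are illustrative, not kernel definitions.

```c
#include <sys/types.h>

#define MODE_READ  0x1
#define MODE_WRITE 0x2

/* Hypothetical file handle: the iterator only exists for readable opens. */
struct handle {
	unsigned int mode;
	void *iter;	/* NULL unless opened with MODE_READ */
};

static off_t handle_seek(struct handle *h, off_t offset)
{
	if (h->mode & MODE_READ)
		return offset;	/* real code would seek the iterator here */
	return 0;		/* write-only open: nothing to seek, succeed quietly */
}
```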
@@ -2505,6 +2537,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
+
+	if (mask == TRACE_ITER_OVERWRITE)
+		ring_buffer_change_overwrite(global_trace.buffer, enabled);
 }
 
 static ssize_t
@@ -2686,6 +2721,10 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 	if (tracer_enabled ^ val) {
+
+		/* Only need to warn if this is used to change the state */
+		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
+
 		if (val) {
 			tracer_enabled = 1;
 			if (current_trace->start)
@@ -3192,6 +3231,14 @@ waitagain:
 
 		if (iter->seq.len >= cnt)
 			break;
+
+		/*
+		 * Setting the full flag means we reached the trace_seq buffer
+		 * size and we should leave by partial output condition above.
+		 * One of the trace_seq_* functions is not used properly.
+		 */
+		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
+			  iter->ent->type);
 	}
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
@@ -3202,7 +3249,7 @@ waitagain:
 	trace_seq_init(&iter->seq);
 
 	/*
-	 * If there was nothing to send to user, inspite of consuming trace
+	 * If there was nothing to send to user, in spite of consuming trace
 	 * entries, go back to wait for more entries.
 	 */
 	if (sret == -EBUSY)
@@ -3996,13 +4043,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 {
 	struct dentry *d_percpu = tracing_dentry_percpu();
 	struct dentry *d_cpu;
-	/* strlen(cpu) + MAX(log10(cpu)) + '\0' */
-	char cpu_dir[7];
+	char cpu_dir[30]; /* 30 characters should be more than enough */
 
-	if (cpu > 999 || cpu < 0)
-		return;
-
-	sprintf(cpu_dir, "cpu%ld", cpu);
+	snprintf(cpu_dir, 30, "cpu%ld", cpu);
 	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
 	if (!d_cpu) {
 		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
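The old code capped the CPU number at 999 so a 7-byte buffer could never overflow under sprintf(); the replacement uses a 30-byte buffer with snprintf(), which truncates instead of overflowing and drops the artificial CPU limit. A small, standard-C illustration of the difference:

```c
#include <stdio.h>

int main(void)
{
	long cpu = 123456;	/* would have been rejected by the old check */
	char cpu_dir[30];

	/* snprintf() never writes past the buffer; worst case it truncates. */
	snprintf(cpu_dir, sizeof(cpu_dir), "cpu%ld", cpu);
	printf("%s\n", cpu_dir);	/* prints "cpu123456" */
	return 0;
}
```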
@@ -4531,9 +4574,11 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 __init static int tracer_alloc_buffers(void)
 {
 	int ring_buf_size;
+	enum ring_buffer_flags rb_flags;
 	int i;
 	int ret = -ENOMEM;
 
+
 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
 		goto out;
 
@@ -4546,12 +4591,13 @@ __init static int tracer_alloc_buffers(void)
 	else
 		ring_buf_size = 1;
 
+	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
-	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
-						   TRACE_BUFFER_FLAGS);
+	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
@@ -4561,7 +4607,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
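With the hard-coded TRACE_BUFFER_FLAGS define gone, the overwrite behaviour is now derived from the trace_flags option bits at allocation time, and it can also be flipped later through ring_buffer_change_overwrite() (see the set_tracer_flags() hunk). A minimal sketch of deriving an allocation flag from an option bitmask follows; the flag-to-flag translation mirrors the patch, but the bit value of TRACE_ITER_OVERWRITE and the alloc function are illustrative stand-ins, not the kernel's definitions.

```c
#include <stdlib.h>

#define TRACE_ITER_OVERWRITE	0x800000	/* illustrative bit value */
#define RB_FL_OVERWRITE		0x1

static unsigned long trace_flags = TRACE_ITER_OVERWRITE;	/* default on */

/* Stand-in for ring_buffer_alloc(size, flags). */
static void *buffer_alloc(size_t size, unsigned int flags)
{
	(void)flags;	/* a real buffer would record the overwrite mode */
	return malloc(size);
}

int main(void)
{
	unsigned int rb_flags;

	/* Same translation as the patch: option bit -> allocation flag. */
	rb_flags = (trace_flags & TRACE_ITER_OVERWRITE) ? RB_FL_OVERWRITE : 0;

	void *buf = buffer_alloc(4096, rb_flags);
	free(buf);
	return 0;
}
```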