Diffstat (limited to 'kernel/trace/trace.c'):
-rw-r--r--  kernel/trace/trace.c | 293
1 file changed, 222 insertions, 71 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5125677efa0..1f835a83cb2c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -39,6 +39,7 @@
 #include <linux/poll.h>
 #include <linux/nmi.h>
 #include <linux/fs.h>
+#include <linux/sched/rt.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -249,7 +250,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 static struct tracer *trace_types __read_mostly;
 
 /* current_trace points to the tracer that is currently active */
-static struct tracer *current_trace __read_mostly;
+static struct tracer *current_trace __read_mostly = &nop_trace;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
@@ -709,10 +710,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 return;
 
         WARN_ON_ONCE(!irqs_disabled());
-        if (!current_trace->use_max_tr) {
-                WARN_ON_ONCE(1);
+
+        if (!current_trace->allocated_snapshot) {
+                /* Only the nop tracer should hit this when disabling */
+                WARN_ON_ONCE(current_trace != &nop_trace);
                 return;
         }
+
         arch_spin_lock(&ftrace_max_lock);
 
         tr->buffer = max_tr.buffer;
@@ -739,10 +743,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 return;
 
         WARN_ON_ONCE(!irqs_disabled());
-        if (!current_trace->use_max_tr) {
-                WARN_ON_ONCE(1);
+        if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
                 return;
-        }
 
         arch_spin_lock(&ftrace_max_lock);
 
@@ -862,10 +864,13 @@ int register_tracer(struct tracer *type)
 
                 current_trace = type;
 
-                /* If we expanded the buffers, make sure the max is expanded too */
-                if (ring_buffer_expanded && type->use_max_tr)
-                        ring_buffer_resize(max_tr.buffer, trace_buf_size,
-                                                RING_BUFFER_ALL_CPUS);
+                if (type->use_max_tr) {
+                        /* If we expanded the buffers, make sure the max is expanded too */
+                        if (ring_buffer_expanded)
+                                ring_buffer_resize(max_tr.buffer, trace_buf_size,
+                                                   RING_BUFFER_ALL_CPUS);
+                        type->allocated_snapshot = true;
+                }
 
                 /* the test is responsible for initializing and enabling */
                 pr_info("Testing tracer %s: ", type->name);
@@ -881,10 +886,14 @@ int register_tracer(struct tracer *type)
                 /* Only reset on passing, to avoid touching corrupted buffers */
                 tracing_reset_online_cpus(tr);
 
-                /* Shrink the max buffer again */
-                if (ring_buffer_expanded && type->use_max_tr)
-                        ring_buffer_resize(max_tr.buffer, 1,
-                                                RING_BUFFER_ALL_CPUS);
+                if (type->use_max_tr) {
+                        type->allocated_snapshot = false;
+
+                        /* Shrink the max buffer again */
+                        if (ring_buffer_expanded)
+                                ring_buffer_resize(max_tr.buffer, 1,
+                                                   RING_BUFFER_ALL_CPUS);
+                }
 
                 printk(KERN_CONT "PASSED\n");
         }
@@ -922,6 +931,9 @@ void tracing_reset(struct trace_array *tr, int cpu)
 {
         struct ring_buffer *buffer = tr->buffer;
 
+        if (!buffer)
+                return;
+
         ring_buffer_record_disable(buffer);
 
         /* Make sure all commits have finished */
@@ -936,6 +948,9 @@ void tracing_reset_online_cpus(struct trace_array *tr)
         struct ring_buffer *buffer = tr->buffer;
         int cpu;
 
+        if (!buffer)
+                return;
+
         ring_buffer_record_disable(buffer);
 
         /* Make sure all commits have finished */
@@ -1167,7 +1182,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
         entry->preempt_count = pc & 0xff;
         entry->pid = (tsk) ? tsk->pid : 0;
-        entry->padding = 0;
         entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1335,7 +1349,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
          */
         preempt_disable_notrace();
 
-        use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+        use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
         /*
          * We don't need any atomic variables, just a barrier.
          * If an interrupt comes in, we don't care, because it would
@@ -1389,7 +1403,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
  out:
         /* Again, don't let gcc optimize things here */
         barrier();
-        __get_cpu_var(ftrace_stack_reserve)--;
+        __this_cpu_dec(ftrace_stack_reserve);
         preempt_enable_notrace();
 
 }
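The two hunks above replace open-coded __get_cpu_var() arithmetic with __this_cpu_inc_return()/__this_cpu_dec(). The idiom is a cheap per-CPU reservation/recursion counter: preemption is disabled, the counter is bumped and read back in one step, and a compiler barrier orders the work against anything an interrupt on the same CPU might do. The following sketch is not part of the patch; it only illustrates the pattern with a hypothetical per-CPU variable:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/compiler.h>

static DEFINE_PER_CPU(int, my_reserve);        /* made-up counter, for illustration */

static void do_guarded_work(void)
{
        int depth;

        preempt_disable_notrace();              /* stay on this CPU */
        depth = __this_cpu_inc_return(my_reserve);
        barrier();                              /* order against local interrupts */

        if (depth == 1) {
                /* outermost entry on this CPU: safe to do the real work */
        }

        barrier();
        __this_cpu_dec(my_reserve);
        preempt_enable_notrace();
}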
@@ -1517,7 +1531,6 @@ static struct trace_buffer_struct *trace_percpu_nmi_buffer;
 static char *get_trace_buf(void)
 {
         struct trace_buffer_struct *percpu_buffer;
-        struct trace_buffer_struct *buffer;
 
         /*
          * If we have allocated per cpu buffers, then we do not
@@ -1535,9 +1548,7 @@ static char *get_trace_buf(void)
         if (!percpu_buffer)
                 return NULL;
 
-        buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
-
-        return buffer->buffer;
+        return this_cpu_ptr(&percpu_buffer->buffer[0]);
 }
 
 static int alloc_percpu_trace_buffer(void)
@@ -1942,21 +1953,27 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
         struct trace_iterator *iter = m->private;
-        static struct tracer *old_tracer;
         int cpu_file = iter->cpu_file;
         void *p = NULL;
         loff_t l = 0;
         int cpu;
 
-        /* copy the tracer to avoid using a global lock all around */
+        /*
+         * copy the tracer to avoid using a global lock all around.
+         * iter->trace is a copy of current_trace, the pointer to the
+         * name may be used instead of a strcmp(), as iter->trace->name
+         * will point to the same string as current_trace->name.
+         */
         mutex_lock(&trace_types_lock);
-        if (unlikely(old_tracer != current_trace && current_trace)) {
-                old_tracer = current_trace;
+        if (unlikely(current_trace && iter->trace->name != current_trace->name))
                 *iter->trace = *current_trace;
-        }
         mutex_unlock(&trace_types_lock);
 
-        atomic_inc(&trace_record_cmdline_disabled);
+        if (iter->snapshot && iter->trace->use_max_tr)
+                return ERR_PTR(-EBUSY);
+
+        if (!iter->snapshot)
+                atomic_inc(&trace_record_cmdline_disabled);
 
         if (*pos != iter->pos) {
                 iter->ent = NULL;
@@ -1995,7 +2012,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
         struct trace_iterator *iter = m->private;
 
-        atomic_dec(&trace_record_cmdline_disabled);
+        if (iter->snapshot && iter->trace->use_max_tr)
+                return;
+
+        if (!iter->snapshot)
+                atomic_dec(&trace_record_cmdline_disabled);
         trace_access_unlock(iter->cpu_file);
         trace_event_read_unlock();
 }
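The s_start()/s_stop() and pipe-read changes drop the static old_tracer pointer and instead compare iter->trace->name with current_trace->name. Because *iter->trace = *current_trace copies the whole struct, the copied name member still points at the registered tracer's string, so a plain pointer comparison is enough to detect a tracer switch without strcmp(). A tiny stand-alone illustration, using hypothetical miniature types rather than the kernel structs:

#include <stdio.h>

struct mini_tracer {
        const char *name;
};

static struct mini_tracer nop_tracer  = { .name = "nop" };
static struct mini_tracer func_tracer = { .name = "function" };
static struct mini_tracer *cur = &nop_tracer;

int main(void)
{
        struct mini_tracer copy = *cur;         /* like *iter->trace = *current_trace */

        cur = &func_tracer;                     /* tracer switched elsewhere */

        if (copy.name != cur->name)             /* pointer compare, no strcmp() needed */
                printf("tracer changed, refresh the copy\n");
        return 0;
}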
@@ -2080,8 +2101,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
         unsigned long total;
         const char *name = "preemption";
 
-        if (type)
-                name = type->name;
+        name = type->name;
 
         get_total_entries(tr, &total, &entries);
 
@@ -2380,6 +2400,27 @@ static void test_ftrace_alive(struct seq_file *m)
         seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
 }
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+        if (iter->trace->allocated_snapshot)
+                seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+        else
+                seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+        seq_printf(m, "# Snapshot commands:\n");
+        seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
+        seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+        seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
+        seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+        seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
+        seq_printf(m, "#                       is not a '0' or '1')\n");
+}
+#else
+/* Should never be called */
+static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
+#endif
+
 static int s_show(struct seq_file *m, void *v)
 {
         struct trace_iterator *iter = v;
@@ -2391,7 +2432,9 @@ static int s_show(struct seq_file *m, void *v)
                         seq_puts(m, "#\n");
                         test_ftrace_alive(m);
                 }
-                if (iter->trace && iter->trace->print_header)
+                if (iter->snapshot && trace_empty(iter))
+                        print_snapshot_help(m, iter);
+                else if (iter->trace && iter->trace->print_header)
                         iter->trace->print_header(m);
                 else
                         trace_default_header(m);
@@ -2430,7 +2473,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
         long cpu_file = (long) inode->i_private;
         struct trace_iterator *iter;
@@ -2457,16 +2500,16 @@ __tracing_open(struct inode *inode, struct file *file)
         if (!iter->trace)
                 goto fail;
 
-        if (current_trace)
-                *iter->trace = *current_trace;
+        *iter->trace = *current_trace;
 
         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                 goto fail;
 
-        if (current_trace && current_trace->print_max)
+        if (current_trace->print_max || snapshot)
                 iter->tr = &max_tr;
         else
                 iter->tr = &global_trace;
+        iter->snapshot = snapshot;
         iter->pos = -1;
         mutex_init(&iter->mutex);
         iter->cpu_file = cpu_file;
@@ -2483,8 +2526,9 @@ __tracing_open(struct inode *inode, struct file *file)
         if (trace_clocks[trace_clock_id].in_ns)
                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-        /* stop the trace while dumping */
-        tracing_stop();
+        /* stop the trace while dumping if we are not opening "snapshot" */
+        if (!iter->snapshot)
+                tracing_stop();
 
         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
                 for_each_tracing_cpu(cpu) {
@@ -2547,8 +2591,9 @@ static int tracing_release(struct inode *inode, struct file *file)
         if (iter->trace && iter->trace->close)
                 iter->trace->close(iter);
 
-        /* reenable tracing if it was previously enabled */
-        tracing_start();
+        if (!iter->snapshot)
+                /* reenable tracing if it was previously enabled */
+                tracing_start();
         mutex_unlock(&trace_types_lock);
 
         mutex_destroy(&iter->mutex);
@@ -2576,7 +2621,7 @@ static int tracing_open(struct inode *inode, struct file *file)
         }
 
         if (file->f_mode & FMODE_READ) {
-                iter = __tracing_open(inode, file);
+                iter = __tracing_open(inode, file, false);
                 if (IS_ERR(iter))
                         ret = PTR_ERR(iter);
                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -2899,6 +2944,8 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
         if (copy_from_user(&buf, ubuf, cnt))
                 return -EFAULT;
 
+        buf[cnt] = 0;
+
         trace_set_options(buf);
 
         *ppos += cnt;
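The added buf[cnt] = 0 matters because copy_from_user() copies exactly cnt bytes and never NUL-terminates, while trace_set_options() goes on to parse the buffer as a C string. A generic sketch of the pattern (a hypothetical write handler, not the trace.c function itself):

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_opts_write(struct file *filp, const char __user *ubuf,
                                  size_t cnt, loff_t *ppos)
{
        char buf[64];

        if (cnt >= sizeof(buf))                 /* leave room for the terminator */
                return -EINVAL;

        if (copy_from_user(buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = '\0';                        /* copy_from_user() does not add this */

        /* ... parse buf as a normal C string ... */

        *ppos += cnt;
        return cnt;
}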
@@ -3012,10 +3059,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
         int r;
 
         mutex_lock(&trace_types_lock);
-        if (current_trace)
-                r = sprintf(buf, "%s\n", current_trace->name);
-        else
-                r = sprintf(buf, "\n");
+        r = sprintf(buf, "%s\n", current_trace->name);
         mutex_unlock(&trace_types_lock);
 
         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3181,6 +3225,7 @@ static int tracing_set_tracer(const char *buf)
         static struct trace_option_dentry *topts;
         struct trace_array *tr = &global_trace;
         struct tracer *t;
+        bool had_max_tr;
         int ret = 0;
 
         mutex_lock(&trace_types_lock);
@@ -3205,9 +3250,21 @@ static int tracing_set_tracer(const char *buf)
                 goto out;
 
         trace_branch_disable();
-        if (current_trace && current_trace->reset)
+        if (current_trace->reset)
                 current_trace->reset(tr);
-        if (current_trace && current_trace->use_max_tr) {
+
+        had_max_tr = current_trace->allocated_snapshot;
+        current_trace = &nop_trace;
+
+        if (had_max_tr && !t->use_max_tr) {
+                /*
+                 * We need to make sure that the update_max_tr sees that
+                 * current_trace changed to nop_trace to keep it from
+                 * swapping the buffers after we resize it.
+                 * The update_max_tr is called from interrupts disabled
+                 * so a synchronized_sched() is sufficient.
+                 */
+                synchronize_sched();
                 /*
                  * We don't free the ring buffer. instead, resize it because
                  * The max_tr ring buffer has some state (e.g. ring->clock) and
@@ -3215,18 +3272,19 @@ static int tracing_set_tracer(const char *buf)
                  */
                 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
                 set_buffer_entries(&max_tr, 1);
+                tracing_reset_online_cpus(&max_tr);
+                current_trace->allocated_snapshot = false;
         }
         destroy_trace_option_files(topts);
 
-        current_trace = &nop_trace;
-
         topts = create_trace_option_files(t);
-        if (t->use_max_tr) {
+        if (t->use_max_tr && !had_max_tr) {
                 /* we need to make per cpu buffer sizes equivalent */
                 ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
                                                    RING_BUFFER_ALL_CPUS);
                 if (ret < 0)
                         goto out;
+                t->allocated_snapshot = true;
         }
 
         if (t->init) {
@@ -3334,8 +3392,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
                 ret = -ENOMEM;
                 goto fail;
         }
-        if (current_trace)
-                *iter->trace = *current_trace;
+        *iter->trace = *current_trace;
 
         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
                 ret = -ENOMEM;
@@ -3452,7 +3509,7 @@ static int tracing_wait_pipe(struct file *filp)
                         return -EINTR;
 
                 /*
-                 * We block until we read something and tracing is enabled.
+                 * We block until we read something and tracing is disabled.
                  * We still block if tracing is disabled, but we have never
                  * read anything. This allows a user to cat this file, and
                  * then enable tracing. But after we have read something,
@@ -3460,7 +3517,7 @@
                  *
                  * iter->pos will be 0 if we haven't read anything.
                  */
-                if (tracing_is_enabled() && iter->pos)
+                if (!tracing_is_enabled() && iter->pos)
                         break;
         }
 
@@ -3475,7 +3532,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
         struct trace_iterator *iter = filp->private_data;
-        static struct tracer *old_tracer;
         ssize_t sret;
 
         /* return any leftover data */
@@ -3487,10 +3543,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
         /* copy the tracer to avoid using a global lock all around */
         mutex_lock(&trace_types_lock);
-        if (unlikely(old_tracer != current_trace && current_trace)) {
-                old_tracer = current_trace;
+        if (unlikely(iter->trace->name != current_trace->name))
                 *iter->trace = *current_trace;
-        }
         mutex_unlock(&trace_types_lock);
 
         /*
@@ -3646,7 +3700,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                 .ops = &tracing_pipe_buf_ops,
                 .spd_release = tracing_spd_release_pipe,
         };
-        static struct tracer *old_tracer;
         ssize_t ret;
         size_t rem;
         unsigned int i;
@@ -3656,10 +3709,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
         /* copy the tracer to avoid using a global lock all around */
         mutex_lock(&trace_types_lock);
-        if (unlikely(old_tracer != current_trace && current_trace)) {
-                old_tracer = current_trace;
+        if (unlikely(iter->trace->name != current_trace->name))
                 *iter->trace = *current_trace;
-        }
         mutex_unlock(&trace_types_lock);
 
         mutex_lock(&iter->mutex);
@@ -4035,8 +4086,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
          * Reset the buffer so that it doesn't have incomparable timestamps.
          */
         tracing_reset_online_cpus(&global_trace);
-        if (max_tr.buffer)
-                tracing_reset_online_cpus(&max_tr);
+        tracing_reset_online_cpus(&max_tr);
 
         mutex_unlock(&trace_types_lock);
 
@@ -4052,6 +4102,85 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
         return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+        struct trace_iterator *iter;
+        int ret = 0;
+
+        if (file->f_mode & FMODE_READ) {
+                iter = __tracing_open(inode, file, true);
+                if (IS_ERR(iter))
+                        ret = PTR_ERR(iter);
+        }
+        return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                       loff_t *ppos)
+{
+        unsigned long val;
+        int ret;
+
+        ret = tracing_update_buffers();
+        if (ret < 0)
+                return ret;
+
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
+                return ret;
+
+        mutex_lock(&trace_types_lock);
+
+        if (current_trace->use_max_tr) {
+                ret = -EBUSY;
+                goto out;
+        }
+
+        switch (val) {
+        case 0:
+                if (current_trace->allocated_snapshot) {
+                        /* free spare buffer */
+                        ring_buffer_resize(max_tr.buffer, 1,
+                                           RING_BUFFER_ALL_CPUS);
+                        set_buffer_entries(&max_tr, 1);
+                        tracing_reset_online_cpus(&max_tr);
+                        current_trace->allocated_snapshot = false;
+                }
+                break;
+        case 1:
+                if (!current_trace->allocated_snapshot) {
+                        /* allocate spare buffer */
+                        ret = resize_buffer_duplicate_size(&max_tr,
+                                        &global_trace, RING_BUFFER_ALL_CPUS);
+                        if (ret < 0)
+                                break;
+                        current_trace->allocated_snapshot = true;
+                }
+
+                local_irq_disable();
+                /* Now, we're going to swap */
+                update_max_tr(&global_trace, current, smp_processor_id());
+                local_irq_enable();
+                break;
+        default:
+                if (current_trace->allocated_snapshot)
+                        tracing_reset_online_cpus(&max_tr);
+                break;
+        }
+
+        if (ret >= 0) {
+                *ppos += cnt;
+                ret = cnt;
+        }
+out:
+        mutex_unlock(&trace_types_lock);
+        return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+
 static const struct file_operations tracing_max_lat_fops = {
         .open = tracing_open_generic,
         .read = tracing_max_lat_read,
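Together with the snapshot_fops and debugfs registration added further down, tracing_snapshot_open()/tracing_snapshot_write() give user space a "snapshot" file whose semantics match the help text printed by print_snapshot_help(): writing 0 frees the spare buffer, 1 allocates it (if needed) and swaps it with the live buffer, and any other value clears the snapshot. A hypothetical user-space sketch, assuming debugfs is mounted at the usual /sys/kernel/debug (adjust the path otherwise); it is illustrative only and not part of the patch:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/tracing/snapshot";
        char line[256];
        FILE *f;

        f = fopen(path, "w");
        if (!f)
                return 1;
        fputs("1\n", f);                /* allocate spare buffer if needed, then snapshot */
        fclose(f);

        f = fopen(path, "r");
        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* dump the snapshotted trace */
        fclose(f);
        return 0;
}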
@@ -4108,6 +4237,16 @@ static const struct file_operations trace_clock_fops = {
         .write = tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+        .open = tracing_snapshot_open,
+        .read = seq_read,
+        .write = tracing_snapshot_write,
+        .llseek = tracing_seek,
+        .release = tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
         struct trace_array *tr;
         void *spare;
@@ -4412,6 +4551,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
         cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
         trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
+        cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
+        trace_seq_printf(s, "read events: %ld\n", cnt);
+
         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
         kfree(s);
@@ -4488,7 +4630,7 @@ struct dentry *tracing_init_dentry(void)
 
 static struct dentry *d_percpu;
 
-struct dentry *tracing_dentry_percpu(void)
+static struct dentry *tracing_dentry_percpu(void)
 {
         static int once;
         struct dentry *d_tracer;
@@ -4815,10 +4957,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
                 return ret;
 
         if (buffer) {
-                if (val)
+                mutex_lock(&trace_types_lock);
+                if (val) {
                         ring_buffer_record_on(buffer);
-                else
+                        if (current_trace->start)
+                                current_trace->start(tr);
+                } else {
                         ring_buffer_record_off(buffer);
+                        if (current_trace->stop)
+                                current_trace->stop(tr);
+                }
+                mutex_unlock(&trace_types_lock);
         }
 
         (*ppos)++;
@@ -4897,6 +5046,11 @@ static __init int tracer_init_debugfs(void)
                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+        trace_create_file("snapshot", 0644, d_tracer,
+                          (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
         create_trace_options_dir();
 
         for_each_tracing_cpu(cpu)
@@ -5005,6 +5159,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
         if (disable_tracing)
                 ftrace_kill();
 
+        /* Simulate the iterator */
         trace_init_global_iter(&iter);
 
         for_each_tracing_cpu(cpu) {
@@ -5016,10 +5171,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
         /* don't look at user memory in panic mode */
         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-        /* Simulate the iterator */
-        iter.tr = &global_trace;
-        iter.trace = current_trace;
-
         switch (oops_dump_mode) {
         case DUMP_ALL:
                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
@@ -5164,7 +5315,7 @@ __init static int tracer_alloc_buffers(void)
         init_irq_work(&trace_work_wakeup, trace_wake_up);
 
         register_tracer(&nop_trace);
-        current_trace = &nop_trace;
+
         /* All seems OK, enable tracing */
         tracing_disabled = 0;
 