Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--   kernel/trace/trace.c   252
 1 files changed, 186 insertions, 66 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3c13e46d7d24..5d520b7bb4c5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -249,7 +249,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 static struct tracer *trace_types __read_mostly;
 
 /* current_trace points to the tracer that is currently active */
-static struct tracer *current_trace __read_mostly;
+static struct tracer *current_trace __read_mostly = &nop_trace;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
@@ -709,10 +709,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+
+	if (!current_trace->allocated_snapshot) {
+		/* Only the nop tracer should hit this when disabling */
+		WARN_ON_ONCE(current_trace != &nop_trace);
 		return;
 	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -739,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
 		return;
-	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -862,10 +863,13 @@ int register_tracer(struct tracer *type)
 
 	current_trace = type;
 
-	/* If we expanded the buffers, make sure the max is expanded too */
-	if (ring_buffer_expanded && type->use_max_tr)
-		ring_buffer_resize(max_tr.buffer, trace_buf_size,
-					RING_BUFFER_ALL_CPUS);
+	if (type->use_max_tr) {
+		/* If we expanded the buffers, make sure the max is expanded too */
+		if (ring_buffer_expanded)
+			ring_buffer_resize(max_tr.buffer, trace_buf_size,
+					   RING_BUFFER_ALL_CPUS);
+		type->allocated_snapshot = true;
+	}
 
 	/* the test is responsible for initializing and enabling */
 	pr_info("Testing tracer %s: ", type->name);
@@ -881,10 +885,14 @@ int register_tracer(struct tracer *type)
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		tracing_reset_online_cpus(tr);
 
-		/* Shrink the max buffer again */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1,
-						RING_BUFFER_ALL_CPUS);
+		if (type->use_max_tr) {
+			type->allocated_snapshot = false;
+
+			/* Shrink the max buffer again */
+			if (ring_buffer_expanded)
+				ring_buffer_resize(max_tr.buffer, 1,
+						   RING_BUFFER_ALL_CPUS);
+		}
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -922,6 +930,9 @@ void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
 
+	if (!buffer)
+		return;
+
 	ring_buffer_record_disable(buffer);
 
 	/* Make sure all commits have finished */
@@ -936,6 +947,9 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	struct ring_buffer *buffer = tr->buffer;
 	int cpu;
 
+	if (!buffer)
+		return;
+
 	ring_buffer_record_disable(buffer);
 
 	/* Make sure all commits have finished */
@@ -1167,7 +1181,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count = pc & 0xff;
 	entry->pid = (tsk) ? tsk->pid : 0;
-	entry->padding = 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1335,7 +1348,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	 */
 	preempt_disable_notrace();
 
-	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
 	/*
 	 * We don't need any atomic variables, just a barrier.
 	 * If an interrupt comes in, we don't care, because it would
@@ -1389,7 +1402,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
  out:
 	/* Again, don't let gcc optimize things here */
 	barrier();
-	__get_cpu_var(ftrace_stack_reserve)--;
+	__this_cpu_dec(ftrace_stack_reserve);
 	preempt_enable_notrace();
 
 }
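
For illustration only, not part of the patch: the two hunks above convert the per-cpu stack-reserve counter from __get_cpu_var() arithmetic to the this_cpu operations. A minimal sketch of that recursion-guard pattern, assuming kernel context (sketch_reserve and sketch_handler are made-up names for the example):

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, sketch_reserve);

static void sketch_handler(void)
{
	int depth;

	/* preemption is disabled, so the per-cpu counter stays on this CPU */
	preempt_disable_notrace();

	depth = __this_cpu_inc_return(sketch_reserve);
	/* barriers, not atomics: a nested interrupt simply sees depth > 1 */
	barrier();

	if (depth == 1) {
		/* outermost entry on this CPU: do the real work here */
	}

	barrier();
	__this_cpu_dec(sketch_reserve);
	preempt_enable_notrace();
}
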
@@ -1517,7 +1530,6 @@ static struct trace_buffer_struct *trace_percpu_nmi_buffer;
 static char *get_trace_buf(void)
 {
 	struct trace_buffer_struct *percpu_buffer;
-	struct trace_buffer_struct *buffer;
 
 	/*
 	 * If we have allocated per cpu buffers, then we do not
@@ -1535,9 +1547,7 @@ static char *get_trace_buf(void)
 	if (!percpu_buffer)
 		return NULL;
 
-	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
-
-	return buffer->buffer;
+	return this_cpu_ptr(&percpu_buffer->buffer[0]);
 }
 
 static int alloc_percpu_trace_buffer(void)
@@ -1942,21 +1952,27 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	struct trace_iterator *iter = m->private;
-	static struct tracer *old_tracer;
 	int cpu_file = iter->cpu_file;
 	void *p = NULL;
 	loff_t l = 0;
 	int cpu;
 
-	/* copy the tracer to avoid using a global lock all around */
+	/*
+	 * copy the tracer to avoid using a global lock all around.
+	 * iter->trace is a copy of current_trace, the pointer to the
+	 * name may be used instead of a strcmp(), as iter->trace->name
+	 * will point to the same string as current_trace->name.
+	 */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(old_tracer != current_trace && current_trace)) {
-		old_tracer = current_trace;
+	if (unlikely(current_trace && iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
-	}
 	mutex_unlock(&trace_types_lock);
 
-	atomic_inc(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return ERR_PTR(-EBUSY);
+
+	if (!iter->snapshot)
+		atomic_inc(&trace_record_cmdline_disabled);
 
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
@@ -1995,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
 	struct trace_iterator *iter = m->private;
 
-	atomic_dec(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return;
+
+	if (!iter->snapshot)
+		atomic_dec(&trace_record_cmdline_disabled);
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
@@ -2080,8 +2100,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	unsigned long total;
 	const char *name = "preemption";
 
-	if (type)
-		name = type->name;
+	name = type->name;
 
 	get_total_entries(tr, &total, &entries);
 
@@ -2430,7 +2449,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
 	long cpu_file = (long) inode->i_private;
 	struct trace_iterator *iter;
@@ -2457,16 +2476,16 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (!iter->trace)
 		goto fail;
 
-	if (current_trace)
-		*iter->trace = *current_trace;
+	*iter->trace = *current_trace;
 
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if (current_trace && current_trace->print_max)
+	if (current_trace->print_max || snapshot)
 		iter->tr = &max_tr;
 	else
 		iter->tr = &global_trace;
+	iter->snapshot = snapshot;
 	iter->pos = -1;
 	mutex_init(&iter->mutex);
 	iter->cpu_file = cpu_file;
@@ -2483,8 +2502,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (trace_clocks[trace_clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	/* stop the trace while dumping */
-	tracing_stop();
+	/* stop the trace while dumping if we are not opening "snapshot" */
+	if (!iter->snapshot)
+		tracing_stop();
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
@@ -2547,8 +2567,9 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (iter->trace && iter->trace->close)
 		iter->trace->close(iter);
 
-	/* reenable tracing if it was previously enabled */
-	tracing_start();
+	if (!iter->snapshot)
+		/* reenable tracing if it was previously enabled */
+		tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2576,7 +2597,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 	}
 
 	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(inode, file);
+		iter = __tracing_open(inode, file, false);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3014,10 +3035,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 	int r;
 
 	mutex_lock(&trace_types_lock);
-	if (current_trace)
-		r = sprintf(buf, "%s\n", current_trace->name);
-	else
-		r = sprintf(buf, "\n");
+	r = sprintf(buf, "%s\n", current_trace->name);
 	mutex_unlock(&trace_types_lock);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3183,6 +3201,7 @@ static int tracing_set_tracer(const char *buf)
 	static struct trace_option_dentry *topts;
 	struct trace_array *tr = &global_trace;
 	struct tracer *t;
+	bool had_max_tr;
 	int ret = 0;
 
 	mutex_lock(&trace_types_lock);
@@ -3207,9 +3226,21 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 
 	trace_branch_disable();
-	if (current_trace && current_trace->reset)
+	if (current_trace->reset)
 		current_trace->reset(tr);
-	if (current_trace && current_trace->use_max_tr) {
+
+	had_max_tr = current_trace->allocated_snapshot;
+	current_trace = &nop_trace;
+
+	if (had_max_tr && !t->use_max_tr) {
+		/*
+		 * We need to make sure that the update_max_tr sees that
+		 * current_trace changed to nop_trace to keep it from
+		 * swapping the buffers after we resize it.
+		 * The update_max_tr is called from interrupts disabled
+		 * so a synchronized_sched() is sufficient.
+		 */
+		synchronize_sched();
 		/*
 		 * We don't free the ring buffer. instead, resize it because
 		 * The max_tr ring buffer has some state (e.g. ring->clock) and
@@ -3217,18 +3248,19 @@ static int tracing_set_tracer(const char *buf)
 		 */
 		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&max_tr, 1);
+		tracing_reset_online_cpus(&max_tr);
+		current_trace->allocated_snapshot = false;
 	}
 	destroy_trace_option_files(topts);
 
-	current_trace = &nop_trace;
-
 	topts = create_trace_option_files(t);
-	if (t->use_max_tr) {
+	if (t->use_max_tr && !had_max_tr) {
 		/* we need to make per cpu buffer sizes equivalent */
 		ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
 						RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
+		t->allocated_snapshot = true;
 	}
 
 	if (t->init) {
@@ -3336,8 +3368,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		ret = -ENOMEM;
 		goto fail;
 	}
-	if (current_trace)
-		*iter->trace = *current_trace;
+	*iter->trace = *current_trace;
 
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 		ret = -ENOMEM;
@@ -3477,7 +3508,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
-	static struct tracer *old_tracer;
 	ssize_t sret;
 
 	/* return any leftover data */
@@ -3489,10 +3519,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(old_tracer != current_trace && current_trace)) {
-		old_tracer = current_trace;
+	if (unlikely(iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
-	}
 	mutex_unlock(&trace_types_lock);
 
 	/*
@@ -3648,7 +3676,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.ops = &tracing_pipe_buf_ops,
 		.spd_release = tracing_spd_release_pipe,
 	};
-	static struct tracer *old_tracer;
 	ssize_t ret;
 	size_t rem;
 	unsigned int i;
@@ -3658,10 +3685,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(old_tracer != current_trace && current_trace)) {
-		old_tracer = current_trace;
+	if (unlikely(iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
-	}
 	mutex_unlock(&trace_types_lock);
 
 	mutex_lock(&iter->mutex);
@@ -4037,8 +4062,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	 * Reset the buffer so that it doesn't have incomparable timestamps.
 	 */
 	tracing_reset_online_cpus(&global_trace);
-	if (max_tr.buffer)
-		tracing_reset_online_cpus(&max_tr);
+	tracing_reset_online_cpus(&max_tr);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -4054,6 +4078,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 	return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct trace_iterator *iter;
+	int ret = 0;
+
+	if (file->f_mode & FMODE_READ) {
+		iter = __tracing_open(inode, file, true);
+		if (IS_ERR(iter))
+			ret = PTR_ERR(iter);
+	}
+	return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+		       loff_t *ppos)
+{
+	unsigned long val;
+	int ret;
+
+	ret = tracing_update_buffers();
+	if (ret < 0)
+		return ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&trace_types_lock);
+
+	if (current_trace->use_max_tr) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	switch (val) {
+	case 0:
+		if (current_trace->allocated_snapshot) {
+			/* free spare buffer */
+			ring_buffer_resize(max_tr.buffer, 1,
+					   RING_BUFFER_ALL_CPUS);
+			set_buffer_entries(&max_tr, 1);
+			tracing_reset_online_cpus(&max_tr);
+			current_trace->allocated_snapshot = false;
+		}
+		break;
+	case 1:
+		if (!current_trace->allocated_snapshot) {
+			/* allocate spare buffer */
+			ret = resize_buffer_duplicate_size(&max_tr,
+					&global_trace, RING_BUFFER_ALL_CPUS);
+			if (ret < 0)
+				break;
+			current_trace->allocated_snapshot = true;
+		}
+
+		local_irq_disable();
+		/* Now, we're going to swap */
+		update_max_tr(&global_trace, current, smp_processor_id());
+		local_irq_enable();
+		break;
+	default:
+		if (current_trace->allocated_snapshot)
+			tracing_reset_online_cpus(&max_tr);
+		else
+			ret = -EINVAL;
+		break;
+	}
+
+	if (ret >= 0) {
+		*ppos += cnt;
+		ret = cnt;
+	}
+out:
+	mutex_unlock(&trace_types_lock);
+	return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
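
For illustration only, not part of the patch: tracing_snapshot_write() above treats "0" as free the spare (max) buffer, "1" as allocate it if needed and swap the current trace into it, and any other value as clear the allocated snapshot. Assuming debugfs is mounted at the usual /sys/kernel/debug, a userspace caller might exercise the new file roughly like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path assumes the default debugfs mount point */
	const char *path = "/sys/kernel/debug/tracing/snapshot";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "1": allocate the spare buffer if needed and take a snapshot */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	/* the captured snapshot can then be read back, e.g. with
	 * cat /sys/kernel/debug/tracing/snapshot */
	return 0;
}
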
@@ -4110,6 +4215,16 @@ static const struct file_operations trace_clock_fops = {
 	.write = tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+	.open = tracing_snapshot_open,
+	.read = seq_read,
+	.write = tracing_snapshot_write,
+	.llseek = tracing_seek,
+	.release = tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
 	struct trace_array *tr;
 	void *spare;
@@ -4414,6 +4529,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
+	cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "read events: %ld\n", cnt);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -4490,7 +4608,7 @@ struct dentry *tracing_init_dentry(void)
 
 static struct dentry *d_percpu;
 
-struct dentry *tracing_dentry_percpu(void)
+static struct dentry *tracing_dentry_percpu(void)
 {
 	static int once;
 	struct dentry *d_tracer;
@@ -4906,6 +5024,11 @@ static __init int tracer_init_debugfs(void)
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+	trace_create_file("snapshot", 0644, d_tracer,
+			  (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
 	create_trace_options_dir();
 
 	for_each_tracing_cpu(cpu)
@@ -5014,6 +5137,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	if (disable_tracing)
 		ftrace_kill();
 
+	/* Simulate the iterator */
 	trace_init_global_iter(&iter);
 
 	for_each_tracing_cpu(cpu) {
@@ -5025,10 +5149,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	/* Simulate the iterator */
-	iter.tr = &global_trace;
-	iter.trace = current_trace;
-
 	switch (oops_dump_mode) {
 	case DUMP_ALL:
 		iter.cpu_file = TRACE_PIPE_ALL_CPU;
@@ -5173,7 +5293,7 @@ __init static int tracer_alloc_buffers(void)
 	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
-	current_trace = &nop_trace;
+
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 