author     Ingo Molnar <mingo@kernel.org>  2012-05-09 13:34:30 -0400
committer  Ingo Molnar <mingo@kernel.org>  2012-05-09 13:34:30 -0400
commit     c4f400e837713f677de94390c57e7dc7567e0286
tree       4bbaa58f826152212ad8394be8442112b4aac04b
parent     cb04ff9ac424d0e689d9b612e9f73cb443ab4b7e
parent     68179686ac67cb08f08b1ef28b860d5ed899f242
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
-rw-r--r--  include/linux/ftrace.h |  8
-rw-r--r--  kernel/trace/ftrace.c  | 44
-rw-r--r--  kernel/trace/trace.c   | 74
3 files changed, 24 insertions(+), 102 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 0b5590330bca..d32cc5e4b0cc 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -491,8 +491,12 @@ static inline void __ftrace_enabled_restore(int enabled)
 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
-static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
+/*
+ * Use defines instead of static inlines because some arches will make code out
+ * of the CALLER_ADDR, when we really want these to be a real nop.
+ */
+# define trace_preempt_on(a0, a1) do { } while (0)
+# define trace_preempt_off(a0, a1) do { } while (0)
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
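
Editor's note: the new comment above is the whole rationale, so here is a minimal, illustrative sketch of the problem it describes (not kernel code; CALLER_ADDR0 is shown with its rough linux/ftrace.h definition, and stub_inline/stub_macro/caller are made-up names). An empty static inline still evaluates its arguments at every call site, and on some architectures __builtin_return_address() expands to real instructions; a macro that drops its arguments never evaluates them, so the call compiles away entirely.

/* Illustrative only: why an "empty" inline is not a true nop here. */
#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))

static inline void stub_inline(unsigned long a0, unsigned long a1) { }
#define stub_macro(a0, a1) do { } while (0)

void caller(void)
{
	stub_inline(CALLER_ADDR0, 0);	/* arguments still evaluated; may emit code */
	stub_macro(CALLER_ADDR0, 0);	/* arguments discarded: a true nop */
}
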
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0fa92f677c92..cf81f27ce6c6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2469,57 +2469,35 @@ static int
 ftrace_avail_open(struct inode *inode, struct file *file)
 {
 	struct ftrace_iterator *iter;
-	int ret;
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-	if (!iter)
-		return -ENOMEM;
-
-	iter->pg = ftrace_pages_start;
-	iter->ops = &global_ops;
-
-	ret = seq_open(file, &show_ftrace_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-
-		m->private = iter;
-	} else {
-		kfree(iter);
+	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
+	if (iter) {
+		iter->pg = ftrace_pages_start;
+		iter->ops = &global_ops;
 	}
 
-	return ret;
+	return iter ? 0 : -ENOMEM;
 }
 
 static int
 ftrace_enabled_open(struct inode *inode, struct file *file)
 {
 	struct ftrace_iterator *iter;
-	int ret;
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-	if (!iter)
-		return -ENOMEM;
-
-	iter->pg = ftrace_pages_start;
-	iter->flags = FTRACE_ITER_ENABLED;
-	iter->ops = &global_ops;
-
-	ret = seq_open(file, &show_ftrace_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-
-		m->private = iter;
-	} else {
-		kfree(iter);
+	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
+	if (iter) {
+		iter->pg = ftrace_pages_start;
+		iter->flags = FTRACE_ITER_ENABLED;
+		iter->ops = &global_ops;
 	}
 
-	return ret;
+	return iter ? 0 : -ENOMEM;
 }
 
 static void ftrace_filter_reset(struct ftrace_hash *hash)
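
Editor's note: both open routines above now lean on __seq_open_private() from fs/seq_file.c. Roughly, that helper folds the old kzalloc + seq_open() + m->private assignment into one call and returns the zeroed private area, or NULL on any failure. A simplified, condensed sketch of that behaviour (seq_open_private_sketch is a made-up name, not the real implementation):

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

/* Simplified illustration of what __seq_open_private() does for its callers:
 * allocate a zeroed private area, open the seq_file, attach the area to it,
 * and hand it back (or NULL on any failure). */
static void *seq_open_private_sketch(struct file *f,
				     const struct seq_operations *ops,
				     int psize)
{
	void *private = kzalloc(psize, GFP_KERNEL);
	struct seq_file *seq;

	if (!private)
		return NULL;

	if (seq_open(f, ops)) {		/* non-zero return means the open failed */
		kfree(private);
		return NULL;
	}

	seq = f->private_data;		/* seq_open() parked the seq_file here */
	seq->private = private;
	return private;
}

That single NULL check is why both functions can collapse their error handling into "return iter ? 0 : -ENOMEM".
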
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f11a285ee5bb..48ef4960ec90 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
 
 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
-static inline void ftrace_disable_cpu(void)
-{
-	preempt_disable();
-	__this_cpu_inc(ftrace_cpu_disabled);
-}
-
-static inline void ftrace_enable_cpu(void)
-{
-	__this_cpu_dec(ftrace_cpu_disabled);
-	preempt_enable();
-}
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
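
Editor's note: for context on what the removed helpers guarded, here is a hedged sketch (illustrative, not a quote of trace.c) of how the surviving ftrace_cpu_disabled flag is consumed by the event-recording paths; record_event_sketch stands in for the real recording functions. The hunks below simply drop the raise/lower calls from the reading and reset paths.

#include <linux/compiler.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(int, ftrace_cpu_disabled);

static void record_event_sketch(void)
{
	/* Writers bail out while the per-CPU flag is raised; this is the
	 * check that ftrace_disable_cpu()/ftrace_enable_cpu() used to
	 * toggle around buffer manipulation. */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	/* ... reserve, fill and commit a ring buffer event ... */
}
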
@@ -748,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
-	ftrace_disable_cpu();
-
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
 	if (ret == -EBUSY) {
@@ -763,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		 "Failed to swap buffers due to commit in progress\n");
 	}
 
-	ftrace_enable_cpu();
-
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
@@ -916,13 +900,6 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct ring_buffer *buffer, int cpu)
-{
-	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(buffer, cpu);
-	ftrace_enable_cpu();
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -931,7 +908,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -949,7 +926,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(buffer, cpu);
+		ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -1733,14 +1710,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	iter->idx++;
 	if (iter->buffer_iter[iter->cpu])
 		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-
-	ftrace_enable_cpu();
 }
 
 static struct trace_entry *
@@ -1750,17 +1722,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
 
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
 		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
 					 lost_events);
 
-	ftrace_enable_cpu();
-
 	if (event) {
 		iter->ent_size = ring_buffer_event_length(event);
 		return ring_buffer_event_data(event);
@@ -1850,11 +1817,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
 			    &iter->lost_events);
-	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1943,16 +1907,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		iter->cpu = 0;
 		iter->idx = -1;
 
-		ftrace_disable_cpu();
-
 		if (cpu_file == TRACE_PIPE_ALL_CPU) {
 			for_each_tracing_cpu(cpu)
 				tracing_iter_reset(iter, cpu);
 		} else
 			tracing_iter_reset(iter, cpu_file);
 
-		ftrace_enable_cpu();
-
 		iter->leftover = 0;
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
@@ -2413,15 +2373,13 @@ static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file)
 {
 	long cpu_file = (long) inode->i_private;
-	void *fail_ret = ERR_PTR(-ENOMEM);
 	struct trace_iterator *iter;
-	struct seq_file *m;
-	int cpu, ret;
+	int cpu;
 
 	if (tracing_disabled)
 		return ERR_PTR(-ENODEV);
 
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 	if (!iter)
 		return ERR_PTR(-ENOMEM);
 
@@ -2478,32 +2436,15 @@ __tracing_open(struct inode *inode, struct file *file)
 			tracing_iter_reset(iter, cpu);
 	}
 
-	ret = seq_open(file, &tracer_seq_ops);
-	if (ret < 0) {
-		fail_ret = ERR_PTR(ret);
-		goto fail_buffer;
-	}
-
-	m = file->private_data;
-	m->private = iter;
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
 
- fail_buffer:
-	for_each_tracing_cpu(cpu) {
-		if (iter->buffer_iter[cpu])
-			ring_buffer_read_finish(iter->buffer_iter[cpu]);
-	}
-	free_cpumask_var(iter->started);
-	tracing_start();
  fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
-	kfree(iter);
-
-	return fail_ret;
+	seq_release_private(inode, file);
+	return ERR_PTR(-ENOMEM);
 }
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2539,11 +2480,10 @@ static int tracing_release(struct inode *inode, struct file *file)
 		tracing_start();
 	mutex_unlock(&trace_types_lock);
 
-	seq_release(inode, file);
 	mutex_destroy(&iter->mutex);
 	free_cpumask_var(iter->started);
 	kfree(iter->trace);
-	kfree(iter);
+	seq_release_private(inode, file);
 	return 0;
 }
 
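
Editor's note: the tracing_release() hunk pairs with the open-side helper. seq_release_private() frees the seq_file's private area before doing the ordinary release, which is why the manual kfree(iter) and the separate seq_release() call both disappear. A rough, illustrative counterpart to the earlier open sketch (seq_release_private_sketch is a made-up name):

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

/* Rough counterpart to the open-side sketch: free the private area that
 * __seq_open_private() attached, then perform the normal seq_file release. */
static int seq_release_private_sketch(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	kfree(seq->private);
	seq->private = NULL;
	return seq_release(inode, file);
}
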