Diffstat (limited to 'kernel/trace')
 kernel/trace/Kconfig              | 22 +++++++++-------------
 kernel/trace/Makefile             |  6 +++---
 kernel/trace/ftrace.c             | 43 +++++++++++++++++++++++++++++++++++++++++++
 kernel/trace/trace.c              |  2 +-
 kernel/trace/trace.h              |  2 +-
 kernel/trace/trace_functions.c    |  2 +-
 kernel/trace/trace_irqsoff.c      |  4 ++--
 kernel/trace/trace_sched_wakeup.c |  4 ++--
 kernel/trace/trace_selftest.c     |  4 ++--
 kernel/trace/trace_stack.c        |  4 ++++
 10 files changed, 68 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1cb3e1f616af..3533c583df47 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1,11 +1,12 @@
 #
-# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+# Architectures that offer an FUNCTION_TRACER implementation should
+# select HAVE_FUNCTION_TRACER:
 #
 
 config NOP_TRACER
 	bool
 
-config HAVE_FTRACE
+config HAVE_FUNCTION_TRACER
 	bool
 	select NOP_TRACER
 
@@ -28,9 +29,9 @@ config TRACING
 	select STACKTRACE
 	select TRACEPOINTS
 
-config FTRACE
+config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
 	select TRACING
@@ -49,7 +50,6 @@ config IRQSOFF_TRACER
 	default n
 	depends on TRACE_IRQFLAGS_SUPPORT
 	depends on GENERIC_TIME
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACE_IRQFLAGS
 	select TRACING
@@ -73,7 +73,6 @@ config PREEMPT_TRACER
 	default n
 	depends on GENERIC_TIME
 	depends on PREEMPT
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select TRACER_MAX_TRACE
@@ -101,7 +100,6 @@ config SYSPROF_TRACER
 
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
@@ -112,7 +110,6 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
 	bool "Trace process context switches"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select MARKERS
@@ -122,7 +119,6 @@ config CONTEXT_SWITCH_TRACER
 
 config BOOT_TRACER
 	bool "Trace boot initcalls"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	help
@@ -141,9 +137,9 @@ config BOOT_TRACER
 
 config STACK_TRACER
 	bool "Trace max stack"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
-	select FTRACE
+	select FUNCTION_TRACER
 	select STACKTRACE
 	help
 	  This special tracer records the maximum stack footprint of the
@@ -160,7 +156,7 @@ config STACK_TRACER
 
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
-	depends on FTRACE
+	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	depends on DEBUG_KERNEL
 	default y
@@ -170,7 +166,7 @@ config DYNAMIC_FTRACE
 	  with a No-Op instruction) as they are called. A table is
 	  created to dynamically enable them again.
 
-	  This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
 	  has native performance as long as no tracing is active.
 
 	  The changes to the code are done by a kernel thread that
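With the Kconfig rename, an architecture advertises function-tracing support by selecting HAVE_FUNCTION_TRACER rather than HAVE_FTRACE, and its mcount plumbing is guarded by CONFIG_FUNCTION_TRACER. A minimal sketch of the arch side, modeled on the x86 pattern of this era (illustrative only, not part of this patch):

/*
 * Arch-side sketch: provide and export the mcount entry point only
 * when the function tracer is configured in.
 */
#ifdef CONFIG_FUNCTION_TRACER
extern void mcount(void);	/* implemented in arch assembly */
EXPORT_SYMBOL(mcount);		/* modules built with -pg call it too */
#endif
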
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index a85dfba88ba0..c8228b1a49e9 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,7 +1,7 @@
 
 # Do not instrument the tracer itself:
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 
@@ -10,13 +10,13 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
 
-obj-$(CONFIG_FTRACE) += libftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
-obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
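Stripping -pg from KBUILD_CFLAGS for this directory keeps ftrace from tracing itself. Under -pg, gcc plants a call to mcount() in every function prologue, so a function compiled that way behaves roughly like the hand-written sketch below (conceptual only; the call is emitted by the compiler, never written in C):

void some_traced_function(void)
{
	mcount();	/* compiler-inserted profiling hook */

	/* ... actual function body ... */
}

trace_selftest_dynamic.o is the deliberate exception: it is built with -pg so the dynamic-ftrace selftest has a known traceable function to exercise.
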
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4dda4f60a2a9..1f54a94189fe 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -164,10 +164,14 @@ static DEFINE_SPINLOCK(ftrace_hash_lock);
 #define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
 #define ftrace_hash_unlock(flags) \
 			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
+static void ftrace_release_hash(unsigned long start, unsigned long end);
 #else
 /* This is protected via the ftrace_lock with MCOUNT_RECORD. */
 #define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
 #define ftrace_hash_unlock(flags) do { } while(0)
+static inline void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+}
 #endif
 
 /*
@@ -347,6 +351,7 @@ void ftrace_release(void *start, unsigned long size)
 	}
 	spin_unlock(&ftrace_lock);
 
+	ftrace_release_hash(s, e);
 }
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -1659,6 +1664,44 @@ void __init ftrace_init(void)
 	ftrace_disabled = 1;
 }
 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
+
+static void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+	struct dyn_ftrace *rec;
+	struct hlist_node *t, *n;
+	struct hlist_head *head, temp_list;
+	unsigned long flags;
+	int i, cpu;
+
+	preempt_disable_notrace();
+
+	/* disable incase we call something that calls mcount */
+	cpu = raw_smp_processor_id();
+	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
+
+	ftrace_hash_lock(flags);
+
+	for (i = 0; i < FTRACE_HASHSIZE; i++) {
+		INIT_HLIST_HEAD(&temp_list);
+		head = &ftrace_hash[i];
+
+		/* all CPUS are stopped, we are safe to modify code */
+		hlist_for_each_entry_safe(rec, t, n, head, node) {
+			if (rec->flags & FTRACE_FL_FREE)
+				continue;
+
+			if ((rec->ip >= start) && (rec->ip < end))
+				ftrace_free_rec(rec);
+		}
+	}
+
+	ftrace_hash_unlock(flags);
+
+	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+	preempt_enable_notrace();
+
+}
+
 static int ftraced(void *ignore)
 {
 	unsigned long usecs;
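The new ftrace_release_hash() exists so that records for call sites in a vanishing text range (module unload being the expected path into ftrace_release()) are dropped from the hash before that memory is reused. Freeing entries mid-walk is safe because the _safe hlist iterator caches the next pointer before the loop body runs. A self-contained sketch of that pattern, using the four-iterator-argument hlist_for_each_entry_safe() of this kernel generation (types and names are illustrative):

#include <linux/list.h>

struct callsite {
	unsigned long ip;		/* recorded call-site address */
	struct hlist_node node;
};

/* drop every entry whose ip falls inside the released [start, end) range */
static void release_range(struct hlist_head *head,
			  unsigned long start, unsigned long end)
{
	struct callsite *c;
	struct hlist_node *t, *n;

	/* the _safe variant reads the next pointer up front, so the
	 * current entry may be unlinked without corrupting the walk */
	hlist_for_each_entry_safe(c, t, n, head, node) {
		if (c->ip >= start && c->ip < end)
			hlist_del(&c->node);
	}
}
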
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d345d649d073..aeb2f2505bc5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -851,7 +851,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	preempt_enable_notrace();
 }
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f1f99572cde7..6889ca48f1f1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -335,7 +335,7 @@ void update_max_tr_single(struct trace_array *tr,
 
 extern cycle_t ftrace_now(int cpu);
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 void tracing_start_function_trace(void);
 void tracing_stop_function_trace(void);
 #else
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e90eb0c2c56c..0f85a64003d3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -64,7 +64,7 @@ static void function_trace_ctrl_update(struct trace_array *tr)
 
 static struct tracer function_trace __read_mostly =
 {
-	.name	     = "ftrace",
+	.name	     = "function",
 	.init	     = function_trace_init,
 	.reset	     = function_trace_reset,
 	.ctrl_update = function_trace_ctrl_update,
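This rename is user-visible: the function tracer is now selected by writing "function" instead of "ftrace" to the debugfs current_tracer file, and scripts that still write the old name stop matching any registered tracer. Internally the core resolves the written string against the registered-tracer list by this .name field; a simplified sketch of that lookup (mirroring the trace_types list maintained by register_tracer() in trace.c, error handling omitted, not verbatim kernel code):

/* illustrative selection-by-name over the registered tracer list */
static struct tracer *find_tracer_by_name(const char *name)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		if (strcmp(t->name, name) == 0)
			return t;

	return NULL;	/* unknown name: the write is rejected */
}
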
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a7db7f040ae0..9c74071c10e0 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -63,7 +63,7 @@ irq_trace(void)
  */
 static __cacheline_aligned_in_smp	unsigned long max_sequence;
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
  */
@@ -104,7 +104,7 @@ static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = irqsoff_tracer_call,
 };
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 /*
  * Should this new latency be reported/recorded?
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index fe4a252c2363..3ae93f16b565 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -31,7 +31,7 @@ static raw_spinlock_t wakeup_lock =
 
 static void __wakeup_reset(struct trace_array *tr);
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
  */
@@ -96,7 +96,7 @@ static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = wakeup_tracer_call,
 };
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 /*
  * Should this new latency be reported/recorded?
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 09cf230d7eca..95815d26a041 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -70,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	return ret;
 }
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -226,7 +226,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 	return ret;
 }
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSOFF_TRACER
 int
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 74c5d9a3afae..be682b62fe58 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -44,6 +44,10 @@ static inline void check_stack(void)
 	if (this_size <= max_stack_size)
 		return;
 
+	/* we do not handle interrupt stacks yet */
+	if (!object_is_on_stack(&this_size))
+		return;
+
 	raw_local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 
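The added guard makes check_stack() bail out when it is not running on the current task's stack (on an interrupt stack, for instance), since the depth arithmetic that follows only makes sense for the task stack. object_is_on_stack() is essentially a bounds check against current's stack; approximately (a sketch of the <linux/sched.h> helper of this era, not code from this patch):

/* true iff obj points into current's THREAD_SIZE-sized task stack */
static inline int object_is_on_stack_sketch(void *obj)
{
	void *stack = task_stack_page(current);	/* stack base */

	return obj >= stack && obj < (stack + THREAD_SIZE);
}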