author:    Ingo Molnar <mingo@elte.hu>    2008-11-20 03:02:39 -0500
committer: Ingo Molnar <mingo@elte.hu>    2008-11-20 03:02:39 -0500
commit:    fbc2a06056c9aa3cb8c44bf1cfeb1d260e229e5c (patch)
tree:      feb2a1c13ad3dff5a8c7ab3c0265e8eca7a0c5a3 /kernel/trace
parent:    a3d732f93785da17e0137210deadb4616f5536fc (diff)
parent:    ee2f6cc7f9ea2542ad46070ed62ba7aa04d08871 (diff)
Merge branch 'linus' into x86/uv
Diffstat (limited to 'kernel/trace')

-rw-r--r--  kernel/trace/Kconfig                31
-rw-r--r--  kernel/trace/Makefile                6
-rw-r--r--  kernel/trace/ftrace.c              642
-rw-r--r--  kernel/trace/ring_buffer.c         179
-rw-r--r--  kernel/trace/trace.c                82
-rw-r--r--  kernel/trace/trace.h                22
-rw-r--r--  kernel/trace/trace_functions.c       2
-rw-r--r--  kernel/trace/trace_irqsoff.c         4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    4
-rw-r--r--  kernel/trace/trace_selftest.c       18
-rw-r--r--  kernel/trace/trace_stack.c           4

11 files changed, 383 insertions, 611 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1cb3e1f616af..33dbefd471e8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1,13 +1,13 @@
 #
-# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+# Architectures that offer an FUNCTION_TRACER implementation should
+# select HAVE_FUNCTION_TRACER:
 #
 
 config NOP_TRACER
 	bool
 
-config HAVE_FTRACE
+config HAVE_FUNCTION_TRACER
 	bool
-	select NOP_TRACER
 
 config HAVE_DYNAMIC_FTRACE
 	bool
@@ -25,12 +25,15 @@ config TRACING
 	bool
 	select DEBUG_FS
 	select RING_BUFFER
-	select STACKTRACE
+	select STACKTRACE if STACKTRACE_SUPPORT
 	select TRACEPOINTS
+	select NOP_TRACER
 
-config FTRACE
+menu "Tracers"
+
+config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
 	select TRACING
@@ -49,7 +52,6 @@ config IRQSOFF_TRACER
 	default n
 	depends on TRACE_IRQFLAGS_SUPPORT
 	depends on GENERIC_TIME
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACE_IRQFLAGS
 	select TRACING
@@ -73,7 +75,6 @@ config PREEMPT_TRACER
 	default n
 	depends on GENERIC_TIME
 	depends on PREEMPT
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select TRACER_MAX_TRACE
@@ -101,7 +102,6 @@ config SYSPROF_TRACER
 
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
@@ -112,7 +112,6 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
 	bool "Trace process context switches"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select MARKERS
@@ -122,9 +121,9 @@ config CONTEXT_SWITCH_TRACER
 
 config BOOT_TRACER
 	bool "Trace boot initcalls"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
+	select CONTEXT_SWITCH_TRACER
 	help
 	  This tracer helps developers to optimize boot times: it records
 	  the timings of the initcalls and traces key events and the identity
@@ -141,9 +140,9 @@ config BOOT_TRACER
 
 config STACK_TRACER
 	bool "Trace max stack"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
-	select FTRACE
+	select FUNCTION_TRACER
 	select STACKTRACE
 	help
 	  This special tracer records the maximum stack footprint of the
@@ -160,7 +159,7 @@ config STACK_TRACER
 
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
-	depends on FTRACE
+	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	depends on DEBUG_KERNEL
 	default y
@@ -170,7 +169,7 @@ config DYNAMIC_FTRACE
 	  with a No-Op instruction) as they are called. A table is
 	  created to dynamically enable them again.
 
-	  This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
 	  has native performance as long as no tracing is active.
 
 	  The changes to the code are done by a kernel thread that
@@ -195,3 +194,5 @@ config FTRACE_STARTUP_TEST
 	  a series of tests are made to verify that the tracer is
 	  functioning properly. It will do tests on all the configured
 	  tracers of ftrace.
+
+endmenu
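The comment at the top of the new Kconfig spells out the contract behind the rename: an architecture advertises its mcount-based tracer support by selecting HAVE_FUNCTION_TRACER, and the generic FUNCTION_TRACER option then becomes selectable. A minimal sketch of the arch side, purely illustrative (the arch symbol is hypothetical and not part of this patch):

```kconfig
# arch/<arch>/Kconfig (illustrative sketch)
config MYARCH
	bool
	default y
	select HAVE_FUNCTION_TRACER	# arch provides an mcount hook
	select HAVE_DYNAMIC_FTRACE	# only if the arch can patch call sites
```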
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index a85dfba88ba0..c8228b1a49e9 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,7 +1,7 @@
 
 # Do not instrument the tracer itself:
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 
@@ -10,13 +10,13 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
 
-obj-$(CONFIG_FTRACE) += libftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
-obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
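The `subst -pg` trick in this Makefile is what keeps the tracer from tracing itself: every object in the directory is built without the profiling hook, except trace_selftest_dynamic.o, which deliberately keeps `-pg` so the self-test has one traceable function. The pattern condensed into a sketch (mirroring the file above, not adding anything new):

```make
# Sketch: strip -pg for a whole directory, re-add it for one file.
ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))	# no mcount calls here

CFLAGS_trace_selftest_dynamic.o = -pg		# except this test object
obj-y += trace_selftest_dynamic.o
endif
```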
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4dda4f60a2a9..e60205722d0c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -25,13 +25,24 @@
 #include <linux/ftrace.h>
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
-#include <linux/hash.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
 
 #include "trace.h"
 
+#define FTRACE_WARN_ON(cond)			\
+	do {					\
+		if (WARN_ON(cond))		\
+			ftrace_kill();		\
+	} while (0)
+
+#define FTRACE_WARN_ON_ONCE(cond)		\
+	do {					\
+		if (WARN_ON_ONCE(cond))		\
+			ftrace_kill();		\
+	} while (0)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
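The two new macros tie every internal consistency check to a hard shutdown: the first tripped assertion calls ftrace_kill(), so no further code patching happens on a kernel whose tracing state is already suspect. The shape of that warn-then-disable pattern in isolation, as a hedged sketch (the names below are generic stand-ins, not kernel API):

```c
/* Generic form of the pattern used by FTRACE_WARN_ON() above. */
static int facility_disabled;

static void facility_kill(void)		/* cf. ftrace_kill() */
{
	facility_disabled = 1;
}

#define FACILITY_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			facility_kill();	\
	} while (0)
```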
@@ -153,21 +164,8 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
-/*
- * The hash lock is only needed when the recording of the mcount
- * callers are dynamic. That is, by the caller themselves and
- * not recorded via the compilation.
- */
-static DEFINE_SPINLOCK(ftrace_hash_lock);
-#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
-#define ftrace_hash_unlock(flags) \
-			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
-#else
-/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
-#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
-#define ftrace_hash_unlock(flags) do { } while(0)
+# error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
 /*
@@ -178,8 +176,6 @@ static DEFINE_SPINLOCK(ftrace_hash_lock);
  */
 static unsigned long mcount_addr = MCOUNT_ADDR;
 
-static struct task_struct *ftraced_task;
-
 enum {
 	FTRACE_ENABLE_CALLS	= (1 << 0),
 	FTRACE_DISABLE_CALLS	= (1 << 1),
@@ -189,14 +185,9 @@ enum {
 };
 
 static int ftrace_filtered;
-static int tracing_on;
-static int frozen_record_count;
 
-static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
+static LIST_HEAD(ftrace_new_addrs);
 
-static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
-
-static DEFINE_MUTEX(ftraced_lock);
 static DEFINE_MUTEX(ftrace_regex_lock);
 
 struct ftrace_page {
@@ -214,16 +205,13 @@ struct ftrace_page {
 static struct ftrace_page	*ftrace_pages_start;
 static struct ftrace_page	*ftrace_pages;
 
-static int ftraced_trigger;
-static int ftraced_suspend;
-static int ftraced_stop;
-
-static int ftrace_record_suspend;
-
 static struct dyn_ftrace *ftrace_free_records;
 
 
 #ifdef CONFIG_KPROBES
+
+static int frozen_record_count;
+
 static inline void freeze_record(struct dyn_ftrace *rec)
 {
 	if (!(rec->flags & FTRACE_FL_FROZEN)) {
@@ -250,72 +238,6 @@ static inline int record_frozen(struct dyn_ftrace *rec)
 # define record_frozen(rec)	({ 0; })
 #endif /* CONFIG_KPROBES */
 
-int skip_trace(unsigned long ip)
-{
-	unsigned long fl;
-	struct dyn_ftrace *rec;
-	struct hlist_node *t;
-	struct hlist_head *head;
-
-	if (frozen_record_count == 0)
-		return 0;
-
-	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
-	hlist_for_each_entry_rcu(rec, t, head, node) {
-		if (rec->ip == ip) {
-			if (record_frozen(rec)) {
-				if (rec->flags & FTRACE_FL_FAILED)
-					return 1;
-
-				if (!(rec->flags & FTRACE_FL_CONVERTED))
-					return 1;
-
-				if (!tracing_on || !ftrace_enabled)
-					return 1;
-
-				if (ftrace_filtered) {
-					fl = rec->flags & (FTRACE_FL_FILTER |
-							   FTRACE_FL_NOTRACE);
-					if (!fl || (fl & FTRACE_FL_NOTRACE))
-						return 1;
-				}
-			}
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static inline int
-ftrace_ip_in_hash(unsigned long ip, unsigned long key)
-{
-	struct dyn_ftrace *p;
-	struct hlist_node *t;
-	int found = 0;
-
-	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
-		if (p->ip == ip) {
-			found = 1;
-			break;
-		}
-	}
-
-	return found;
-}
-
-static inline void
-ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
-{
-	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
-}
-
-/* called from kstop_machine */
-static inline void ftrace_del_hash(struct dyn_ftrace *node)
-{
-	hlist_del(&node->node);
-}
-
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
 	rec->ip = (unsigned long)ftrace_free_records;
@@ -346,7 +268,6 @@ void ftrace_release(void *start, unsigned long size)
 		}
 	}
 	spin_unlock(&ftrace_lock);
-
 }
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -358,10 +279,8 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 		rec = ftrace_free_records;
 
 		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
-			WARN_ON_ONCE(1);
+			FTRACE_WARN_ON_ONCE(1);
 			ftrace_free_records = NULL;
-			ftrace_disabled = 1;
-			ftrace_enabled = 0;
 			return NULL;
 		}
 
@@ -371,76 +290,36 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 	}
 
 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-		if (!ftrace_pages->next)
-			return NULL;
+		if (!ftrace_pages->next) {
+			/* allocate another page */
+			ftrace_pages->next =
+				(void *)get_zeroed_page(GFP_KERNEL);
+			if (!ftrace_pages->next)
+				return NULL;
+		}
 		ftrace_pages = ftrace_pages->next;
 	}
 
 	return &ftrace_pages->records[ftrace_pages->index++];
 }
 
-static void
+static struct dyn_ftrace *
 ftrace_record_ip(unsigned long ip)
 {
-	struct dyn_ftrace *node;
-	unsigned long flags;
-	unsigned long key;
-	int resched;
-	int cpu;
+	struct dyn_ftrace *rec;
 
 	if (!ftrace_enabled || ftrace_disabled)
-		return;
-
-	resched = need_resched();
-	preempt_disable_notrace();
+		return NULL;
 
-	/*
-	 * We simply need to protect against recursion.
-	 * Use the the raw version of smp_processor_id and not
-	 * __get_cpu_var which can call debug hooks that can
-	 * cause a recursive crash here.
-	 */
-	cpu = raw_smp_processor_id();
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
-	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
-		goto out;
-
-	if (unlikely(ftrace_record_suspend))
-		goto out;
-
-	key = hash_long(ip, FTRACE_HASHBITS);
-
-	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
-
-	if (ftrace_ip_in_hash(ip, key))
-		goto out;
-
-	ftrace_hash_lock(flags);
-
-	/* This ip may have hit the hash before the lock */
-	if (ftrace_ip_in_hash(ip, key))
-		goto out_unlock;
-
-	node = ftrace_alloc_dyn_node(ip);
-	if (!node)
-		goto out_unlock;
-
-	node->ip = ip;
-
-	ftrace_add_hash(node, key);
+	rec = ftrace_alloc_dyn_node(ip);
+	if (!rec)
+		return NULL;
 
-	ftraced_trigger = 1;
+	rec->ip = ip;
 
-out_unlock:
-	ftrace_hash_unlock(flags);
-out:
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+	list_add(&rec->list, &ftrace_new_addrs);
 
-	/* prevent recursion with scheduler */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	return rec;
 }
 
 #define FTRACE_ADDR ((long)(ftrace_caller))
@@ -559,7 +438,6 @@ static void ftrace_replace_code(int enable)
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
 				    !core_kernel_text(rec->ip)) {
-					ftrace_del_hash(rec);
 					ftrace_free_rec(rec);
 				}
 			}
@@ -567,15 +445,6 @@ static void ftrace_replace_code(int enable)
 	}
 }
 
-static void ftrace_shutdown_replenish(void)
-{
-	if (ftrace_pages->next)
-		return;
-
-	/* allocate another page */
-	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-}
-
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
 	int i;
@@ -591,23 +460,23 @@ ftrace_code_disable(struct dyn_ftrace *rec)
 {
 	unsigned long ip;
 	unsigned char *nop, *call;
-	int failed;
+	int ret;
 
 	ip = rec->ip;
 
 	nop = ftrace_nop_replace();
 	call = ftrace_call_replace(ip, mcount_addr);
 
-	failed = ftrace_modify_code(ip, call, nop);
-	if (failed) {
-		switch (failed) {
-		case 1:
-			WARN_ON_ONCE(1);
+	ret = ftrace_modify_code(ip, call, nop);
+	if (ret) {
+		switch (ret) {
+		case -EFAULT:
+			FTRACE_WARN_ON_ONCE(1);
 			pr_info("ftrace faulted on modifying ");
 			print_ip_sym(ip);
 			break;
-		case 2:
-			WARN_ON_ONCE(1);
+		case -EINVAL:
+			FTRACE_WARN_ON_ONCE(1);
 			pr_info("ftrace failed to modify ");
 			print_ip_sym(ip);
 			print_ip_ins(" expected: ", call);
@@ -615,6 +484,15 @@ ftrace_code_disable(struct dyn_ftrace *rec)
 			print_ip_ins(" replace: ", nop);
 			printk(KERN_CONT "\n");
 			break;
+		case -EPERM:
+			FTRACE_WARN_ON_ONCE(1);
+			pr_info("ftrace faulted on writing ");
+			print_ip_sym(ip);
+			break;
+		default:
+			FTRACE_WARN_ON_ONCE(1);
+			pr_info("ftrace faulted on unknown error ");
+			print_ip_sym(ip);
 		}
 
 		rec->flags |= FTRACE_FL_FAILED;
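The switch above encodes the new contract for the arch-provided ftrace_modify_code(): instead of the old magic values 1 and 2 it returns standard errnos, so the caller can report exactly which stage failed. A sketch of an implementation honoring that contract — illustrative only, the real arch code differs in detail:

```c
static int my_modify_code(unsigned long ip, unsigned char *old_code,
			  unsigned char *new_code)
{
	unsigned char cur[MCOUNT_INSN_SIZE];

	/* -EFAULT: reading the call site faulted */
	if (probe_kernel_read(cur, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* -EINVAL: the bytes found are not what was expected */
	if (memcmp(cur, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* -EPERM: writing the replacement faulted */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
```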
@@ -623,37 +501,18 @@ ftrace_code_disable(struct dyn_ftrace *rec)
 	return 1;
 }
 
-static int __ftrace_update_code(void *ignore);
-
 static int __ftrace_modify_code(void *data)
 {
-	unsigned long addr;
 	int *command = data;
 
-	if (*command & FTRACE_ENABLE_CALLS) {
-		/*
-		 * Update any recorded ips now that we have the
-		 * machine stopped
-		 */
-		__ftrace_update_code(NULL);
+	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
-		tracing_on = 1;
-	} else if (*command & FTRACE_DISABLE_CALLS) {
+	else if (*command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
-		tracing_on = 0;
-	}
 
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
-	if (*command & FTRACE_ENABLE_MCOUNT) {
-		addr = (unsigned long)ftrace_record_ip;
-		ftrace_mcount_set(&addr);
-	} else if (*command & FTRACE_DISABLE_MCOUNT) {
-		addr = (unsigned long)ftrace_stub;
-		ftrace_mcount_set(&addr);
-	}
-
 	return 0;
 }
 
@@ -662,26 +521,9 @@ static void ftrace_run_update_code(int command)
 	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
-void ftrace_disable_daemon(void)
-{
-	/* Stop the daemon from calling kstop_machine */
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 1;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
-void ftrace_enable_daemon(void)
-{
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 0;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
 static ftrace_func_t saved_ftrace_func;
+static int ftrace_start;
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static void ftrace_startup(void)
 {
@@ -690,9 +532,9 @@ static void ftrace_startup(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend++;
-	if (ftraced_suspend == 1)
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start++;
+	if (ftrace_start == 1)
 		command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
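ftrace_start (guarded by ftrace_start_lock) replaces the old ftraced_suspend counter: it is a plain reference count, and only the 0→1 transition here in ftrace_startup() and the 1→0 transition in ftrace_shutdown() below actually patch calls in or out. The idiom in isolation, as a sketch with generic stand-in names:

```c
static void enable_expensive_machinery(void);	/* stand-in */
static void disable_expensive_machinery(void);	/* stand-in */

static DEFINE_MUTEX(gate_lock);
static int gate_count;

static void gate_start(void)		/* cf. ftrace_startup() */
{
	mutex_lock(&gate_lock);
	if (++gate_count == 1)
		enable_expensive_machinery();	/* only on 0 -> 1 */
	mutex_unlock(&gate_lock);
}

static void gate_stop(void)		/* cf. ftrace_shutdown() */
{
	mutex_lock(&gate_lock);
	if (--gate_count == 0)
		disable_expensive_machinery();	/* only on 1 -> 0 */
	mutex_unlock(&gate_lock);
}
```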
@@ -705,7 +547,7 @@ static void ftrace_startup(void)
 
 	ftrace_run_update_code(command);
  out:
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown(void)
@@ -715,9 +557,9 @@ static void ftrace_shutdown(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend--;
-	if (!ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start--;
+	if (!ftrace_start)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -730,7 +572,7 @@ static void ftrace_shutdown(void)
 
 	ftrace_run_update_code(command);
  out:
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_startup_sysctl(void)
@@ -740,15 +582,15 @@ static void ftrace_startup_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
+	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftraced_suspend is true if we want ftrace running */
-	if (ftraced_suspend)
+	/* ftrace_start is true if we want ftrace running */
+	if (ftrace_start)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown_sysctl(void)
@@ -758,112 +600,50 @@ static void ftrace_shutdown_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	/* ftraced_suspend is true if ftrace is running */
-	if (ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	/* ftrace_start is true if ftrace is running */
+	if (ftrace_start)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static cycle_t		ftrace_update_time;
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int __ftrace_update_code(void *ignore)
+static int ftrace_update_code(void)
 {
-	int i, save_ftrace_enabled;
+	struct dyn_ftrace *p, *t;
 	cycle_t start, stop;
-	struct dyn_ftrace *p;
-	struct hlist_node *t, *n;
-	struct hlist_head *head, temp_list;
-
-	/* Don't be recording funcs now */
-	ftrace_record_suspend++;
-	save_ftrace_enabled = ftrace_enabled;
-	ftrace_enabled = 0;
 
 	start = ftrace_now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
 
-	/* No locks needed, the machine is stopped! */
-	for (i = 0; i < FTRACE_HASHSIZE; i++) {
-		INIT_HLIST_HEAD(&temp_list);
-		head = &ftrace_hash[i];
-
-		/* all CPUS are stopped, we are safe to modify code */
-		hlist_for_each_entry_safe(p, t, n, head, node) {
-			/* Skip over failed records which have not been
-			 * freed. */
-			if (p->flags & FTRACE_FL_FAILED)
-				continue;
-
-			/* Unconverted records are always at the head of the
-			 * hash bucket. Once we encounter a converted record,
-			 * simply skip over to the next bucket. Saves ftraced
-			 * some processor cycles (ftrace does its bid for
-			 * global warming :-p ). */
-			if (p->flags & (FTRACE_FL_CONVERTED))
-				break;
+	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
 
-			/* Ignore updates to this record's mcount site.
-			 * Reintroduce this record at the head of this
-			 * bucket to attempt to "convert" it again if
-			 * the kprobe on it is unregistered before the
-			 * next run. */
-			if (get_kprobe((void *)p->ip)) {
-				ftrace_del_hash(p);
-				INIT_HLIST_NODE(&p->node);
-				hlist_add_head(&p->node, &temp_list);
-				freeze_record(p);
-				continue;
-			} else {
-				unfreeze_record(p);
-			}
+		/* If something went wrong, bail without enabling anything */
+		if (unlikely(ftrace_disabled))
+			return -1;
 
-			/* convert record (i.e, patch mcount-call with NOP) */
-			if (ftrace_code_disable(p)) {
-				p->flags |= FTRACE_FL_CONVERTED;
-				ftrace_update_cnt++;
-			} else {
-				if ((system_state == SYSTEM_BOOTING) ||
-				    !core_kernel_text(p->ip)) {
-					ftrace_del_hash(p);
-					ftrace_free_rec(p);
-				}
-			}
-		}
+		list_del_init(&p->list);
 
-		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
-			hlist_del(&p->node);
-			INIT_HLIST_NODE(&p->node);
-			hlist_add_head(&p->node, head);
-		}
+		/* convert record (i.e, patch mcount-call with NOP) */
+		if (ftrace_code_disable(p)) {
+			p->flags |= FTRACE_FL_CONVERTED;
+			ftrace_update_cnt++;
+		} else
+			ftrace_free_rec(p);
 	}
 
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
-	ftraced_trigger = 0;
-
-	ftrace_enabled = save_ftrace_enabled;
-	ftrace_record_suspend--;
 
 	return 0;
 }
 
-static int ftrace_update_code(void)
-{
-	if (unlikely(ftrace_disabled) ||
-	    !ftrace_enabled || !ftraced_trigger)
-		return 0;
-
-	stop_machine(__ftrace_update_code, NULL, NULL);
-
-	return 1;
-}
-
 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 {
 	struct ftrace_page *pg;
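This hunk is the heart of the daemon removal: the per-IP hash plus the periodic ftraced scan is replaced by a simple queue. ftrace_record_ip() (earlier in this diff) now just does list_add() onto ftrace_new_addrs, and ftrace_update_code() drains that list once, at a controlled moment. The producer/consumer shape, reduced to a sketch with generic names:

```c
struct site {
	unsigned long ip;
	struct list_head list;
};

static LIST_HEAD(new_sites);		/* cf. ftrace_new_addrs */

/* Producer: queue a newly discovered call site. */
static void record_site(struct site *s, unsigned long ip)
{
	s->ip = ip;
	list_add(&s->list, &new_sites);
}

/* Consumer: drain the queue; _safe variant because entries are
 * unlinked while walking. */
static void convert_new_sites(void)
{
	struct site *p, *t;

	list_for_each_entry_safe(p, t, &new_sites, list) {
		list_del_init(&p->list);
		/* ... patch the mcount call at p->ip to a NOP ... */
	}
}
```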
@@ -892,8 +672,8 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 	pg = ftrace_pages = ftrace_pages_start;
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
-	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
-		num_to_init, cnt);
+	pr_info("ftrace: allocating %ld entries in %d pages\n",
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -973,13 +753,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	void *p = NULL;
 	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > iter->pos)
+		*pos = iter->pos;
+
+	l = *pos;
+	p = t_next(m, p, &l);
 
 	return p;
 }
@@ -990,15 +768,21 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	seq_printf(m, "%s\n", str);
+	ret = seq_printf(m, "%s\n", str);
+	if (ret < 0) {
+		iter->pos--;
+		iter->idx--;
+	}
 
 	return 0;
 }
@@ -1024,7 +808,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
+	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -1111,7 +895,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
+		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1401,10 +1185,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 	}
 
 	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-	if (iter->filtered && ftraced_suspend && ftrace_enabled)
+	mutex_lock(&ftrace_start_lock);
+	if (iter->filtered && ftrace_start && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	kfree(iter);
@@ -1424,55 +1208,6 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
 	return ftrace_regex_release(inode, file, 0);
 }
 
-static ssize_t
-ftraced_read(struct file *filp, char __user *ubuf,
-	     size_t cnt, loff_t *ppos)
-{
-	/* don't worry about races */
-	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
-	int r = strlen(buf);
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-ftraced_write(struct file *filp, const char __user *ubuf,
-	      size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	long val;
-	int ret;
-
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	if (strncmp(buf, "enable", 6) == 0)
-		val = 1;
-	else if (strncmp(buf, "disable", 7) == 0)
-		val = 0;
-	else {
-		buf[cnt] = 0;
-
-		ret = strict_strtoul(buf, 10, &val);
-		if (ret < 0)
-			return ret;
-
-		val = !!val;
-	}
-
-	if (val)
-		ftrace_enable_daemon();
-	else
-		ftrace_disable_daemon();
-
-	filp->f_pos += cnt;
-
-	return cnt;
-}
-
 static struct file_operations ftrace_avail_fops = {
 	.open = ftrace_avail_open,
 	.read = seq_read,
@@ -1503,54 +1238,6 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static struct file_operations ftraced_fops = {
-	.open = tracing_open_generic,
-	.read = ftraced_read,
-	.write = ftraced_write,
-};
-
-/**
- * ftrace_force_update - force an update to all recording ftrace functions
- */
-int ftrace_force_update(void)
-{
-	int ret = 0;
-
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
-	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-
-	/*
-	 * If ftraced_trigger is not set, then there is nothing
-	 * to update.
-	 */
-	if (ftraced_trigger && !ftrace_update_code())
-		ret = -EBUSY;
-
-	mutex_unlock(&ftraced_lock);
-	mutex_unlock(&ftrace_sysctl_lock);
-
-	return ret;
-}
-
-static void ftrace_force_shutdown(void)
-{
-	struct task_struct *task;
-	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
-
-	mutex_lock(&ftraced_lock);
-	task = ftraced_task;
-	ftraced_task = NULL;
-	ftraced_suspend = -1;
-	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
-
-	if (task)
-		kthread_stop(task);
-}
-
 static __init int ftrace_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -1581,17 +1268,11 @@ static __init int ftrace_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
-	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
-				    NULL, &ftraced_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'ftraced_enabled' entry\n");
 	return 0;
 }
 
 fs_initcall(ftrace_init_debugfs);
 
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
 static int ftrace_convert_nops(unsigned long *start,
 			       unsigned long *end)
 {
@@ -1599,20 +1280,18 @@ static int ftrace_convert_nops(unsigned long *start,
 	unsigned long addr;
 	unsigned long flags;
 
+	mutex_lock(&ftrace_start_lock);
 	p = start;
 	while (p < end) {
 		addr = ftrace_call_adjust(*p++);
-		/* should not be called from interrupt context */
-		spin_lock(&ftrace_lock);
 		ftrace_record_ip(addr);
-		spin_unlock(&ftrace_lock);
-		ftrace_shutdown_replenish();
 	}
 
-	/* p is ignored */
+	/* disable interrupts to prevent kstop machine */
 	local_irq_save(flags);
-	__ftrace_update_code(p);
+	ftrace_update_code();
 	local_irq_restore(flags);
+	mutex_unlock(&ftrace_start_lock);
 
 	return 0;
 }
@@ -1658,130 +1337,34 @@ void __init ftrace_init(void)
  failed:
 	ftrace_disabled = 1;
 }
-#else /* CONFIG_FTRACE_MCOUNT_RECORD */
-static int ftraced(void *ignore)
-{
-	unsigned long usecs;
-
-	while (!kthread_should_stop()) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
 
-		/* check once a second */
-		schedule_timeout(HZ);
-
-		if (unlikely(ftrace_disabled))
-			continue;
-
-		mutex_lock(&ftrace_sysctl_lock);
-		mutex_lock(&ftraced_lock);
-		if (!ftraced_suspend && !ftraced_stop &&
-		    ftrace_update_code()) {
-			usecs = nsecs_to_usecs(ftrace_update_time);
-			if (ftrace_update_tot_cnt > 100000) {
-				ftrace_update_tot_cnt = 0;
-				pr_info("hm, dftrace overflow: %lu change%s"
-					" (%lu total) in %lu usec%s\n",
-					ftrace_update_cnt,
-					ftrace_update_cnt != 1 ? "s" : "",
-					ftrace_update_tot_cnt,
-					usecs, usecs != 1 ? "s" : "");
-				ftrace_disabled = 1;
-				WARN_ON_ONCE(1);
-			}
-		}
-		mutex_unlock(&ftraced_lock);
-		mutex_unlock(&ftrace_sysctl_lock);
-
-		ftrace_shutdown_replenish();
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
-}
+#else
 
-static int __init ftrace_dynamic_init(void)
+static int __init ftrace_nodyn_init(void)
 {
-	struct task_struct *p;
-	unsigned long addr;
-	int ret;
-
-	addr = (unsigned long)ftrace_record_ip;
-
-	stop_machine(ftrace_dyn_arch_init, &addr, NULL);
-
-	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr) {
-		ret = (int)addr;
-		goto failed;
-	}
-
-	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
-	if (ret)
-		goto failed;
-
-	p = kthread_run(ftraced, NULL, "ftraced");
-	if (IS_ERR(p)) {
-		ret = -1;
-		goto failed;
-	}
-
-	last_ftrace_enabled = ftrace_enabled = 1;
-	ftraced_task = p;
-
+	ftrace_enabled = 1;
 	return 0;
-
-failed:
-	ftrace_disabled = 1;
-	return ret;
 }
+device_initcall(ftrace_nodyn_init);
 
-core_initcall(ftrace_dynamic_init);
-#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
-
-#else
 # define ftrace_startup()		do { } while (0)
 # define ftrace_shutdown()		do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
-# define ftrace_force_shutdown()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /**
- * ftrace_kill_atomic - kill ftrace from critical sections
+ * ftrace_kill - kill ftrace
  *
  * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
-void ftrace_kill_atomic(void)
-{
-	ftrace_disabled = 1;
-	ftrace_enabled = 0;
-#ifdef CONFIG_DYNAMIC_FTRACE
-	ftraced_suspend = -1;
-#endif
-	clear_ftrace_function();
-}
-
-/**
- * ftrace_kill - totally shutdown ftrace
- *
- * This is a safety measure. If something was detected that seems
- * wrong, calling this function will keep ftrace from doing
- * any more modifications, and updates.
- * used when something went wrong.
- */
 void ftrace_kill(void)
 {
-	mutex_lock(&ftrace_sysctl_lock);
 	ftrace_disabled = 1;
 	ftrace_enabled = 0;
-
 	clear_ftrace_function();
-	mutex_unlock(&ftrace_sysctl_lock);
-
-	/* Try to totally disable ftrace */
-	ftrace_force_shutdown();
 }
 
 /**
@@ -1870,3 +1453,4 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
+
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 94af1fe56bb4..036456cbb4f7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,14 +16,49 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
+/* Global flag to disable all recording to ring buffers */
+static int ring_buffers_off __read_mostly;
+
+/**
+ * tracing_on - enable all tracing buffers
+ *
+ * This function enables all tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	ring_buffers_off = 0;
+}
+
+/**
+ * tracing_off - turn off all tracing buffers
+ *
+ * This function stops all tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	ring_buffers_off = 1;
+}
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
 /* FIXME!!! */
 u64 ring_buffer_time_stamp(int cpu)
 {
+	u64 time;
+
+	preempt_disable_notrace();
 	/* shift to debug/test normalization and TIME_EXTENTS */
-	return sched_clock() << DEBUG_SHIFT;
+	time = sched_clock() << DEBUG_SHIFT;
+	preempt_enable_notrace();
+
+	return time;
 }
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
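tracing_on()/tracing_off() give the rest of the kernel a single cheap switch over every ring buffer; as the new kerneldoc says, writers simply start failing rather than the buffers being torn down, so already-recorded data survives. A hypothetical caller (the helper below is made up for illustration, not part of this patch) would use it to freeze the trace at the moment of a failure:

```c
/* Hypothetical debug helper, not part of this patch. */
static void freeze_trace_on_failure(int failed)
{
	if (failed) {
		tracing_off();	/* further writes fail; data is kept */
		WARN_ON(1);	/* leave a marker in the log */
	}
}
```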
@@ -130,7 +165,7 @@ struct buffer_page {
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
 	if (bpage->page)
-		__free_page(bpage->page);
+		free_page((unsigned long)bpage->page);
 	kfree(bpage);
 }
 
@@ -503,6 +538,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	LIST_HEAD(pages);
 	int i, cpu;
 
+	/*
+	 * Always succeed at resizing a non-existent buffer:
+	 */
+	if (!buffer)
+		return size;
+
 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	size *= BUF_PAGE_SIZE;
 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
@@ -966,7 +1007,9 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 	if (unlikely(*delta > (1ULL << 59) && !once++)) {
 		printk(KERN_WARNING "Delta way too big! %llu"
 		       " ts=%llu write stamp = %llu\n",
-		       *delta, *ts, cpu_buffer->write_stamp);
+		       (unsigned long long)*delta,
+		       (unsigned long long)*ts,
+		       (unsigned long long)cpu_buffer->write_stamp);
 		WARN_ON(1);
 	}
 
@@ -1020,8 +1063,23 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	struct ring_buffer_event *event;
 	u64 ts, delta;
 	int commit = 0;
+	int nr_loops = 0;
 
  again:
+	/*
+	 * We allow for interrupts to reenter here and do a trace.
+	 * If one does, it will cause this original code to loop
+	 * back here. Even with heavy interrupts happening, this
+	 * should only happen a few times in a row. If this happens
+	 * 1000 times in a row, there must be either an interrupt
+	 * storm or we have something buggy.
+	 * Bail!
+	 */
+	if (unlikely(++nr_loops > 1000)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
 	/*
@@ -1043,7 +1101,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* Did the write stamp get updated already? */
 	if (unlikely(ts < cpu_buffer->write_stamp))
-		goto again;
+		delta = 0;
 
 	if (test_time_stamp(delta)) {
 
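Two defensive changes meet in this function: the reserve path gets an upper bound on its retry loop (the nr_loops guard above), and the stale-timestamp race is now handled by falling back to `delta = 0` instead of retrying with `goto again`. The bounded-retry idiom in isolation, as a sketch — `try_once()` is a stand-in for the racy attempt, not a real API:

```c
static int try_once(void);	/* stand-in: returns 0 when interfered with */

static int do_with_retry_bound(void)
{
	int nr_loops = 0;

again:
	/* Give up loudly rather than spin forever under an IRQ storm. */
	if (++nr_loops > 1000)
		return -EBUSY;

	if (!try_once())
		goto again;

	return 0;
}
```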
@@ -1116,6 +1174,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return NULL;
+
 	if (atomic_read(&buffer->record_disabled))
 		return NULL;
 
@@ -1232,6 +1293,9 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	int ret = -EBUSY;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return -EBUSY;
+
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
@@ -1530,10 +1594,23 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct buffer_page *reader = NULL;
 	unsigned long flags;
+	int nr_loops = 0;
 
 	spin_lock_irqsave(&cpu_buffer->lock, flags);
 
  again:
+	/*
+	 * This should normally only loop twice. But because the
+	 * start of the reader inserts an empty page, it causes
+	 * a case where we will loop three times. There should be no
+	 * reason to loop four times (that I know of).
+	 */
+	if (unlikely(++nr_loops > 3)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		reader = NULL;
+		goto out;
+	}
+
 	reader = cpu_buffer->reader_page;
 
 	/* If there's more to read, return this page */
@@ -1663,6 +1740,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	struct buffer_page *reader;
+	int nr_loops = 0;
 
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return NULL;
@@ -1670,6 +1748,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	cpu_buffer = buffer->buffers[cpu];
 
  again:
+	/*
+	 * We repeat when a timestamp is encountered. It is possible
+	 * to get multiple timestamps from an interrupt entering just
+	 * as one timestamp is about to be written. The max times
+	 * that this can happen is the number of nested interrupts we
+	 * can have. Nesting 10 deep of interrupts is clearly
+	 * an anomaly.
+	 */
+	if (unlikely(++nr_loops > 10)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
 		return NULL;
@@ -1720,6 +1811,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1720 | struct ring_buffer *buffer; | 1811 | struct ring_buffer *buffer; |
1721 | struct ring_buffer_per_cpu *cpu_buffer; | 1812 | struct ring_buffer_per_cpu *cpu_buffer; |
1722 | struct ring_buffer_event *event; | 1813 | struct ring_buffer_event *event; |
1814 | int nr_loops = 0; | ||
1723 | 1815 | ||
1724 | if (ring_buffer_iter_empty(iter)) | 1816 | if (ring_buffer_iter_empty(iter)) |
1725 | return NULL; | 1817 | return NULL; |
@@ -1728,6 +1820,19 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1728 | buffer = cpu_buffer->buffer; | 1820 | buffer = cpu_buffer->buffer; |
1729 | 1821 | ||
1730 | again: | 1822 | again: |
1823 | /* | ||
1824 | * We repeat when a timestamp is encountered. It is possible | ||
1825 | * to get multiple timestamps from an interrupt entering just | ||
1826 | * as one timestamp is about to be written. The maximum number | ||
1827 | * of times this can happen is the number of nested interrupts | ||
1828 | * we can have. Nesting interrupts 10 deep is clearly | ||
1829 | * an anomaly. | ||
1830 | */ | ||
1831 | if (unlikely(++nr_loops > 10)) { | ||
1832 | RB_WARN_ON(cpu_buffer, 1); | ||
1833 | return NULL; | ||
1834 | } | ||
1835 | |||
1731 | if (rb_per_cpu_empty(cpu_buffer)) | 1836 | if (rb_per_cpu_empty(cpu_buffer)) |
1732 | return NULL; | 1837 | return NULL; |
1733 | 1838 | ||
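All three retry loops above get the same treatment: rather than trusting that the again: loop terminates, each is bounded at a count that legitimate nesting can never reach (3 for the reader-page swap, 10 for timestamp retries), with RB_WARN_ON firing once before the function bails out. A self-contained sketch of the watchdog shape, with a stand-in retry condition:

    #include <stdio.h>

    /* Pretend the condition settles on the second attempt. */
    static int settled(int attempt)
    {
        return attempt >= 2;
    }

    /* Bounded retry: warn and give up instead of spinning forever
     * if the loop runs more times than nesting can explain. */
    static int read_event(void)
    {
        int nr_loops = 0;

    again:
        if (++nr_loops > 10) {
            fprintf(stderr, "WARN: runaway retry loop\n");
            return -1;
        }
        if (!settled(nr_loops))
            goto again;
        return 0;
    }

    int main(void)
    {
        return read_event() ? 1 : 0;
    }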
@@ -2012,3 +2117,69 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2012 | return 0; | 2117 | return 0; |
2013 | } | 2118 | } |
2014 | 2119 | ||
2120 | static ssize_t | ||
2121 | rb_simple_read(struct file *filp, char __user *ubuf, | ||
2122 | size_t cnt, loff_t *ppos) | ||
2123 | { | ||
2124 | int *p = filp->private_data; | ||
2125 | char buf[64]; | ||
2126 | int r; | ||
2127 | |||
2128 | /* !ring_buffers_off == tracing_on */ | ||
2129 | r = sprintf(buf, "%d\n", !*p); | ||
2130 | |||
2131 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
2132 | } | ||
2133 | |||
2134 | static ssize_t | ||
2135 | rb_simple_write(struct file *filp, const char __user *ubuf, | ||
2136 | size_t cnt, loff_t *ppos) | ||
2137 | { | ||
2138 | int *p = filp->private_data; | ||
2139 | char buf[64]; | ||
2140 | long val; | ||
2141 | int ret; | ||
2142 | |||
2143 | if (cnt >= sizeof(buf)) | ||
2144 | return -EINVAL; | ||
2145 | |||
2146 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2147 | return -EFAULT; | ||
2148 | |||
2149 | buf[cnt] = 0; | ||
2150 | |||
2151 | ret = strict_strtoul(buf, 10, &val); | ||
2152 | if (ret < 0) | ||
2153 | return ret; | ||
2154 | |||
2155 | /* !ring_buffers_off == tracing_on */ | ||
2156 | *p = !val; | ||
2157 | |||
2158 | (*ppos)++; | ||
2159 | |||
2160 | return cnt; | ||
2161 | } | ||
2162 | |||
2163 | static struct file_operations rb_simple_fops = { | ||
2164 | .open = tracing_open_generic, | ||
2165 | .read = rb_simple_read, | ||
2166 | .write = rb_simple_write, | ||
2167 | }; | ||
2168 | |||
2169 | |||
2170 | static __init int rb_init_debugfs(void) | ||
2171 | { | ||
2172 | struct dentry *d_tracer; | ||
2173 | struct dentry *entry; | ||
2174 | |||
2175 | d_tracer = tracing_init_dentry(); | ||
2176 | |||
2177 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, | ||
2178 | &ring_buffers_off, &rb_simple_fops); | ||
2179 | if (!entry) | ||
2180 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); | ||
2181 | |||
2182 | return 0; | ||
2183 | } | ||
2184 | |||
2185 | fs_initcall(rb_init_debugfs); | ||
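The new debugfs file stores the inverse of what userspace writes, since the knob is named tracing_on but the backing variable is ring_buffers_off. A sketch of the parse-and-invert logic in rb_simple_write(), using strtol() as a user-space stand-in for strict_strtoul():

    #include <stdio.h>
    #include <stdlib.h>

    static int ring_buffers_off;

    /* writing "0" to tracing_on sets ring_buffers_off = 1, and
     * vice versa */
    static int parse_toggle(const char *buf)
    {
        char *end;
        long val = strtol(buf, &end, 10);

        if (end == buf)
            return -1;                 /* no digits: -EINVAL */
        ring_buffers_off = !val;
        return 0;
    }

    int main(void)
    {
        parse_toggle("0\n");
        printf("ring_buffers_off = %d\n", ring_buffers_off);   /* 1 */
        parse_toggle("1\n");
        printf("ring_buffers_off = %d\n", ring_buffers_off);   /* 0 */
        return 0;
    }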
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d345d649d073..697eda36b86a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include <linux/stacktrace.h> | 35 | #include <linux/stacktrace.h> |
36 | #include <linux/ring_buffer.h> | 36 | #include <linux/ring_buffer.h> |
37 | #include <linux/irqflags.h> | ||
37 | 38 | ||
38 | #include "trace.h" | 39 | #include "trace.h" |
39 | 40 | ||
@@ -655,7 +656,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
655 | entry->preempt_count = pc & 0xff; | 656 | entry->preempt_count = pc & 0xff; |
656 | entry->pid = (tsk) ? tsk->pid : 0; | 657 | entry->pid = (tsk) ? tsk->pid : 0; |
657 | entry->flags = | 658 | entry->flags = |
659 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | ||
658 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 660 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
661 | #else | ||
662 | TRACE_FLAG_IRQS_NOSUPPORT | | ||
663 | #endif | ||
659 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | | 664 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | |
660 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 665 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | |
661 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 666 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); |
@@ -700,6 +705,7 @@ static void ftrace_trace_stack(struct trace_array *tr, | |||
700 | unsigned long flags, | 705 | unsigned long flags, |
701 | int skip, int pc) | 706 | int skip, int pc) |
702 | { | 707 | { |
708 | #ifdef CONFIG_STACKTRACE | ||
703 | struct ring_buffer_event *event; | 709 | struct ring_buffer_event *event; |
704 | struct stack_entry *entry; | 710 | struct stack_entry *entry; |
705 | struct stack_trace trace; | 711 | struct stack_trace trace; |
@@ -725,6 +731,7 @@ static void ftrace_trace_stack(struct trace_array *tr, | |||
725 | 731 | ||
726 | save_stack_trace(&trace); | 732 | save_stack_trace(&trace); |
727 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 733 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
734 | #endif | ||
728 | } | 735 | } |
729 | 736 | ||
730 | void __trace_stack(struct trace_array *tr, | 737 | void __trace_stack(struct trace_array *tr, |
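Wrapping the body of ftrace_trace_stack() in #ifdef CONFIG_STACKTRACE makes the function compile to an empty stub on configurations without stack-trace support, so callers need no guards of their own. The pattern in miniature, with a hypothetical HAVE_STACKTRACE macro standing in for the kernel config symbol:

    #include <stdio.h>

    /* Body vanishes at compile time when support is absent; the
     * call sites stay unconditional either way. */
    static void trace_stack(void)
    {
    #ifdef HAVE_STACKTRACE
        puts("capturing stack trace");
    #endif
    }

    int main(void)
    {
        trace_stack();  /* no-op unless built with -DHAVE_STACKTRACE */
        return 0;
    }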
@@ -851,7 +858,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
851 | preempt_enable_notrace(); | 858 | preempt_enable_notrace(); |
852 | } | 859 | } |
853 | 860 | ||
854 | #ifdef CONFIG_FTRACE | 861 | #ifdef CONFIG_FUNCTION_TRACER |
855 | static void | 862 | static void |
856 | function_trace_call(unsigned long ip, unsigned long parent_ip) | 863 | function_trace_call(unsigned long ip, unsigned long parent_ip) |
857 | { | 864 | { |
@@ -865,9 +872,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
865 | if (unlikely(!ftrace_function_enabled)) | 872 | if (unlikely(!ftrace_function_enabled)) |
866 | return; | 873 | return; |
867 | 874 | ||
868 | if (skip_trace(ip)) | ||
869 | return; | ||
870 | |||
871 | pc = preempt_count(); | 875 | pc = preempt_count(); |
872 | resched = need_resched(); | 876 | resched = need_resched(); |
873 | preempt_disable_notrace(); | 877 | preempt_disable_notrace(); |
@@ -1084,17 +1088,20 @@ static void s_stop(struct seq_file *m, void *p) | |||
1084 | mutex_unlock(&trace_types_lock); | 1088 | mutex_unlock(&trace_types_lock); |
1085 | } | 1089 | } |
1086 | 1090 | ||
1087 | #define KRETPROBE_MSG "[unknown/kretprobe'd]" | ||
1088 | |||
1089 | #ifdef CONFIG_KRETPROBES | 1091 | #ifdef CONFIG_KRETPROBES |
1090 | static inline int kretprobed(unsigned long addr) | 1092 | static inline const char *kretprobed(const char *name) |
1091 | { | 1093 | { |
1092 | return addr == (unsigned long)kretprobe_trampoline; | 1094 | static const char tramp_name[] = "kretprobe_trampoline"; |
1095 | int size = sizeof(tramp_name); | ||
1096 | |||
1097 | if (strncmp(tramp_name, name, size) == 0) | ||
1098 | return "[unknown/kretprobe'd]"; | ||
1099 | return name; | ||
1093 | } | 1100 | } |
1094 | #else | 1101 | #else |
1095 | static inline int kretprobed(unsigned long addr) | 1102 | static inline const char *kretprobed(const char *name) |
1096 | { | 1103 | { |
1097 | return 0; | 1104 | return name; |
1098 | } | 1105 | } |
1099 | #endif /* CONFIG_KRETPROBES */ | 1106 | #endif /* CONFIG_KRETPROBES */ |
1100 | 1107 | ||
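The kretprobe check switches from comparing a raw address against kretprobe_trampoline to comparing the symbol name that kallsyms has already resolved, which also lets the callers below drop their KRETPROBE_MSG special case. A runnable sketch of the new check (note that sizeof includes the terminating NUL, so only an exact name matches):

    #include <stdio.h>
    #include <string.h>

    static const char *kretprobed(const char *name)
    {
        static const char tramp_name[] = "kretprobe_trampoline";

        /* sizeof(tramp_name) counts the NUL, so the comparison
         * demands the exact symbol name. */
        if (strncmp(tramp_name, name, sizeof(tramp_name)) == 0)
            return "[unknown/kretprobe'd]";
        return name;
    }

    int main(void)
    {
        printf("%s\n", kretprobed("kretprobe_trampoline"));  /* masked */
        printf("%s\n", kretprobed("schedule"));              /* unchanged */
        return 0;
    }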
@@ -1103,10 +1110,13 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | |||
1103 | { | 1110 | { |
1104 | #ifdef CONFIG_KALLSYMS | 1111 | #ifdef CONFIG_KALLSYMS |
1105 | char str[KSYM_SYMBOL_LEN]; | 1112 | char str[KSYM_SYMBOL_LEN]; |
1113 | const char *name; | ||
1106 | 1114 | ||
1107 | kallsyms_lookup(address, NULL, NULL, NULL, str); | 1115 | kallsyms_lookup(address, NULL, NULL, NULL, str); |
1108 | 1116 | ||
1109 | return trace_seq_printf(s, fmt, str); | 1117 | name = kretprobed(str); |
1118 | |||
1119 | return trace_seq_printf(s, fmt, name); | ||
1110 | #endif | 1120 | #endif |
1111 | return 1; | 1121 | return 1; |
1112 | } | 1122 | } |
@@ -1117,9 +1127,12 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
1117 | { | 1127 | { |
1118 | #ifdef CONFIG_KALLSYMS | 1128 | #ifdef CONFIG_KALLSYMS |
1119 | char str[KSYM_SYMBOL_LEN]; | 1129 | char str[KSYM_SYMBOL_LEN]; |
1130 | const char *name; | ||
1120 | 1131 | ||
1121 | sprint_symbol(str, address); | 1132 | sprint_symbol(str, address); |
1122 | return trace_seq_printf(s, fmt, str); | 1133 | name = kretprobed(str); |
1134 | |||
1135 | return trace_seq_printf(s, fmt, name); | ||
1123 | #endif | 1136 | #endif |
1124 | return 1; | 1137 | return 1; |
1125 | } | 1138 | } |
@@ -1246,7 +1259,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | |||
1246 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); | 1259 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); |
1247 | trace_seq_printf(s, "%3d", cpu); | 1260 | trace_seq_printf(s, "%3d", cpu); |
1248 | trace_seq_printf(s, "%c%c", | 1261 | trace_seq_printf(s, "%c%c", |
1249 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', | 1262 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : |
1263 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.', | ||
1250 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); | 1264 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); |
1251 | 1265 | ||
1252 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | 1266 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; |
@@ -1372,10 +1386,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1372 | 1386 | ||
1373 | seq_print_ip_sym(s, field->ip, sym_flags); | 1387 | seq_print_ip_sym(s, field->ip, sym_flags); |
1374 | trace_seq_puts(s, " ("); | 1388 | trace_seq_puts(s, " ("); |
1375 | if (kretprobed(field->parent_ip)) | 1389 | seq_print_ip_sym(s, field->parent_ip, sym_flags); |
1376 | trace_seq_puts(s, KRETPROBE_MSG); | ||
1377 | else | ||
1378 | seq_print_ip_sym(s, field->parent_ip, sym_flags); | ||
1379 | trace_seq_puts(s, ")\n"); | 1390 | trace_seq_puts(s, ")\n"); |
1380 | break; | 1391 | break; |
1381 | } | 1392 | } |
@@ -1491,12 +1502,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1491 | ret = trace_seq_printf(s, " <-"); | 1502 | ret = trace_seq_printf(s, " <-"); |
1492 | if (!ret) | 1503 | if (!ret) |
1493 | return TRACE_TYPE_PARTIAL_LINE; | 1504 | return TRACE_TYPE_PARTIAL_LINE; |
1494 | if (kretprobed(field->parent_ip)) | 1505 | ret = seq_print_ip_sym(s, |
1495 | ret = trace_seq_puts(s, KRETPROBE_MSG); | 1506 | field->parent_ip, |
1496 | else | 1507 | sym_flags); |
1497 | ret = seq_print_ip_sym(s, | ||
1498 | field->parent_ip, | ||
1499 | sym_flags); | ||
1500 | if (!ret) | 1508 | if (!ret) |
1501 | return TRACE_TYPE_PARTIAL_LINE; | 1509 | return TRACE_TYPE_PARTIAL_LINE; |
1502 | } | 1510 | } |
@@ -1747,7 +1755,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
1747 | return TRACE_TYPE_HANDLED; | 1755 | return TRACE_TYPE_HANDLED; |
1748 | 1756 | ||
1749 | SEQ_PUT_FIELD_RET(s, entry->pid); | 1757 | SEQ_PUT_FIELD_RET(s, entry->pid); |
1750 | SEQ_PUT_FIELD_RET(s, iter->cpu); | 1758 | SEQ_PUT_FIELD_RET(s, entry->cpu); |
1751 | SEQ_PUT_FIELD_RET(s, iter->ts); | 1759 | SEQ_PUT_FIELD_RET(s, iter->ts); |
1752 | 1760 | ||
1753 | switch (entry->type) { | 1761 | switch (entry->type) { |
@@ -2379,9 +2387,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2379 | int i; | 2387 | int i; |
2380 | size_t ret; | 2388 | size_t ret; |
2381 | 2389 | ||
2390 | ret = cnt; | ||
2391 | |||
2382 | if (cnt > max_tracer_type_len) | 2392 | if (cnt > max_tracer_type_len) |
2383 | cnt = max_tracer_type_len; | 2393 | cnt = max_tracer_type_len; |
2384 | ret = cnt; | ||
2385 | 2394 | ||
2386 | if (copy_from_user(&buf, ubuf, cnt)) | 2395 | if (copy_from_user(&buf, ubuf, cnt)) |
2387 | return -EFAULT; | 2396 | return -EFAULT; |
@@ -2414,8 +2423,8 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2414 | out: | 2423 | out: |
2415 | mutex_unlock(&trace_types_lock); | 2424 | mutex_unlock(&trace_types_lock); |
2416 | 2425 | ||
2417 | if (ret == cnt) | 2426 | if (ret > 0) |
2418 | filp->f_pos += cnt; | 2427 | filp->f_pos += ret; |
2419 | 2428 | ||
2420 | return ret; | 2429 | return ret; |
2421 | } | 2430 | } |
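The reordering above saves the full user count in ret before cnt is clamped to max_tracer_type_len, and f_pos now advances by ret instead of only when nothing was truncated; otherwise a write of an over-long tracer name would return a short count and userspace write() loops would resubmit the tail forever. The idea in a self-contained form, with a hypothetical 8-byte name limit:

    #include <stdio.h>
    #include <string.h>

    #define MAX_NAME 8     /* stands in for max_tracer_type_len */

    static long set_tracer_write(const char *ubuf, size_t cnt)
    {
        char buf[MAX_NAME + 1];
        long ret = cnt;            /* report the full count... */

        if (cnt > MAX_NAME)
            cnt = MAX_NAME;        /* ...but copy only what fits */
        memcpy(buf, ubuf, cnt);
        buf[cnt] = '\0';
        printf("tracer set to '%s'\n", buf);
        return ret;                /* not the truncated cnt */
    }

    int main(void)
    {
        /* 15 bytes submitted, 15 reported, name truncated to 8 */
        printf("consumed %ld\n", set_tracer_write("function_graph\n", 15));
        return 0;
    }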
@@ -2667,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2667 | { | 2676 | { |
2668 | unsigned long val; | 2677 | unsigned long val; |
2669 | char buf[64]; | 2678 | char buf[64]; |
2670 | int ret; | 2679 | int ret, cpu; |
2671 | struct trace_array *tr = filp->private_data; | 2680 | struct trace_array *tr = filp->private_data; |
2672 | 2681 | ||
2673 | if (cnt >= sizeof(buf)) | 2682 | if (cnt >= sizeof(buf)) |
@@ -2695,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2695 | goto out; | 2704 | goto out; |
2696 | } | 2705 | } |
2697 | 2706 | ||
2707 | /* disable all cpu buffers */ | ||
2708 | for_each_tracing_cpu(cpu) { | ||
2709 | if (global_trace.data[cpu]) | ||
2710 | atomic_inc(&global_trace.data[cpu]->disabled); | ||
2711 | if (max_tr.data[cpu]) | ||
2712 | atomic_inc(&max_tr.data[cpu]->disabled); | ||
2713 | } | ||
2714 | |||
2698 | if (val != global_trace.entries) { | 2715 | if (val != global_trace.entries) { |
2699 | ret = ring_buffer_resize(global_trace.buffer, val); | 2716 | ret = ring_buffer_resize(global_trace.buffer, val); |
2700 | if (ret < 0) { | 2717 | if (ret < 0) { |
@@ -2726,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2726 | if (tracing_disabled) | 2743 | if (tracing_disabled) |
2727 | cnt = -ENOMEM; | 2744 | cnt = -ENOMEM; |
2728 | out: | 2745 | out: |
2746 | for_each_tracing_cpu(cpu) { | ||
2747 | if (global_trace.data[cpu]) | ||
2748 | atomic_dec(&global_trace.data[cpu]->disabled); | ||
2749 | if (max_tr.data[cpu]) | ||
2750 | atomic_dec(&max_tr.data[cpu]->disabled); | ||
2751 | } | ||
2752 | |||
2729 | max_tr.entries = global_trace.entries; | 2753 | max_tr.entries = global_trace.entries; |
2730 | mutex_unlock(&trace_types_lock); | 2754 | mutex_unlock(&trace_types_lock); |
2731 | 2755 | ||
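The resize path now brackets the ring_buffer_resize() calls with atomic_inc()/atomic_dec() on every CPU's disabled counter, so tracers running concurrently see a non-zero count and skip writing while pages are being reallocated. The shape of the pattern using C11 atomics, with NCPUS standing in for the kernel's for_each_tracing_cpu() iteration:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4
    static atomic_int disabled[NCPUS];

    static void resize_buffers(long entries)
    {
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)       /* quiesce writers */
            atomic_fetch_add(&disabled[cpu], 1);

        printf("resizing to %ld entries\n", entries);  /* safe window */

        for (cpu = 0; cpu < NCPUS; cpu++)       /* re-enable */
            atomic_fetch_sub(&disabled[cpu], 1);
    }

    int main(void)
    {
        resize_buffers(1024);
        return 0;
    }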
@@ -3097,7 +3121,7 @@ void ftrace_dump(void) | |||
3097 | dump_ran = 1; | 3121 | dump_ran = 1; |
3098 | 3122 | ||
3099 | /* No turning back! */ | 3123 | /* No turning back! */ |
3100 | ftrace_kill_atomic(); | 3124 | ftrace_kill(); |
3101 | 3125 | ||
3102 | for_each_tracing_cpu(cpu) { | 3126 | for_each_tracing_cpu(cpu) { |
3103 | atomic_inc(&global_trace.data[cpu]->disabled); | 3127 | atomic_inc(&global_trace.data[cpu]->disabled); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f1f99572cde7..8465ad052707 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -120,18 +120,20 @@ struct trace_boot { | |||
120 | /* | 120 | /* |
121 | * trace_flag_type is an enumeration that holds different | 121 | * trace_flag_type is an enumeration that holds different |
122 | * states when a trace occurs. These are: | 122 | * states when a trace occurs. These are: |
123 | * IRQS_OFF - interrupts were disabled | 123 | * IRQS_OFF - interrupts were disabled |
124 | * NEED_RESCED - reschedule is requested | 124 | * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags |
125 | * HARDIRQ - inside an interrupt handler | 125 | * NEED_RESCED - reschedule is requested |
126 | * SOFTIRQ - inside a softirq handler | 126 | * HARDIRQ - inside an interrupt handler |
127 | * CONT - multiple entries hold the trace item | 127 | * SOFTIRQ - inside a softirq handler |
128 | * CONT - multiple entries hold the trace item | ||
128 | */ | 129 | */ |
129 | enum trace_flag_type { | 130 | enum trace_flag_type { |
130 | TRACE_FLAG_IRQS_OFF = 0x01, | 131 | TRACE_FLAG_IRQS_OFF = 0x01, |
131 | TRACE_FLAG_NEED_RESCHED = 0x02, | 132 | TRACE_FLAG_IRQS_NOSUPPORT = 0x02, |
132 | TRACE_FLAG_HARDIRQ = 0x04, | 133 | TRACE_FLAG_NEED_RESCHED = 0x04, |
133 | TRACE_FLAG_SOFTIRQ = 0x08, | 134 | TRACE_FLAG_HARDIRQ = 0x08, |
134 | TRACE_FLAG_CONT = 0x10, | 135 | TRACE_FLAG_SOFTIRQ = 0x10, |
136 | TRACE_FLAG_CONT = 0x20, | ||
135 | }; | 137 | }; |
136 | 138 | ||
137 | #define TRACE_BUF_SIZE 1024 | 139 | #define TRACE_BUF_SIZE 1024 |
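Inserting TRACE_FLAG_IRQS_NOSUPPORT at 0x02 shifts every later flag one bit left, and the latency format's first column (seen earlier in lat_print_generic()) grows an 'X' state for architectures that cannot report irqs_disabled_flags(). The renumbered bits and the three-way decode, runnable:

    #include <stdio.h>

    enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF       = 0x01,
        TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
        TRACE_FLAG_NEED_RESCHED   = 0x04,
        TRACE_FLAG_HARDIRQ        = 0x08,
        TRACE_FLAG_SOFTIRQ        = 0x10,
        TRACE_FLAG_CONT           = 0x20,
    };

    /* 'd' = irqs off, 'X' = arch can't tell, '.' = irqs on */
    static char irq_char(unsigned int flags)
    {
        return (flags & TRACE_FLAG_IRQS_OFF)       ? 'd' :
               (flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.';
    }

    int main(void)
    {
        printf("%c%c%c\n", irq_char(TRACE_FLAG_IRQS_OFF),
               irq_char(TRACE_FLAG_IRQS_NOSUPPORT), irq_char(0));
        return 0;
    }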
@@ -335,7 +337,7 @@ void update_max_tr_single(struct trace_array *tr, | |||
335 | 337 | ||
336 | extern cycle_t ftrace_now(int cpu); | 338 | extern cycle_t ftrace_now(int cpu); |
337 | 339 | ||
338 | #ifdef CONFIG_FTRACE | 340 | #ifdef CONFIG_FUNCTION_TRACER |
339 | void tracing_start_function_trace(void); | 341 | void tracing_start_function_trace(void); |
340 | void tracing_stop_function_trace(void); | 342 | void tracing_stop_function_trace(void); |
341 | #else | 343 | #else |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index e90eb0c2c56c..0f85a64003d3 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -64,7 +64,7 @@ static void function_trace_ctrl_update(struct trace_array *tr) | |||
64 | 64 | ||
65 | static struct tracer function_trace __read_mostly = | 65 | static struct tracer function_trace __read_mostly = |
66 | { | 66 | { |
67 | .name = "ftrace", | 67 | .name = "function", |
68 | .init = function_trace_init, | 68 | .init = function_trace_init, |
69 | .reset = function_trace_reset, | 69 | .reset = function_trace_reset, |
70 | .ctrl_update = function_trace_ctrl_update, | 70 | .ctrl_update = function_trace_ctrl_update, |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index a7db7f040ae0..9c74071c10e0 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -63,7 +63,7 @@ irq_trace(void) | |||
63 | */ | 63 | */ |
64 | static __cacheline_aligned_in_smp unsigned long max_sequence; | 64 | static __cacheline_aligned_in_smp unsigned long max_sequence; |
65 | 65 | ||
66 | #ifdef CONFIG_FTRACE | 66 | #ifdef CONFIG_FUNCTION_TRACER |
67 | /* | 67 | /* |
68 | * irqsoff uses its own tracer function to keep the overhead down: | 68 | * irqsoff uses its own tracer function to keep the overhead down: |
69 | */ | 69 | */ |
@@ -104,7 +104,7 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
104 | { | 104 | { |
105 | .func = irqsoff_tracer_call, | 105 | .func = irqsoff_tracer_call, |
106 | }; | 106 | }; |
107 | #endif /* CONFIG_FTRACE */ | 107 | #endif /* CONFIG_FUNCTION_TRACER */ |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * Should this new latency be reported/recorded? | 110 | * Should this new latency be reported/recorded? |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index fe4a252c2363..3ae93f16b565 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -31,7 +31,7 @@ static raw_spinlock_t wakeup_lock = | |||
31 | 31 | ||
32 | static void __wakeup_reset(struct trace_array *tr); | 32 | static void __wakeup_reset(struct trace_array *tr); |
33 | 33 | ||
34 | #ifdef CONFIG_FTRACE | 34 | #ifdef CONFIG_FUNCTION_TRACER |
35 | /* | 35 | /* |
36 | * irqsoff uses its own tracer function to keep the overhead down: | 36 | * irqsoff uses its own tracer function to keep the overhead down: |
37 | */ | 37 | */ |
@@ -96,7 +96,7 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
96 | { | 96 | { |
97 | .func = wakeup_tracer_call, | 97 | .func = wakeup_tracer_call, |
98 | }; | 98 | }; |
99 | #endif /* CONFIG_FTRACE */ | 99 | #endif /* CONFIG_FUNCTION_TRACER */ |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * Should this new latency be reported/recorded? | 102 | * Should this new latency be reported/recorded? |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 09cf230d7eca..90bc752a7580 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -70,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | #ifdef CONFIG_FTRACE | 73 | #ifdef CONFIG_FUNCTION_TRACER |
74 | 74 | ||
75 | #ifdef CONFIG_DYNAMIC_FTRACE | 75 | #ifdef CONFIG_DYNAMIC_FTRACE |
76 | 76 | ||
@@ -99,13 +99,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
99 | /* passed in by parameter to fool gcc from optimizing */ | 99 | /* passed in by parameter to fool gcc from optimizing */ |
100 | func(); | 100 | func(); |
101 | 101 | ||
102 | /* update the records */ | ||
103 | ret = ftrace_force_update(); | ||
104 | if (ret) { | ||
105 | printk(KERN_CONT ".. ftraced failed .. "); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | /* | 102 | /* |
110 | * Some archs *cough*PowerPC*cough* add characters to the | 103 | * Some archs *cough*PowerPC*cough* add characters to the |
111 | * start of the function names. We simply put a '*' to | 104 | * start of the function names. We simply put a '*' to |
@@ -183,13 +176,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
183 | /* make sure msleep has been recorded */ | 176 | /* make sure msleep has been recorded */ |
184 | msleep(1); | 177 | msleep(1); |
185 | 178 | ||
186 | /* force the recorded functions to be traced */ | ||
187 | ret = ftrace_force_update(); | ||
188 | if (ret) { | ||
189 | printk(KERN_CONT ".. ftraced failed .. "); | ||
190 | return ret; | ||
191 | } | ||
192 | |||
193 | /* start the tracing */ | 179 | /* start the tracing */ |
194 | ftrace_enabled = 1; | 180 | ftrace_enabled = 1; |
195 | tracer_enabled = 1; | 181 | tracer_enabled = 1; |
@@ -226,7 +212,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
226 | 212 | ||
227 | return ret; | 213 | return ret; |
228 | } | 214 | } |
229 | #endif /* CONFIG_FTRACE */ | 215 | #endif /* CONFIG_FUNCTION_TRACER */ |
230 | 216 | ||
231 | #ifdef CONFIG_IRQSOFF_TRACER | 217 | #ifdef CONFIG_IRQSOFF_TRACER |
232 | int | 218 | int |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 74c5d9a3afae..be682b62fe58 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -44,6 +44,10 @@ static inline void check_stack(void) | |||
44 | if (this_size <= max_stack_size) | 44 | if (this_size <= max_stack_size) |
45 | return; | 45 | return; |
46 | 46 | ||
47 | /* we do not handle interrupt stacks yet */ | ||
48 | if (!object_is_on_stack(&this_size)) | ||
49 | return; | ||
50 | |||
47 | raw_local_irq_save(flags); | 51 | raw_local_irq_save(flags); |
48 | __raw_spin_lock(&max_stack_lock); | 52 | __raw_spin_lock(&max_stack_lock); |
49 | 53 | ||
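The stack tracer now bails out when its probe variable is not on the current task's stack, i.e. when the check fires on a separate interrupt stack it does not yet know how to measure. The kernel's object_is_on_stack() is essentially a range check against the task's stack area; a sketch with a static array standing in for task_stack_page()/THREAD_SIZE:

    #include <stdio.h>

    #define STACK_SIZE (8 * 1024)
    static char fake_stack[STACK_SIZE];   /* stands in for the task stack */

    /* Is the object's address inside [stack, stack + size)? */
    static int object_is_on_stack(const void *obj)
    {
        const char *p = obj;

        return p >= fake_stack && p < fake_stack + STACK_SIZE;
    }

    int main(void)
    {
        int elsewhere;

        printf("%d\n", object_is_on_stack(fake_stack + 64));  /* 1 */
        printf("%d\n", object_is_on_stack(&elsewhere));       /* 0 */
        return 0;
    }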