author     Ingo Molnar <mingo@kernel.org>    2013-01-24 07:39:31 -0500
committer  Ingo Molnar <mingo@kernel.org>    2013-01-24 07:39:31 -0500
commit     4913ae3991acf00b414701852ee2193d1edd9c2d (patch)
tree       b079652f0a897f86c5ecc16b2d4ba4bc3fefd220 /kernel
parent     ff7532ca2c631e7e96dcd305a967b610259dc0ea (diff)
parent     0b07436d95b5404134da4d661fd183eac863513e (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Pull tracing updates from Steve Rostedt.
This commit:
tracing: Remove the extra 4 bytes of padding in events
changes the ABI. All involved parties seem to agree that it's safe to
do now, but the devil is in the details ...
Signed-off-by: Ingo Molnar <mingo@kernel.org>
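For context on the ABI note above: the 4 bytes being removed are the unused padding field at the end of the common header that every trace event records. A minimal sketch of the affected layout, assuming the 3.8-era definition of struct trace_entry (the field names match the trace.c and trace_events.c hunks below, but the struct itself is not part of this diff):

```c
/*
 * Common header at the start of every trace event record.
 * Sketch only -- layout assumed, not copied verbatim from
 * include/linux/ftrace_event.h.
 */
struct trace_entry {
	unsigned short	type;
	unsigned char	flags;
	unsigned char	preempt_count;
	int		pid;
	int		padding;	/* removed by this series */
};
```

Tools that parse the binary ring-buffer data are expected to take field offsets from the events' format files rather than hard-coding them, which is why the removal is judged safe despite being an ABI change.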
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kprobes.c                      |   8
-rw-r--r--  kernel/trace/Kconfig                  |   8
-rw-r--r--  kernel/trace/blktrace.c               |   2
-rw-r--r--  kernel/trace/ftrace.c                 |  88
-rw-r--r--  kernel/trace/ring_buffer.c            |  90
-rw-r--r--  kernel/trace/trace.c                  |  44
-rw-r--r--  kernel/trace/trace.h                  | 133
-rw-r--r--  kernel/trace/trace_clock.c            |   2
-rw-r--r--  kernel/trace/trace_events.c           |   1
-rw-r--r--  kernel/trace/trace_functions.c        |  61
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  60
-rw-r--r--  kernel/trace/trace_selftest.c         |  21
-rw-r--r--  kernel/trace/trace_syscalls.c         |  18
-rw-r--r--  kernel/trace/trace_uprobe.c           |   6
14 files changed, 373 insertions(+), 169 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 098f396aa409..f423c3ef4a82 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -919,7 +919,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) | |||
919 | } | 919 | } |
920 | #endif /* CONFIG_OPTPROBES */ | 920 | #endif /* CONFIG_OPTPROBES */ |
921 | 921 | ||
922 | #ifdef KPROBES_CAN_USE_FTRACE | 922 | #ifdef CONFIG_KPROBES_ON_FTRACE |
923 | static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { | 923 | static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { |
924 | .func = kprobe_ftrace_handler, | 924 | .func = kprobe_ftrace_handler, |
925 | .flags = FTRACE_OPS_FL_SAVE_REGS, | 925 | .flags = FTRACE_OPS_FL_SAVE_REGS, |
@@ -964,7 +964,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p) | |||
964 | (unsigned long)p->addr, 1, 0); | 964 | (unsigned long)p->addr, 1, 0); |
965 | WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); | 965 | WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); |
966 | } | 966 | } |
967 | #else /* !KPROBES_CAN_USE_FTRACE */ | 967 | #else /* !CONFIG_KPROBES_ON_FTRACE */ |
968 | #define prepare_kprobe(p) arch_prepare_kprobe(p) | 968 | #define prepare_kprobe(p) arch_prepare_kprobe(p) |
969 | #define arm_kprobe_ftrace(p) do {} while (0) | 969 | #define arm_kprobe_ftrace(p) do {} while (0) |
970 | #define disarm_kprobe_ftrace(p) do {} while (0) | 970 | #define disarm_kprobe_ftrace(p) do {} while (0) |
@@ -1414,12 +1414,12 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p, | |||
1414 | */ | 1414 | */ |
1415 | ftrace_addr = ftrace_location((unsigned long)p->addr); | 1415 | ftrace_addr = ftrace_location((unsigned long)p->addr); |
1416 | if (ftrace_addr) { | 1416 | if (ftrace_addr) { |
1417 | #ifdef KPROBES_CAN_USE_FTRACE | 1417 | #ifdef CONFIG_KPROBES_ON_FTRACE |
1418 | /* Given address is not on the instruction boundary */ | 1418 | /* Given address is not on the instruction boundary */ |
1419 | if ((unsigned long)p->addr != ftrace_addr) | 1419 | if ((unsigned long)p->addr != ftrace_addr) |
1420 | return -EILSEQ; | 1420 | return -EILSEQ; |
1421 | p->flags |= KPROBE_FLAG_FTRACE; | 1421 | p->flags |= KPROBE_FLAG_FTRACE; |
1422 | #else /* !KPROBES_CAN_USE_FTRACE */ | 1422 | #else /* !CONFIG_KPROBES_ON_FTRACE */ |
1423 | return -EINVAL; | 1423 | return -EINVAL; |
1424 | #endif | 1424 | #endif |
1425 | } | 1425 | } |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5d89335a485f..cdc9d284d24e 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -39,6 +39,9 @@ config HAVE_DYNAMIC_FTRACE | |||
39 | help | 39 | help |
40 | See Documentation/trace/ftrace-design.txt | 40 | See Documentation/trace/ftrace-design.txt |
41 | 41 | ||
42 | config HAVE_DYNAMIC_FTRACE_WITH_REGS | ||
43 | bool | ||
44 | |||
42 | config HAVE_FTRACE_MCOUNT_RECORD | 45 | config HAVE_FTRACE_MCOUNT_RECORD |
43 | bool | 46 | bool |
44 | help | 47 | help |
@@ -434,6 +437,11 @@ config DYNAMIC_FTRACE | |||
434 | were made. If so, it runs stop_machine (stops all CPUS) | 437 | were made. If so, it runs stop_machine (stops all CPUS) |
435 | and modifies the code to jump over the call to ftrace. | 438 | and modifies the code to jump over the call to ftrace. |
436 | 439 | ||
440 | config DYNAMIC_FTRACE_WITH_REGS | ||
441 | def_bool y | ||
442 | depends on DYNAMIC_FTRACE | ||
443 | depends on HAVE_DYNAMIC_FTRACE_WITH_REGS | ||
444 | |||
437 | config FUNCTION_PROFILER | 445 | config FUNCTION_PROFILER |
438 | bool "Kernel function profiler" | 446 | bool "Kernel function profiler" |
439 | depends on FUNCTION_TRACER | 447 | depends on FUNCTION_TRACER |
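HAVE_DYNAMIC_FTRACE_WITH_REGS / DYNAMIC_FTRACE_WITH_REGS replace the old ARCH_SUPPORTS_FTRACE_SAVE_REGS define, as the ftrace.c and trace_selftest.c hunks below show. A condensed sketch of the gate that __register_ftrace_function() applies on top of these symbols (check_save_regs() is an illustrative name, not a real helper, and the exact flag handling is an assumption based on the comment in the hunk below):

```c
#include <linux/errno.h>
#include <linux/ftrace.h>

/* An ops that insists on full pt_regs can only register when the arch
 * selects CONFIG_DYNAMIC_FTRACE_WITH_REGS, unless it also sets the
 * "save regs if supported" fallback flag.
 */
static int check_save_regs(struct ftrace_ops *ops)
{
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if ((ops->flags & FTRACE_OPS_FL_SAVE_REGS) &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;
#endif
	return 0;
}
```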
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c0bd0308741c..71259e2b6b61 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -147,7 +147,7 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...) | |||
147 | return; | 147 | return; |
148 | 148 | ||
149 | local_irq_save(flags); | 149 | local_irq_save(flags); |
150 | buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); | 150 | buf = this_cpu_ptr(bt->msg_data); |
151 | va_start(args, fmt); | 151 | va_start(args, fmt); |
152 | n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); | 152 | n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); |
153 | va_end(args); | 153 | va_end(args); |
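The blktrace hunk is a small cleanup: per_cpu_ptr(p, smp_processor_id()) and this_cpu_ptr(p) return the same pointer as long as preemption (here, interrupts) is disabled, but the latter lets the architecture use its cheaper this-CPU addressing. A minimal sketch with a hypothetical buffer, not the blktrace code itself:

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/string.h>

/* msg_buf is hypothetical; a real user would allocate it with
 * __alloc_percpu(), as blktrace does for bt->msg_data.
 */
static char __percpu *msg_buf;

static void fill_message(void)
{
	char *buf;

	preempt_disable();
	buf = per_cpu_ptr(msg_buf, smp_processor_id());	/* old spelling */
	buf = this_cpu_ptr(msg_buf);			/* new spelling  */
	strcpy(buf, "example");
	preempt_enable();
}
```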
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 41473b4ad7a4..ce8c3d68292f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); | |||
111 | #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) | 111 | #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) |
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | /* | ||
115 | * Traverse the ftrace_global_list, invoking all entries. The reason that we | ||
116 | * can use rcu_dereference_raw() is that elements removed from this list | ||
117 | * are simply leaked, so there is no need to interact with a grace-period | ||
118 | * mechanism. The rcu_dereference_raw() calls are needed to handle | ||
119 | * concurrent insertions into the ftrace_global_list. | ||
120 | * | ||
121 | * Silly Alpha and silly pointer-speculation compiler optimizations! | ||
122 | */ | ||
123 | #define do_for_each_ftrace_op(op, list) \ | ||
124 | op = rcu_dereference_raw(list); \ | ||
125 | do | ||
126 | |||
127 | /* | ||
128 | * Optimized for just a single item in the list (as that is the normal case). | ||
129 | */ | ||
130 | #define while_for_each_ftrace_op(op) \ | ||
131 | while (likely(op = rcu_dereference_raw((op)->next)) && \ | ||
132 | unlikely((op) != &ftrace_list_end)) | ||
133 | |||
114 | /** | 134 | /** |
115 | * ftrace_nr_registered_ops - return number of ops registered | 135 | * ftrace_nr_registered_ops - return number of ops registered |
116 | * | 136 | * |
@@ -132,29 +152,21 @@ int ftrace_nr_registered_ops(void) | |||
132 | return cnt; | 152 | return cnt; |
133 | } | 153 | } |
134 | 154 | ||
135 | /* | ||
136 | * Traverse the ftrace_global_list, invoking all entries. The reason that we | ||
137 | * can use rcu_dereference_raw() is that elements removed from this list | ||
138 | * are simply leaked, so there is no need to interact with a grace-period | ||
139 | * mechanism. The rcu_dereference_raw() calls are needed to handle | ||
140 | * concurrent insertions into the ftrace_global_list. | ||
141 | * | ||
142 | * Silly Alpha and silly pointer-speculation compiler optimizations! | ||
143 | */ | ||
144 | static void | 155 | static void |
145 | ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, | 156 | ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, |
146 | struct ftrace_ops *op, struct pt_regs *regs) | 157 | struct ftrace_ops *op, struct pt_regs *regs) |
147 | { | 158 | { |
148 | if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT))) | 159 | int bit; |
160 | |||
161 | bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX); | ||
162 | if (bit < 0) | ||
149 | return; | 163 | return; |
150 | 164 | ||
151 | trace_recursion_set(TRACE_GLOBAL_BIT); | 165 | do_for_each_ftrace_op(op, ftrace_global_list) { |
152 | op = rcu_dereference_raw(ftrace_global_list); /*see above*/ | ||
153 | while (op != &ftrace_list_end) { | ||
154 | op->func(ip, parent_ip, op, regs); | 166 | op->func(ip, parent_ip, op, regs); |
155 | op = rcu_dereference_raw(op->next); /*see above*/ | 167 | } while_for_each_ftrace_op(op); |
156 | }; | 168 | |
157 | trace_recursion_clear(TRACE_GLOBAL_BIT); | 169 | trace_clear_recursion(bit); |
158 | } | 170 | } |
159 | 171 | ||
160 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, | 172 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, |
@@ -221,10 +233,24 @@ static void update_global_ops(void) | |||
221 | * registered callers. | 233 | * registered callers. |
222 | */ | 234 | */ |
223 | if (ftrace_global_list == &ftrace_list_end || | 235 | if (ftrace_global_list == &ftrace_list_end || |
224 | ftrace_global_list->next == &ftrace_list_end) | 236 | ftrace_global_list->next == &ftrace_list_end) { |
225 | func = ftrace_global_list->func; | 237 | func = ftrace_global_list->func; |
226 | else | 238 | /* |
239 | * As we are calling the function directly. | ||
240 | * If it does not have recursion protection, | ||
241 | * the function_trace_op needs to be updated | ||
242 | * accordingly. | ||
243 | */ | ||
244 | if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) | ||
245 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
246 | else | ||
247 | global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE; | ||
248 | } else { | ||
227 | func = ftrace_global_list_func; | 249 | func = ftrace_global_list_func; |
250 | /* The list has its own recursion protection. */ | ||
251 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
252 | } | ||
253 | |||
228 | 254 | ||
229 | /* If we filter on pids, update to use the pid function */ | 255 | /* If we filter on pids, update to use the pid function */ |
230 | if (!list_empty(&ftrace_pids)) { | 256 | if (!list_empty(&ftrace_pids)) { |
@@ -337,7 +363,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
337 | if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) | 363 | if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) |
338 | return -EINVAL; | 364 | return -EINVAL; |
339 | 365 | ||
340 | #ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS | 366 | #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
341 | /* | 367 | /* |
342 | * If the ftrace_ops specifies SAVE_REGS, then it only can be used | 368 | * If the ftrace_ops specifies SAVE_REGS, then it only can be used |
343 | * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. | 369 | * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. |
@@ -4090,14 +4116,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |||
4090 | */ | 4116 | */ |
4091 | preempt_disable_notrace(); | 4117 | preempt_disable_notrace(); |
4092 | trace_recursion_set(TRACE_CONTROL_BIT); | 4118 | trace_recursion_set(TRACE_CONTROL_BIT); |
4093 | op = rcu_dereference_raw(ftrace_control_list); | 4119 | do_for_each_ftrace_op(op, ftrace_control_list) { |
4094 | while (op != &ftrace_list_end) { | ||
4095 | if (!ftrace_function_local_disabled(op) && | 4120 | if (!ftrace_function_local_disabled(op) && |
4096 | ftrace_ops_test(op, ip)) | 4121 | ftrace_ops_test(op, ip)) |
4097 | op->func(ip, parent_ip, op, regs); | 4122 | op->func(ip, parent_ip, op, regs); |
4098 | 4123 | } while_for_each_ftrace_op(op); | |
4099 | op = rcu_dereference_raw(op->next); | ||
4100 | }; | ||
4101 | trace_recursion_clear(TRACE_CONTROL_BIT); | 4124 | trace_recursion_clear(TRACE_CONTROL_BIT); |
4102 | preempt_enable_notrace(); | 4125 | preempt_enable_notrace(); |
4103 | } | 4126 | } |
@@ -4112,27 +4135,26 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
4112 | struct ftrace_ops *ignored, struct pt_regs *regs) | 4135 | struct ftrace_ops *ignored, struct pt_regs *regs) |
4113 | { | 4136 | { |
4114 | struct ftrace_ops *op; | 4137 | struct ftrace_ops *op; |
4138 | int bit; | ||
4115 | 4139 | ||
4116 | if (function_trace_stop) | 4140 | if (function_trace_stop) |
4117 | return; | 4141 | return; |
4118 | 4142 | ||
4119 | if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) | 4143 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); |
4144 | if (bit < 0) | ||
4120 | return; | 4145 | return; |
4121 | 4146 | ||
4122 | trace_recursion_set(TRACE_INTERNAL_BIT); | ||
4123 | /* | 4147 | /* |
4124 | * Some of the ops may be dynamically allocated, | 4148 | * Some of the ops may be dynamically allocated, |
4125 | * they must be freed after a synchronize_sched(). | 4149 | * they must be freed after a synchronize_sched(). |
4126 | */ | 4150 | */ |
4127 | preempt_disable_notrace(); | 4151 | preempt_disable_notrace(); |
4128 | op = rcu_dereference_raw(ftrace_ops_list); | 4152 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
4129 | while (op != &ftrace_list_end) { | ||
4130 | if (ftrace_ops_test(op, ip)) | 4153 | if (ftrace_ops_test(op, ip)) |
4131 | op->func(ip, parent_ip, op, regs); | 4154 | op->func(ip, parent_ip, op, regs); |
4132 | op = rcu_dereference_raw(op->next); | 4155 | } while_for_each_ftrace_op(op); |
4133 | }; | ||
4134 | preempt_enable_notrace(); | 4156 | preempt_enable_notrace(); |
4135 | trace_recursion_clear(TRACE_INTERNAL_BIT); | 4157 | trace_clear_recursion(bit); |
4136 | } | 4158 | } |
4137 | 4159 | ||
4138 | /* | 4160 | /* |
@@ -4143,8 +4165,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
4143 | * Archs are to support both the regs and ftrace_ops at the same time. | 4165 | * Archs are to support both the regs and ftrace_ops at the same time. |
4144 | * If they support ftrace_ops, it is assumed they support regs. | 4166 | * If they support ftrace_ops, it is assumed they support regs. |
4145 | * If call backs want to use regs, they must either check for regs | 4167 | * If call backs want to use regs, they must either check for regs |
4146 | * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS. | 4168 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. |
4147 | * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved. | 4169 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. |
4148 | * An architecture can pass partial regs with ftrace_ops and still | 4170 | * An architecture can pass partial regs with ftrace_ops and still |
4149 | * set the ARCH_SUPPORT_FTARCE_OPS. | 4171 | * set the ARCH_SUPPORT_FTARCE_OPS. |
4150 | */ | 4172 | */ |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ce8514feedcd..13950d9027cb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3,8 +3,10 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> |
5 | */ | 5 | */ |
6 | #include <linux/ftrace_event.h> | ||
6 | #include <linux/ring_buffer.h> | 7 | #include <linux/ring_buffer.h> |
7 | #include <linux/trace_clock.h> | 8 | #include <linux/trace_clock.h> |
9 | #include <linux/trace_seq.h> | ||
8 | #include <linux/spinlock.h> | 10 | #include <linux/spinlock.h> |
9 | #include <linux/debugfs.h> | 11 | #include <linux/debugfs.h> |
10 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
@@ -21,7 +23,6 @@ | |||
21 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
22 | 24 | ||
23 | #include <asm/local.h> | 25 | #include <asm/local.h> |
24 | #include "trace.h" | ||
25 | 26 | ||
26 | static void update_pages_handler(struct work_struct *work); | 27 | static void update_pages_handler(struct work_struct *work); |
27 | 28 | ||
@@ -2432,41 +2433,76 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2432 | 2433 | ||
2433 | #ifdef CONFIG_TRACING | 2434 | #ifdef CONFIG_TRACING |
2434 | 2435 | ||
2435 | #define TRACE_RECURSIVE_DEPTH 16 | 2436 | /* |
2437 | * The lock and unlock are done within a preempt disable section. | ||
2438 | * The current_context per_cpu variable can only be modified | ||
2439 | * by the current task between lock and unlock. But it can | ||
2440 | * be modified more than once via an interrupt. To pass this | ||
2441 | * information from the lock to the unlock without having to | ||
2442 | * access the 'in_interrupt()' functions again (which do show | ||
2443 | * a bit of overhead in something as critical as function tracing, | ||
2444 | * we use a bitmask trick. | ||
2445 | * | ||
2446 | * bit 0 = NMI context | ||
2447 | * bit 1 = IRQ context | ||
2448 | * bit 2 = SoftIRQ context | ||
2449 | * bit 3 = normal context. | ||
2450 | * | ||
2451 | * This works because this is the order of contexts that can | ||
2452 | * preempt other contexts. A SoftIRQ never preempts an IRQ | ||
2453 | * context. | ||
2454 | * | ||
2455 | * When the context is determined, the corresponding bit is | ||
2456 | * checked and set (if it was set, then a recursion of that context | ||
2457 | * happened). | ||
2458 | * | ||
2459 | * On unlock, we need to clear this bit. To do so, just subtract | ||
2460 | * 1 from the current_context and AND it to itself. | ||
2461 | * | ||
2462 | * (binary) | ||
2463 | * 101 - 1 = 100 | ||
2464 | * 101 & 100 = 100 (clearing bit zero) | ||
2465 | * | ||
2466 | * 1010 - 1 = 1001 | ||
2467 | * 1010 & 1001 = 1000 (clearing bit 1) | ||
2468 | * | ||
2469 | * The least significant bit can be cleared this way, and it | ||
2470 | * just so happens that it is the same bit corresponding to | ||
2471 | * the current context. | ||
2472 | */ | ||
2473 | static DEFINE_PER_CPU(unsigned int, current_context); | ||
2436 | 2474 | ||
2437 | /* Keep this code out of the fast path cache */ | 2475 | static __always_inline int trace_recursive_lock(void) |
2438 | static noinline void trace_recursive_fail(void) | ||
2439 | { | 2476 | { |
2440 | /* Disable all tracing before we do anything else */ | 2477 | unsigned int val = this_cpu_read(current_context); |
2441 | tracing_off_permanent(); | 2478 | int bit; |
2442 | 2479 | ||
2443 | printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:" | 2480 | if (in_interrupt()) { |
2444 | "HC[%lu]:SC[%lu]:NMI[%lu]\n", | 2481 | if (in_nmi()) |
2445 | trace_recursion_buffer(), | 2482 | bit = 0; |
2446 | hardirq_count() >> HARDIRQ_SHIFT, | 2483 | else if (in_irq()) |
2447 | softirq_count() >> SOFTIRQ_SHIFT, | 2484 | bit = 1; |
2448 | in_nmi()); | 2485 | else |
2449 | 2486 | bit = 2; | |
2450 | WARN_ON_ONCE(1); | 2487 | } else |
2451 | } | 2488 | bit = 3; |
2452 | |||
2453 | static inline int trace_recursive_lock(void) | ||
2454 | { | ||
2455 | trace_recursion_inc(); | ||
2456 | 2489 | ||
2457 | if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH)) | 2490 | if (unlikely(val & (1 << bit))) |
2458 | return 0; | 2491 | return 1; |
2459 | 2492 | ||
2460 | trace_recursive_fail(); | 2493 | val |= (1 << bit); |
2494 | this_cpu_write(current_context, val); | ||
2461 | 2495 | ||
2462 | return -1; | 2496 | return 0; |
2463 | } | 2497 | } |
2464 | 2498 | ||
2465 | static inline void trace_recursive_unlock(void) | 2499 | static __always_inline void trace_recursive_unlock(void) |
2466 | { | 2500 | { |
2467 | WARN_ON_ONCE(!trace_recursion_buffer()); | 2501 | unsigned int val = this_cpu_read(current_context); |
2468 | 2502 | ||
2469 | trace_recursion_dec(); | 2503 | val--; |
2504 | val &= this_cpu_read(current_context); | ||
2505 | this_cpu_write(current_context, val); | ||
2470 | } | 2506 | } |
2471 | 2507 | ||
2472 | #else | 2508 | #else |
@@ -3425,7 +3461,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
3425 | /* check for end of page padding */ | 3461 | /* check for end of page padding */ |
3426 | if ((iter->head >= rb_page_size(iter->head_page)) && | 3462 | if ((iter->head >= rb_page_size(iter->head_page)) && |
3427 | (iter->head_page != cpu_buffer->commit_page)) | 3463 | (iter->head_page != cpu_buffer->commit_page)) |
3428 | rb_advance_iter(iter); | 3464 | rb_inc_iter(iter); |
3429 | } | 3465 | } |
3430 | 3466 | ||
3431 | static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) | 3467 | static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) |
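The unlock step works because contexts only nest in one direction (normal -> softirq -> irq -> NMI), so the lowest set bit in the mask is always the context currently being left, and "subtract one, then AND" clears exactly that bit. A small user-space model of just the mask arithmetic (hypothetical code, not the ring-buffer implementation):

```c
#include <assert.h>

static unsigned int current_context;	/* stands in for the per-CPU variable */

static int model_lock(int bit)
{
	if (current_context & (1u << bit))
		return 1;		/* same context re-entered: recursion */
	current_context |= 1u << bit;
	return 0;
}

static void model_unlock(void)
{
	/* Clears the lowest set bit, i.e. the most recently entered context. */
	current_context &= current_context - 1;
}

int main(void)
{
	assert(model_lock(3) == 0);	/* normal context */
	assert(model_lock(1) == 0);	/* IRQ nests on top */
	assert(model_lock(1) == 1);	/* IRQ context again: recursion caught */
	model_unlock();			/* clears bit 1 (IRQ) */
	model_unlock();			/* clears bit 3 (normal) */
	assert(current_context == 0);
	return 0;
}
```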
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3c13e46d7d24..d2a658349ca1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -709,10 +709,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
709 | return; | 709 | return; |
710 | 710 | ||
711 | WARN_ON_ONCE(!irqs_disabled()); | 711 | WARN_ON_ONCE(!irqs_disabled()); |
712 | if (!current_trace->use_max_tr) { | 712 | |
713 | WARN_ON_ONCE(1); | 713 | /* If we disabled the tracer, stop now */ |
714 | if (current_trace == &nop_trace) | ||
714 | return; | 715 | return; |
715 | } | 716 | |
717 | if (WARN_ON_ONCE(!current_trace->use_max_tr)) | ||
718 | return; | ||
719 | |||
716 | arch_spin_lock(&ftrace_max_lock); | 720 | arch_spin_lock(&ftrace_max_lock); |
717 | 721 | ||
718 | tr->buffer = max_tr.buffer; | 722 | tr->buffer = max_tr.buffer; |
@@ -922,6 +926,9 @@ void tracing_reset(struct trace_array *tr, int cpu) | |||
922 | { | 926 | { |
923 | struct ring_buffer *buffer = tr->buffer; | 927 | struct ring_buffer *buffer = tr->buffer; |
924 | 928 | ||
929 | if (!buffer) | ||
930 | return; | ||
931 | |||
925 | ring_buffer_record_disable(buffer); | 932 | ring_buffer_record_disable(buffer); |
926 | 933 | ||
927 | /* Make sure all commits have finished */ | 934 | /* Make sure all commits have finished */ |
@@ -936,6 +943,9 @@ void tracing_reset_online_cpus(struct trace_array *tr) | |||
936 | struct ring_buffer *buffer = tr->buffer; | 943 | struct ring_buffer *buffer = tr->buffer; |
937 | int cpu; | 944 | int cpu; |
938 | 945 | ||
946 | if (!buffer) | ||
947 | return; | ||
948 | |||
939 | ring_buffer_record_disable(buffer); | 949 | ring_buffer_record_disable(buffer); |
940 | 950 | ||
941 | /* Make sure all commits have finished */ | 951 | /* Make sure all commits have finished */ |
@@ -1167,7 +1177,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
1167 | 1177 | ||
1168 | entry->preempt_count = pc & 0xff; | 1178 | entry->preempt_count = pc & 0xff; |
1169 | entry->pid = (tsk) ? tsk->pid : 0; | 1179 | entry->pid = (tsk) ? tsk->pid : 0; |
1170 | entry->padding = 0; | ||
1171 | entry->flags = | 1180 | entry->flags = |
1172 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 1181 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
1173 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 1182 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
@@ -1517,7 +1526,6 @@ static struct trace_buffer_struct *trace_percpu_nmi_buffer; | |||
1517 | static char *get_trace_buf(void) | 1526 | static char *get_trace_buf(void) |
1518 | { | 1527 | { |
1519 | struct trace_buffer_struct *percpu_buffer; | 1528 | struct trace_buffer_struct *percpu_buffer; |
1520 | struct trace_buffer_struct *buffer; | ||
1521 | 1529 | ||
1522 | /* | 1530 | /* |
1523 | * If we have allocated per cpu buffers, then we do not | 1531 | * If we have allocated per cpu buffers, then we do not |
@@ -1535,9 +1543,7 @@ static char *get_trace_buf(void) | |||
1535 | if (!percpu_buffer) | 1543 | if (!percpu_buffer) |
1536 | return NULL; | 1544 | return NULL; |
1537 | 1545 | ||
1538 | buffer = per_cpu_ptr(percpu_buffer, smp_processor_id()); | 1546 | return this_cpu_ptr(&percpu_buffer->buffer[0]); |
1539 | |||
1540 | return buffer->buffer; | ||
1541 | } | 1547 | } |
1542 | 1548 | ||
1543 | static int alloc_percpu_trace_buffer(void) | 1549 | static int alloc_percpu_trace_buffer(void) |
@@ -3183,6 +3189,7 @@ static int tracing_set_tracer(const char *buf) | |||
3183 | static struct trace_option_dentry *topts; | 3189 | static struct trace_option_dentry *topts; |
3184 | struct trace_array *tr = &global_trace; | 3190 | struct trace_array *tr = &global_trace; |
3185 | struct tracer *t; | 3191 | struct tracer *t; |
3192 | bool had_max_tr; | ||
3186 | int ret = 0; | 3193 | int ret = 0; |
3187 | 3194 | ||
3188 | mutex_lock(&trace_types_lock); | 3195 | mutex_lock(&trace_types_lock); |
@@ -3209,7 +3216,19 @@ static int tracing_set_tracer(const char *buf) | |||
3209 | trace_branch_disable(); | 3216 | trace_branch_disable(); |
3210 | if (current_trace && current_trace->reset) | 3217 | if (current_trace && current_trace->reset) |
3211 | current_trace->reset(tr); | 3218 | current_trace->reset(tr); |
3212 | if (current_trace && current_trace->use_max_tr) { | 3219 | |
3220 | had_max_tr = current_trace && current_trace->use_max_tr; | ||
3221 | current_trace = &nop_trace; | ||
3222 | |||
3223 | if (had_max_tr && !t->use_max_tr) { | ||
3224 | /* | ||
3225 | * We need to make sure that the update_max_tr sees that | ||
3226 | * current_trace changed to nop_trace to keep it from | ||
3227 | * swapping the buffers after we resize it. | ||
3228 | * The update_max_tr is called from interrupts disabled | ||
3229 | * so a synchronized_sched() is sufficient. | ||
3230 | */ | ||
3231 | synchronize_sched(); | ||
3213 | /* | 3232 | /* |
3214 | * We don't free the ring buffer. instead, resize it because | 3233 | * We don't free the ring buffer. instead, resize it because |
3215 | * The max_tr ring buffer has some state (e.g. ring->clock) and | 3234 | * The max_tr ring buffer has some state (e.g. ring->clock) and |
@@ -3220,10 +3239,8 @@ static int tracing_set_tracer(const char *buf) | |||
3220 | } | 3239 | } |
3221 | destroy_trace_option_files(topts); | 3240 | destroy_trace_option_files(topts); |
3222 | 3241 | ||
3223 | current_trace = &nop_trace; | ||
3224 | |||
3225 | topts = create_trace_option_files(t); | 3242 | topts = create_trace_option_files(t); |
3226 | if (t->use_max_tr) { | 3243 | if (t->use_max_tr && !had_max_tr) { |
3227 | /* we need to make per cpu buffer sizes equivalent */ | 3244 | /* we need to make per cpu buffer sizes equivalent */ |
3228 | ret = resize_buffer_duplicate_size(&max_tr, &global_trace, | 3245 | ret = resize_buffer_duplicate_size(&max_tr, &global_trace, |
3229 | RING_BUFFER_ALL_CPUS); | 3246 | RING_BUFFER_ALL_CPUS); |
@@ -4037,8 +4054,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
4037 | * Reset the buffer so that it doesn't have incomparable timestamps. | 4054 | * Reset the buffer so that it doesn't have incomparable timestamps. |
4038 | */ | 4055 | */ |
4039 | tracing_reset_online_cpus(&global_trace); | 4056 | tracing_reset_online_cpus(&global_trace); |
4040 | if (max_tr.buffer) | 4057 | tracing_reset_online_cpus(&max_tr); |
4041 | tracing_reset_online_cpus(&max_tr); | ||
4042 | 4058 | ||
4043 | mutex_unlock(&trace_types_lock); | 4059 | mutex_unlock(&trace_types_lock); |
4044 | 4060 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c75d7988902c..04a2c7ab1735 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -291,16 +291,57 @@ struct tracer { | |||
291 | 291 | ||
292 | 292 | ||
293 | /* Only current can touch trace_recursion */ | 293 | /* Only current can touch trace_recursion */ |
294 | #define trace_recursion_inc() do { (current)->trace_recursion++; } while (0) | ||
295 | #define trace_recursion_dec() do { (current)->trace_recursion--; } while (0) | ||
296 | 294 | ||
297 | /* Ring buffer has the 10 LSB bits to count */ | 295 | /* |
298 | #define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff) | 296 | * For function tracing recursion: |
299 | 297 | * The order of these bits are important. | |
300 | /* for function tracing recursion */ | 298 | * |
301 | #define TRACE_INTERNAL_BIT (1<<11) | 299 | * When function tracing occurs, the following steps are made: |
302 | #define TRACE_GLOBAL_BIT (1<<12) | 300 | * If arch does not support a ftrace feature: |
303 | #define TRACE_CONTROL_BIT (1<<13) | 301 | * call internal function (uses INTERNAL bits) which calls... |
302 | * If callback is registered to the "global" list, the list | ||
303 | * function is called and recursion checks the GLOBAL bits. | ||
304 | * then this function calls... | ||
305 | * The function callback, which can use the FTRACE bits to | ||
306 | * check for recursion. | ||
307 | * | ||
308 | * Now if the arch does not suppport a feature, and it calls | ||
309 | * the global list function which calls the ftrace callback | ||
310 | * all three of these steps will do a recursion protection. | ||
311 | * There's no reason to do one if the previous caller already | ||
312 | * did. The recursion that we are protecting against will | ||
313 | * go through the same steps again. | ||
314 | * | ||
315 | * To prevent the multiple recursion checks, if a recursion | ||
316 | * bit is set that is higher than the MAX bit of the current | ||
317 | * check, then we know that the check was made by the previous | ||
318 | * caller, and we can skip the current check. | ||
319 | */ | ||
320 | enum { | ||
321 | TRACE_BUFFER_BIT, | ||
322 | TRACE_BUFFER_NMI_BIT, | ||
323 | TRACE_BUFFER_IRQ_BIT, | ||
324 | TRACE_BUFFER_SIRQ_BIT, | ||
325 | |||
326 | /* Start of function recursion bits */ | ||
327 | TRACE_FTRACE_BIT, | ||
328 | TRACE_FTRACE_NMI_BIT, | ||
329 | TRACE_FTRACE_IRQ_BIT, | ||
330 | TRACE_FTRACE_SIRQ_BIT, | ||
331 | |||
332 | /* GLOBAL_BITs must be greater than FTRACE_BITs */ | ||
333 | TRACE_GLOBAL_BIT, | ||
334 | TRACE_GLOBAL_NMI_BIT, | ||
335 | TRACE_GLOBAL_IRQ_BIT, | ||
336 | TRACE_GLOBAL_SIRQ_BIT, | ||
337 | |||
338 | /* INTERNAL_BITs must be greater than GLOBAL_BITs */ | ||
339 | TRACE_INTERNAL_BIT, | ||
340 | TRACE_INTERNAL_NMI_BIT, | ||
341 | TRACE_INTERNAL_IRQ_BIT, | ||
342 | TRACE_INTERNAL_SIRQ_BIT, | ||
343 | |||
344 | TRACE_CONTROL_BIT, | ||
304 | 345 | ||
305 | /* | 346 | /* |
306 | * Abuse of the trace_recursion. | 347 | * Abuse of the trace_recursion. |
@@ -309,11 +350,77 @@ struct tracer { | |||
309 | * was called in irq context but we have irq tracing off. Since this | 350 | * was called in irq context but we have irq tracing off. Since this |
310 | * can only be modified by current, we can reuse trace_recursion. | 351 | * can only be modified by current, we can reuse trace_recursion. |
311 | */ | 352 | */ |
312 | #define TRACE_IRQ_BIT (1<<13) | 353 | TRACE_IRQ_BIT, |
354 | }; | ||
355 | |||
356 | #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) | ||
357 | #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) | ||
358 | #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) | ||
359 | |||
360 | #define TRACE_CONTEXT_BITS 4 | ||
361 | |||
362 | #define TRACE_FTRACE_START TRACE_FTRACE_BIT | ||
363 | #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1) | ||
364 | |||
365 | #define TRACE_GLOBAL_START TRACE_GLOBAL_BIT | ||
366 | #define TRACE_GLOBAL_MAX ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1) | ||
367 | |||
368 | #define TRACE_LIST_START TRACE_INTERNAL_BIT | ||
369 | #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) | ||
370 | |||
371 | #define TRACE_CONTEXT_MASK TRACE_LIST_MAX | ||
372 | |||
373 | static __always_inline int trace_get_context_bit(void) | ||
374 | { | ||
375 | int bit; | ||
313 | 376 | ||
314 | #define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0) | 377 | if (in_interrupt()) { |
315 | #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0) | 378 | if (in_nmi()) |
316 | #define trace_recursion_test(bit) ((current)->trace_recursion & (bit)) | 379 | bit = 0; |
380 | |||
381 | else if (in_irq()) | ||
382 | bit = 1; | ||
383 | else | ||
384 | bit = 2; | ||
385 | } else | ||
386 | bit = 3; | ||
387 | |||
388 | return bit; | ||
389 | } | ||
390 | |||
391 | static __always_inline int trace_test_and_set_recursion(int start, int max) | ||
392 | { | ||
393 | unsigned int val = current->trace_recursion; | ||
394 | int bit; | ||
395 | |||
396 | /* A previous recursion check was made */ | ||
397 | if ((val & TRACE_CONTEXT_MASK) > max) | ||
398 | return 0; | ||
399 | |||
400 | bit = trace_get_context_bit() + start; | ||
401 | if (unlikely(val & (1 << bit))) | ||
402 | return -1; | ||
403 | |||
404 | val |= 1 << bit; | ||
405 | current->trace_recursion = val; | ||
406 | barrier(); | ||
407 | |||
408 | return bit; | ||
409 | } | ||
410 | |||
411 | static __always_inline void trace_clear_recursion(int bit) | ||
412 | { | ||
413 | unsigned int val = current->trace_recursion; | ||
414 | |||
415 | if (!bit) | ||
416 | return; | ||
417 | |||
418 | bit = 1 << bit; | ||
419 | val &= ~bit; | ||
420 | |||
421 | barrier(); | ||
422 | current->trace_recursion = val; | ||
423 | } | ||
317 | 424 | ||
318 | #define TRACE_PIPE_ALL_CPU -1 | 425 | #define TRACE_PIPE_ALL_CPU -1 |
319 | 426 | ||
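A callback picks the bit range matching its layer (FTRACE, GLOBAL, or the internal LIST bits). A return of -1 means the callback is already running in this context and must bail out; a return of 0 means an outer layer already holds recursion protection (trace_clear_recursion(0) is then a no-op). The expected calling pattern, mirroring function_trace_call() in the trace_functions.c hunk further down (my_callback() and my_trace_work() are placeholders):

```c
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	preempt_disable_notrace();
	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)			/* recursing in the same context */
		goto out;

	my_trace_work(ip, parent_ip);	/* placeholder for the real work */

	trace_clear_recursion(bit);
out:
	preempt_enable_notrace();
}
```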
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 394783531cbb..22b638b28e48 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -21,8 +21,6 @@ | |||
21 | #include <linux/ktime.h> | 21 | #include <linux/ktime.h> |
22 | #include <linux/trace_clock.h> | 22 | #include <linux/trace_clock.h> |
23 | 23 | ||
24 | #include "trace.h" | ||
25 | |||
26 | /* | 24 | /* |
27 | * trace_clock_local(): the simplest and least coherent tracing clock. | 25 | * trace_clock_local(): the simplest and least coherent tracing clock. |
28 | * | 26 | * |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 880073d0b946..57e9b284250c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,7 +116,6 @@ static int trace_define_common_fields(void) | |||
116 | __common_field(unsigned char, flags); | 116 | __common_field(unsigned char, flags); |
117 | __common_field(unsigned char, preempt_count); | 117 | __common_field(unsigned char, preempt_count); |
118 | __common_field(int, pid); | 118 | __common_field(int, pid); |
119 | __common_field(int, padding); | ||
120 | 119 | ||
121 | return ret; | 120 | return ret; |
122 | } | 121 | } |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8e3ad8082ab7..1c327ef13a9a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -47,34 +47,6 @@ static void function_trace_start(struct trace_array *tr) | |||
47 | tracing_reset_online_cpus(tr); | 47 | tracing_reset_online_cpus(tr); |
48 | } | 48 | } |
49 | 49 | ||
50 | static void | ||
51 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip, | ||
52 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
53 | { | ||
54 | struct trace_array *tr = func_trace; | ||
55 | struct trace_array_cpu *data; | ||
56 | unsigned long flags; | ||
57 | long disabled; | ||
58 | int cpu; | ||
59 | int pc; | ||
60 | |||
61 | if (unlikely(!ftrace_function_enabled)) | ||
62 | return; | ||
63 | |||
64 | pc = preempt_count(); | ||
65 | preempt_disable_notrace(); | ||
66 | local_save_flags(flags); | ||
67 | cpu = raw_smp_processor_id(); | ||
68 | data = tr->data[cpu]; | ||
69 | disabled = atomic_inc_return(&data->disabled); | ||
70 | |||
71 | if (likely(disabled == 1)) | ||
72 | trace_function(tr, ip, parent_ip, flags, pc); | ||
73 | |||
74 | atomic_dec(&data->disabled); | ||
75 | preempt_enable_notrace(); | ||
76 | } | ||
77 | |||
78 | /* Our option */ | 50 | /* Our option */ |
79 | enum { | 51 | enum { |
80 | TRACE_FUNC_OPT_STACK = 0x1, | 52 | TRACE_FUNC_OPT_STACK = 0x1, |
@@ -85,34 +57,34 @@ static struct tracer_flags func_flags; | |||
85 | static void | 57 | static void |
86 | function_trace_call(unsigned long ip, unsigned long parent_ip, | 58 | function_trace_call(unsigned long ip, unsigned long parent_ip, |
87 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 59 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
88 | |||
89 | { | 60 | { |
90 | struct trace_array *tr = func_trace; | 61 | struct trace_array *tr = func_trace; |
91 | struct trace_array_cpu *data; | 62 | struct trace_array_cpu *data; |
92 | unsigned long flags; | 63 | unsigned long flags; |
93 | long disabled; | 64 | unsigned int bit; |
94 | int cpu; | 65 | int cpu; |
95 | int pc; | 66 | int pc; |
96 | 67 | ||
97 | if (unlikely(!ftrace_function_enabled)) | 68 | if (unlikely(!ftrace_function_enabled)) |
98 | return; | 69 | return; |
99 | 70 | ||
100 | /* | 71 | pc = preempt_count(); |
101 | * Need to use raw, since this must be called before the | 72 | preempt_disable_notrace(); |
102 | * recursive protection is performed. | ||
103 | */ | ||
104 | local_irq_save(flags); | ||
105 | cpu = raw_smp_processor_id(); | ||
106 | data = tr->data[cpu]; | ||
107 | disabled = atomic_inc_return(&data->disabled); | ||
108 | 73 | ||
109 | if (likely(disabled == 1)) { | 74 | bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX); |
110 | pc = preempt_count(); | 75 | if (bit < 0) |
76 | goto out; | ||
77 | |||
78 | cpu = smp_processor_id(); | ||
79 | data = tr->data[cpu]; | ||
80 | if (!atomic_read(&data->disabled)) { | ||
81 | local_save_flags(flags); | ||
111 | trace_function(tr, ip, parent_ip, flags, pc); | 82 | trace_function(tr, ip, parent_ip, flags, pc); |
112 | } | 83 | } |
84 | trace_clear_recursion(bit); | ||
113 | 85 | ||
114 | atomic_dec(&data->disabled); | 86 | out: |
115 | local_irq_restore(flags); | 87 | preempt_enable_notrace(); |
116 | } | 88 | } |
117 | 89 | ||
118 | static void | 90 | static void |
@@ -185,11 +157,6 @@ static void tracing_start_function_trace(void) | |||
185 | { | 157 | { |
186 | ftrace_function_enabled = 0; | 158 | ftrace_function_enabled = 0; |
187 | 159 | ||
188 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
189 | trace_ops.func = function_trace_call_preempt_only; | ||
190 | else | ||
191 | trace_ops.func = function_trace_call; | ||
192 | |||
193 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | 160 | if (func_flags.val & TRACE_FUNC_OPT_STACK) |
194 | register_ftrace_function(&trace_stack_ops); | 161 | register_ftrace_function(&trace_stack_ops); |
195 | else | 162 | else |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4edb4b74eb7e..7008d2e13cf2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -47,6 +47,8 @@ struct fgraph_data { | |||
47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | 47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 | 48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 |
49 | 49 | ||
50 | static unsigned int max_depth; | ||
51 | |||
50 | static struct tracer_opt trace_opts[] = { | 52 | static struct tracer_opt trace_opts[] = { |
51 | /* Display overruns? (for self-debug purpose) */ | 53 | /* Display overruns? (for self-debug purpose) */ |
52 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | 54 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
@@ -250,8 +252,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
250 | return 0; | 252 | return 0; |
251 | 253 | ||
252 | /* trace it when it is-nested-in or is a function enabled. */ | 254 | /* trace it when it is-nested-in or is a function enabled. */ |
253 | if (!(trace->depth || ftrace_graph_addr(trace->func)) || | 255 | if ((!(trace->depth || ftrace_graph_addr(trace->func)) || |
254 | ftrace_graph_ignore_irqs()) | 256 | ftrace_graph_ignore_irqs()) || |
257 | (max_depth && trace->depth >= max_depth)) | ||
255 | return 0; | 258 | return 0; |
256 | 259 | ||
257 | local_irq_save(flags); | 260 | local_irq_save(flags); |
@@ -1457,6 +1460,59 @@ static struct tracer graph_trace __read_mostly = { | |||
1457 | #endif | 1460 | #endif |
1458 | }; | 1461 | }; |
1459 | 1462 | ||
1463 | |||
1464 | static ssize_t | ||
1465 | graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
1466 | loff_t *ppos) | ||
1467 | { | ||
1468 | unsigned long val; | ||
1469 | int ret; | ||
1470 | |||
1471 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | ||
1472 | if (ret) | ||
1473 | return ret; | ||
1474 | |||
1475 | max_depth = val; | ||
1476 | |||
1477 | *ppos += cnt; | ||
1478 | |||
1479 | return cnt; | ||
1480 | } | ||
1481 | |||
1482 | static ssize_t | ||
1483 | graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
1484 | loff_t *ppos) | ||
1485 | { | ||
1486 | char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/ | ||
1487 | int n; | ||
1488 | |||
1489 | n = sprintf(buf, "%d\n", max_depth); | ||
1490 | |||
1491 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); | ||
1492 | } | ||
1493 | |||
1494 | static const struct file_operations graph_depth_fops = { | ||
1495 | .open = tracing_open_generic, | ||
1496 | .write = graph_depth_write, | ||
1497 | .read = graph_depth_read, | ||
1498 | .llseek = generic_file_llseek, | ||
1499 | }; | ||
1500 | |||
1501 | static __init int init_graph_debugfs(void) | ||
1502 | { | ||
1503 | struct dentry *d_tracer; | ||
1504 | |||
1505 | d_tracer = tracing_init_dentry(); | ||
1506 | if (!d_tracer) | ||
1507 | return 0; | ||
1508 | |||
1509 | trace_create_file("max_graph_depth", 0644, d_tracer, | ||
1510 | NULL, &graph_depth_fops); | ||
1511 | |||
1512 | return 0; | ||
1513 | } | ||
1514 | fs_initcall(init_graph_debugfs); | ||
1515 | |||
1460 | static __init int init_graph_trace(void) | 1516 | static __init int init_graph_trace(void) |
1461 | { | 1517 | { |
1462 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); | 1518 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); |
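The new file is created in the tracing debugfs directory, normally /sys/kernel/debug/tracing/max_graph_depth; a value of 0 (the default) means no limit. Condensed, the check it feeds in trace_graph_entry() is (depth_allowed() is an illustrative name, not a real helper):

```c
#include <linux/types.h>

/* Condensed form of the check added to trace_graph_entry() above.
 * depth counts from 0 for the outermost function; max_depth == 0
 * means "no limit".
 */
static bool depth_allowed(unsigned int max_depth, int depth)
{
	return !max_depth || depth < max_depth;
}
```

Writing 1, for example, records only the outermost function of each trace, which keeps function-graph output manageable on deep call chains.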
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 47623169a815..51c819c12c29 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -415,7 +415,8 @@ static void trace_selftest_test_recursion_func(unsigned long ip, | |||
415 | * The ftrace infrastructure should provide the recursion | 415 | * The ftrace infrastructure should provide the recursion |
416 | * protection. If not, this will crash the kernel! | 416 | * protection. If not, this will crash the kernel! |
417 | */ | 417 | */ |
418 | trace_selftest_recursion_cnt++; | 418 | if (trace_selftest_recursion_cnt++ > 10) |
419 | return; | ||
419 | DYN_FTRACE_TEST_NAME(); | 420 | DYN_FTRACE_TEST_NAME(); |
420 | } | 421 | } |
421 | 422 | ||
@@ -452,7 +453,6 @@ trace_selftest_function_recursion(void) | |||
452 | char *func_name; | 453 | char *func_name; |
453 | int len; | 454 | int len; |
454 | int ret; | 455 | int ret; |
455 | int cnt; | ||
456 | 456 | ||
457 | /* The previous test PASSED */ | 457 | /* The previous test PASSED */ |
458 | pr_cont("PASSED\n"); | 458 | pr_cont("PASSED\n"); |
@@ -510,19 +510,10 @@ trace_selftest_function_recursion(void) | |||
510 | 510 | ||
511 | unregister_ftrace_function(&test_recsafe_probe); | 511 | unregister_ftrace_function(&test_recsafe_probe); |
512 | 512 | ||
513 | /* | ||
514 | * If arch supports all ftrace features, and no other task | ||
515 | * was on the list, we should be fine. | ||
516 | */ | ||
517 | if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC) | ||
518 | cnt = 2; /* Should have recursed */ | ||
519 | else | ||
520 | cnt = 1; | ||
521 | |||
522 | ret = -1; | 513 | ret = -1; |
523 | if (trace_selftest_recursion_cnt != cnt) { | 514 | if (trace_selftest_recursion_cnt != 2) { |
524 | pr_cont("*callback not called expected %d times (%d)* ", | 515 | pr_cont("*callback not called expected 2 times (%d)* ", |
525 | cnt, trace_selftest_recursion_cnt); | 516 | trace_selftest_recursion_cnt); |
526 | goto out; | 517 | goto out; |
527 | } | 518 | } |
528 | 519 | ||
@@ -568,7 +559,7 @@ trace_selftest_function_regs(void) | |||
568 | int ret; | 559 | int ret; |
569 | int supported = 0; | 560 | int supported = 0; |
570 | 561 | ||
571 | #ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS | 562 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
572 | supported = 1; | 563 | supported = 1; |
573 | #endif | 564 | #endif |
574 | 565 | ||
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7609dd6714c2..5329e13e74a1 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -77,7 +77,7 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr) | |||
77 | return syscalls_metadata[nr]; | 77 | return syscalls_metadata[nr]; |
78 | } | 78 | } |
79 | 79 | ||
80 | enum print_line_t | 80 | static enum print_line_t |
81 | print_syscall_enter(struct trace_iterator *iter, int flags, | 81 | print_syscall_enter(struct trace_iterator *iter, int flags, |
82 | struct trace_event *event) | 82 | struct trace_event *event) |
83 | { | 83 | { |
@@ -130,7 +130,7 @@ end: | |||
130 | return TRACE_TYPE_HANDLED; | 130 | return TRACE_TYPE_HANDLED; |
131 | } | 131 | } |
132 | 132 | ||
133 | enum print_line_t | 133 | static enum print_line_t |
134 | print_syscall_exit(struct trace_iterator *iter, int flags, | 134 | print_syscall_exit(struct trace_iterator *iter, int flags, |
135 | struct trace_event *event) | 135 | struct trace_event *event) |
136 | { | 136 | { |
@@ -270,7 +270,7 @@ static int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
270 | return ret; | 270 | return ret; |
271 | } | 271 | } |
272 | 272 | ||
273 | void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) | 273 | static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) |
274 | { | 274 | { |
275 | struct syscall_trace_enter *entry; | 275 | struct syscall_trace_enter *entry; |
276 | struct syscall_metadata *sys_data; | 276 | struct syscall_metadata *sys_data; |
@@ -305,7 +305,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
305 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | 305 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); |
306 | } | 306 | } |
307 | 307 | ||
308 | void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | 308 | static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) |
309 | { | 309 | { |
310 | struct syscall_trace_exit *entry; | 310 | struct syscall_trace_exit *entry; |
311 | struct syscall_metadata *sys_data; | 311 | struct syscall_metadata *sys_data; |
@@ -337,7 +337,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
337 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | 337 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); |
338 | } | 338 | } |
339 | 339 | ||
340 | int reg_event_syscall_enter(struct ftrace_event_call *call) | 340 | static int reg_event_syscall_enter(struct ftrace_event_call *call) |
341 | { | 341 | { |
342 | int ret = 0; | 342 | int ret = 0; |
343 | int num; | 343 | int num; |
@@ -356,7 +356,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) | |||
356 | return ret; | 356 | return ret; |
357 | } | 357 | } |
358 | 358 | ||
359 | void unreg_event_syscall_enter(struct ftrace_event_call *call) | 359 | static void unreg_event_syscall_enter(struct ftrace_event_call *call) |
360 | { | 360 | { |
361 | int num; | 361 | int num; |
362 | 362 | ||
@@ -371,7 +371,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call) | |||
371 | mutex_unlock(&syscall_trace_lock); | 371 | mutex_unlock(&syscall_trace_lock); |
372 | } | 372 | } |
373 | 373 | ||
374 | int reg_event_syscall_exit(struct ftrace_event_call *call) | 374 | static int reg_event_syscall_exit(struct ftrace_event_call *call) |
375 | { | 375 | { |
376 | int ret = 0; | 376 | int ret = 0; |
377 | int num; | 377 | int num; |
@@ -390,7 +390,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) | |||
390 | return ret; | 390 | return ret; |
391 | } | 391 | } |
392 | 392 | ||
393 | void unreg_event_syscall_exit(struct ftrace_event_call *call) | 393 | static void unreg_event_syscall_exit(struct ftrace_event_call *call) |
394 | { | 394 | { |
395 | int num; | 395 | int num; |
396 | 396 | ||
@@ -459,7 +459,7 @@ unsigned long __init __weak arch_syscall_addr(int nr) | |||
459 | return (unsigned long)sys_call_table[nr]; | 459 | return (unsigned long)sys_call_table[nr]; |
460 | } | 460 | } |
461 | 461 | ||
462 | int __init init_ftrace_syscalls(void) | 462 | static int __init init_ftrace_syscalls(void) |
463 | { | 463 | { |
464 | struct syscall_metadata *meta; | 464 | struct syscall_metadata *meta; |
465 | unsigned long addr; | 465 | unsigned long addr; |
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c86e6d4f67fb..87b6db4ccbc5 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -258,6 +258,10 @@ static int create_trace_uprobe(int argc, char **argv) | |||
258 | goto fail_address_parse; | 258 | goto fail_address_parse; |
259 | 259 | ||
260 | inode = igrab(path.dentry->d_inode); | 260 | inode = igrab(path.dentry->d_inode); |
261 | if (!S_ISREG(inode->i_mode)) { | ||
262 | ret = -EINVAL; | ||
263 | goto fail_address_parse; | ||
264 | } | ||
261 | 265 | ||
262 | argc -= 2; | 266 | argc -= 2; |
263 | argv += 2; | 267 | argv += 2; |
@@ -356,7 +360,7 @@ fail_address_parse: | |||
356 | if (inode) | 360 | if (inode) |
357 | iput(inode); | 361 | iput(inode); |
358 | 362 | ||
359 | pr_info("Failed to parse address.\n"); | 363 | pr_info("Failed to parse address or file.\n"); |
360 | 364 | ||
361 | return ret; | 365 | return ret; |
362 | } | 366 | } |
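The new S_ISREG() test rejects probes on paths that do not name a regular file before they reach the uprobe layer. A stand-alone sketch of that validation step (get_uprobe_inode() is a hypothetical helper, not the create_trace_uprobe() code; unlike the hunk above, it also guards against igrab() returning NULL):

```c
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/namei.h>

/* Resolve a path, take a reference on its inode, and refuse anything
 * that is not a regular file.
 */
static int get_uprobe_inode(const char *filename, struct inode **inodep)
{
	struct path path;
	struct inode *inode;
	int ret;

	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;

	inode = igrab(path.dentry->d_inode);
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		iput(inode);		/* iput(NULL) is a no-op */
		return -EINVAL;
	}

	*inodep = inode;
	return 0;
}
```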