aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-08-04 14:50:00 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-04 14:50:00 -0400
commitb8c0aa46b3e86083721b57ed2eec6bd2c29ebfba (patch)
tree45e349bf8a14aa99279d323fdc515e849fd349f3 /arch/x86/kernel
parentc7ed326fa7cafb83ced5a8b02517a61672fe9e90 (diff)
parentdc6f03f26f570104a2bb03f9d1deb588026d7c75 (diff)
Merge tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt: "This pull request has a lot of work done. The main thing is the changes to the ftrace function callback infrastructure. It's introducing a way to allow different functions to call directly different trampolines instead of all calling the same "mcount" one. The only user of this for now is the function graph tracer, which always had a different trampoline, but the function tracer trampoline was called and did basically nothing, and then the function graph tracer trampoline was called. The difference now, is that the function graph tracer trampoline can be called directly if a function is only being traced by the function graph trampoline. If function tracing is also happening on the same function, the old way is still done. The accounting for this takes up more memory when function graph tracing is activated, as it needs to keep track of which functions it uses. I have a new way that won't take as much memory, but it's not ready yet for this merge window, and will have to wait for the next one. Another big change was the removal of the ftrace_start/stop() calls that were used by the suspend/resume code that stopped function tracing when entering into suspend and resume paths. The stop of ftrace was done because there was some function that would crash the system if one called smp_processor_id()! The stop/start was a big hammer to solve the issue at the time, which was when ftrace was first introduced into Linux. 
Now ftrace has better infrastructure to debug such issues, and I found the problem function and labeled it with "notrace" and function tracing can now safely be activated all the way down into the guts of suspend and resume. Other changes include clean ups of uprobe code, clean up of the trace_seq() code, and other various small fixes and clean ups to ftrace and tracing" * tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits) ftrace: Add warning if tramp hash does not match nr_trampolines ftrace: Fix trampoline hash update check on rec->flags ring-buffer: Use rb_page_size() instead of open coded head_page size ftrace: Rename ftrace_ops field from trampolines to nr_trampolines tracing: Convert local function_graph functions to static ftrace: Do not copy old hash when resetting tracing: let user specify tracing_thresh after selecting function_graph ring-buffer: Always run per-cpu ring buffer resize with schedule_work_on() tracing: Remove function_trace_stop and HAVE_FUNCTION_TRACE_MCOUNT_TEST s390/ftrace: remove check of obsolete variable function_trace_stop arm64, ftrace: Remove check of obsolete variable function_trace_stop Blackfin: ftrace: Remove check of obsolete variable function_trace_stop metag: ftrace: Remove check of obsolete variable function_trace_stop microblaze: ftrace: Remove check of obsolete variable function_trace_stop MIPS: ftrace: Remove check of obsolete variable function_trace_stop parisc: ftrace: Remove check of obsolete variable function_trace_stop sh: ftrace: Remove check of obsolete variable function_trace_stop sparc64,ftrace: Remove check of obsolete variable function_trace_stop tile: ftrace: Remove check of obsolete variable function_trace_stop ftrace: x86: Remove check of obsolete variable function_trace_stop ...
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/entry_32.S9
-rw-r--r--arch/x86/kernel/ftrace.c3
-rw-r--r--arch/x86/kernel/mcount_64.S13
3 files changed, 4 insertions, 21 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 0d0c9d4ab6d5..47c410d99f5d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1059,9 +1059,6 @@ ENTRY(mcount)
1059END(mcount) 1059END(mcount)
1060 1060
1061ENTRY(ftrace_caller) 1061ENTRY(ftrace_caller)
1062 cmpl $0, function_trace_stop
1063 jne ftrace_stub
1064
1065 pushl %eax 1062 pushl %eax
1066 pushl %ecx 1063 pushl %ecx
1067 pushl %edx 1064 pushl %edx
@@ -1093,8 +1090,6 @@ END(ftrace_caller)
1093 1090
1094ENTRY(ftrace_regs_caller) 1091ENTRY(ftrace_regs_caller)
1095 pushf /* push flags before compare (in cs location) */ 1092 pushf /* push flags before compare (in cs location) */
1096 cmpl $0, function_trace_stop
1097 jne ftrace_restore_flags
1098 1093
1099 /* 1094 /*
1100 * i386 does not save SS and ESP when coming from kernel. 1095 * i386 does not save SS and ESP when coming from kernel.
@@ -1153,7 +1148,6 @@ GLOBAL(ftrace_regs_call)
1153 popf /* Pop flags at end (no addl to corrupt flags) */ 1148 popf /* Pop flags at end (no addl to corrupt flags) */
1154 jmp ftrace_ret 1149 jmp ftrace_ret
1155 1150
1156ftrace_restore_flags:
1157 popf 1151 popf
1158 jmp ftrace_stub 1152 jmp ftrace_stub
1159#else /* ! CONFIG_DYNAMIC_FTRACE */ 1153#else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -1162,9 +1156,6 @@ ENTRY(mcount)
1162 cmpl $__PAGE_OFFSET, %esp 1156 cmpl $__PAGE_OFFSET, %esp
1163 jb ftrace_stub /* Paging not enabled yet? */ 1157 jb ftrace_stub /* Paging not enabled yet? */
1164 1158
1165 cmpl $0, function_trace_stop
1166 jne ftrace_stub
1167
1168 cmpl $ftrace_stub, ftrace_trace_function 1159 cmpl $ftrace_stub, ftrace_trace_function
1169 jnz trace 1160 jnz trace
1170#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1161#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cbc4a91b131e..3386dc9aa333 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -703,6 +703,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
703 unsigned long return_hooker = (unsigned long) 703 unsigned long return_hooker = (unsigned long)
704 &return_to_handler; 704 &return_to_handler;
705 705
706 if (unlikely(ftrace_graph_is_dead()))
707 return;
708
706 if (unlikely(atomic_read(&current->tracing_graph_pause))) 709 if (unlikely(atomic_read(&current->tracing_graph_pause)))
707 return; 710 return;
708 711
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index c050a0153168..c73aecf10d34 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -46,10 +46,6 @@ END(function_hook)
46.endm 46.endm
47 47
48ENTRY(ftrace_caller) 48ENTRY(ftrace_caller)
49 /* Check if tracing was disabled (quick check) */
50 cmpl $0, function_trace_stop
51 jne ftrace_stub
52
53 ftrace_caller_setup 49 ftrace_caller_setup
54 /* regs go into 4th parameter (but make it NULL) */ 50 /* regs go into 4th parameter (but make it NULL) */
55 movq $0, %rcx 51 movq $0, %rcx
@@ -73,10 +69,6 @@ ENTRY(ftrace_regs_caller)
73 /* Save the current flags before compare (in SS location)*/ 69 /* Save the current flags before compare (in SS location)*/
74 pushfq 70 pushfq
75 71
76 /* Check if tracing was disabled (quick check) */
77 cmpl $0, function_trace_stop
78 jne ftrace_restore_flags
79
80 /* skip=8 to skip flags saved in SS */ 72 /* skip=8 to skip flags saved in SS */
81 ftrace_caller_setup 8 73 ftrace_caller_setup 8
82 74
@@ -131,7 +123,7 @@ GLOBAL(ftrace_regs_call)
131 popfq 123 popfq
132 124
133 jmp ftrace_return 125 jmp ftrace_return
134ftrace_restore_flags: 126
135 popfq 127 popfq
136 jmp ftrace_stub 128 jmp ftrace_stub
137 129
@@ -141,9 +133,6 @@ END(ftrace_regs_caller)
141#else /* ! CONFIG_DYNAMIC_FTRACE */ 133#else /* ! CONFIG_DYNAMIC_FTRACE */
142 134
143ENTRY(function_hook) 135ENTRY(function_hook)
144 cmpl $0, function_trace_stop
145 jne ftrace_stub
146
147 cmpq $ftrace_stub, ftrace_trace_function 136 cmpq $ftrace_stub, ftrace_trace_function
148 jnz trace 137 jnz trace
149 138