about summary refs log tree commit diff stats
path: root/arch/s390
diff options
context:
space:
mode:
author Heiko Carstens <heiko.carstens@de.ibm.com> 2014-08-15 06:33:46 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com> 2014-09-09 02:53:28 -0400
commit 2481a87b0250bbf429fc8cdc78331efbc44a0221 (patch)
tree 0bf818ad656c2c12bbe4bc5b7882c99826439b93 /arch/s390
parent 0f1b1ff54b386926ef1a524e60ef89ae7738bbd5 (diff)
s390/ftrace: optimize function graph caller code
When the function graph tracer is disabled, we can skip three additional instructions, so let's just do this. Thus, if function tracing is enabled but function graph tracing is disabled at runtime, we get away with a single unconditional branch.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r-- arch/s390/include/asm/ftrace.h | 1
-rw-r--r-- arch/s390/kernel/ftrace.c | 24
-rw-r--r-- arch/s390/kernel/mcount64.S | 15
3 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index bf246dae1367..7b8e456d76c9 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,6 +4,7 @@
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6extern void _mcount(void); 6extern void _mcount(void);
7extern char ftrace_graph_caller_end;
7 8
8struct dyn_arch_ftrace { }; 9struct dyn_arch_ftrace { };
9 10
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 54d6493c4a56..de55efa5b64e 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -170,6 +170,29 @@ out:
170 * directly after the instructions. To enable the call we calculate 170 * directly after the instructions. To enable the call we calculate
171 * the original offset to prepare_ftrace_return and put it back. 171 * the original offset to prepare_ftrace_return and put it back.
172 */ 172 */
173
174#ifdef CONFIG_64BIT
175
176int ftrace_enable_ftrace_graph_caller(void)
177{
178 static unsigned short offset = 0x0002;
179
180 return probe_kernel_write((void *) ftrace_graph_caller + 2,
181 &offset, sizeof(offset));
182}
183
184int ftrace_disable_ftrace_graph_caller(void)
185{
186 unsigned short offset;
187
188 offset = ((void *) &ftrace_graph_caller_end -
189 (void *) ftrace_graph_caller) / 2;
190 return probe_kernel_write((void *) ftrace_graph_caller + 2,
191 &offset, sizeof(offset));
192}
193
194#else /* CONFIG_64BIT */
195
173int ftrace_enable_ftrace_graph_caller(void) 196int ftrace_enable_ftrace_graph_caller(void)
174{ 197{
175 unsigned short offset; 198 unsigned short offset;
@@ -188,5 +211,6 @@ int ftrace_disable_ftrace_graph_caller(void)
188 &offset, sizeof(offset)); 211 &offset, sizeof(offset));
189} 212}
190 213
214#endif /* CONFIG_64BIT */
191#endif /* CONFIG_DYNAMIC_FTRACE */ 215#endif /* CONFIG_DYNAMIC_FTRACE */
192#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 216#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index c67a8bf0fd9a..5b33c83adde9 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -32,14 +32,17 @@ ENTRY(ftrace_caller)
32 lg %r14,0(%r14) 32 lg %r14,0(%r14)
33 basr %r14,%r14 33 basr %r14,%r14
34#ifdef CONFIG_FUNCTION_GRAPH_TRACER 34#ifdef CONFIG_FUNCTION_GRAPH_TRACER
35# The j instruction gets runtime patched to a nop instruction.
36# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
37# j .+4
38ENTRY(ftrace_graph_caller)
39 j ftrace_graph_caller_end
35 lg %r2,168(%r15) 40 lg %r2,168(%r15)
36 lg %r3,272(%r15) 41 lg %r3,272(%r15)
37ENTRY(ftrace_graph_caller) 42 brasl %r14,prepare_ftrace_return
38# The bras instruction gets runtime patched to call prepare_ftrace_return. 43 stg %r2,168(%r15)
39# See ftrace_enable_ftrace_graph_caller. The patched instruction is: 44ftrace_graph_caller_end:
40# bras %r14,prepare_ftrace_return 45 .globl ftrace_graph_caller_end
41 bras %r14,0f
420: stg %r2,168(%r15)
43#endif 46#endif
44 aghi %r15,160 47 aghi %r15,160
45 lmg %r2,%r5,32(%r15) 48 lmg %r2,%r5,32(%r15)