-rw-r--r--  arch/s390/Kconfig              |  6
-rw-r--r--  arch/s390/include/asm/ftrace.h |  6
-rw-r--r--  arch/s390/kernel/Makefile      |  3
-rw-r--r--  arch/s390/kernel/ftrace.c      | 79
-rw-r--r--  arch/s390/kernel/mcount.S      | 87
-rw-r--r--  arch/s390/kernel/mcount64.S    | 80
-rw-r--r--  scripts/recordmcount.c         |  4
-rwxr-xr-x  scripts/recordmcount.pl        |  7
8 files changed, 56 insertions(+), 216 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 608adfb65dd3..95174d2cc4fb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -117,11 +117,11 @@ config S390
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
-	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE if 64BIT
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
 	select HAVE_FTRACE_MCOUNT_RECORD
-	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+	select HAVE_FUNCTION_TRACER if 64BIT
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index d419362dc231..3aef8afec336 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -18,14 +18,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 
 #endif /* __ASSEMBLY__ */
 
-#ifdef CONFIG_64BIT
 #define MCOUNT_INSN_SIZE	18
-#else
-#define MCOUNT_INSN_SIZE	22
-#endif
 
-#ifdef CONFIG_64BIT
 #define ARCH_SUPPORTS_FTRACE_OPS	1
-#endif
 
 #endif /* _ASM_S390_FTRACE_H */
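
Note on the remaining constant (illustration, not part of the patch): with the 31-bit value of 22 gone, MCOUNT_INSN_SIZE only has to cover the 64-bit mcount block described in arch/s390/kernel/ftrace.c below -- stg, larl and brasl are 6-byte instructions each, hence 18. A standalone C snippet encoding that arithmetic:

    /* Illustrative check only, not kernel code: the mcount block that
     * ftrace patches is stg (6 bytes) + larl (6 bytes) + brasl (6 bytes). */
    #define MCOUNT_INSN_SIZE 18

    _Static_assert(6 + 6 + 6 == MCOUNT_INSN_SIZE,
                   "mcount block is stg + larl + brasl, 6 bytes each");

    int main(void) { return 0; }
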
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c249785669f3..204c43a4c245 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -52,8 +52,7 @@ obj-$(CONFIG_COMPAT)		+= compat_wrapper.o $(compat-obj-y)
 
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER)	+= $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o
 
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index fcb009d3edde..f0072125926c 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -21,9 +21,8 @@ void mcount_replace_code(void);
 void ftrace_disable_code(void);
 void ftrace_enable_insn(void);
 
-#ifdef CONFIG_64BIT
 /*
- * The 64-bit mcount code looks like this:
+ * The mcount code looks like this:
  *	stg	%r14,8(%r15)		# offset 0
  *	larl	%r1,<&counter>		# offset 6
  *	brasl	%r14,_mcount		# offset 12
@@ -34,7 +33,7 @@ void ftrace_enable_insn(void);
  * Note: we do not patch the first instruction to an unconditional branch,
  * since that would break kprobes/jprobes. It is easier to leave the larl
  * instruction in and only modify the second instruction.
- * The 64-bit enabled ftrace code block looks like this:
+ * The enabled ftrace code block looks like this:
  *	larl	%r0,.+24		# offset 0
  * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
  *	br	%r1			# offset 12
@@ -71,65 +70,15 @@ asm(
 #define MCOUNT_INSN_OFFSET	6
 #define FTRACE_INSN_SIZE	6
 
-#else /* CONFIG_64BIT */
-/*
- * The 31-bit mcount code looks like this:
- *	st	%r14,4(%r15)		# offset 0
- * >	bras	%r1,0f			# offset 4
- * >	.long	_mcount			# offset 8
- * >	.long	<&counter>		# offset 12
- * > 0:	l	%r14,0(%r1)		# offset 16
- * >	l	%r1,4(%r1)		# offset 20
- *	basr	%r14,%r14		# offset 24
- *	l	%r14,4(%r15)		# offset 26
- * Total length is 30 bytes. The twenty bytes starting from offset 4
- * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
- * The 31-bit enabled ftrace code block looks like this:
- *	st	%r14,4(%r15)		# offset 0
- * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
- * >	j	0f			# offset 8
- * >	.fill	12,1,0x07		# offset 12
- * 0:	basr	%r14,%r14		# offset 24
- *	l	%r14,4(%r14)		# offset 26
- * The return points of the mcount/ftrace function have the same offset 26.
- * The 31-bit disabled ftrace code block looks like this:
- *	st	%r14,4(%r15)		# offset 0
- * >	j	.+26			# offset 4
- * >	j	0f			# offset 8
- * >	.fill	12,1,0x07		# offset 12
- * 0:	basr	%r14,%r14		# offset 24
- *	l	%r14,4(%r14)		# offset 26
- * The j instruction branches to offset 30 to skip as many instructions
- * as possible.
- */
-asm(
-	"	.align	4\n"
-	"ftrace_disable_code:\n"
-	"	j	1f\n"
-	"	j	0f\n"
-	"	.fill	12,1,0x07\n"
-	"0:	basr	%r14,%r14\n"
-	"1:\n"
-	"	.align	4\n"
-	"ftrace_enable_insn:\n"
-	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");
-
-#define FTRACE_INSN_SIZE	4
-
-#endif /* CONFIG_64BIT */
-
-#ifdef CONFIG_64BIT
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		       unsigned long addr)
 {
 	return 0;
 }
-#endif
 
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-#ifdef CONFIG_64BIT
 	/* Initial replacement of the whole mcount block */
 	if (addr == MCOUNT_ADDR) {
 		if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
@@ -138,7 +87,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 			return -EPERM;
 		return 0;
 	}
-#endif
 	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
 			       MCOUNT_INSN_SIZE))
 		return -EPERM;
@@ -196,8 +144,6 @@ out:
  * the original offset to prepare_ftrace_return and put it back.
  */
 
-#ifdef CONFIG_64BIT
-
 int ftrace_enable_ftrace_graph_caller(void)
 {
 	static unsigned short offset = 0x0002;
@@ -216,25 +162,4 @@ int ftrace_disable_ftrace_graph_caller(void)
 			  &offset, sizeof(offset));
 }
 
-#else /* CONFIG_64BIT */
-
-int ftrace_enable_ftrace_graph_caller(void)
-{
-	unsigned short offset;
-
-	offset = ((void *) prepare_ftrace_return -
-		  (void *) ftrace_graph_caller) / 2;
-	return probe_kernel_write((void *) ftrace_graph_caller + 2,
-				  &offset, sizeof(offset));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-	static unsigned short offset = 0x0002;
-
-	return probe_kernel_write((void *) ftrace_graph_caller + 2,
-				  &offset, sizeof(offset));
-}
-
-#endif /* CONFIG_64BIT */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
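
For readers following the graph-caller patching that survives above (my summary, based on the mcount.S comments below): enabling writes the halfword offset 0x0002 into the j instruction at ftrace_graph_caller + 2, turning it into j .+4 so execution falls through into the graph code; disabling restores the offset that jumps to ftrace_graph_caller_end, skipping it. A user-space sketch of that offset arithmetic with made-up addresses:

    /* Stand-alone sketch, not kernel code: an s390 "j" (brc 15) branch
     * encodes its target as a signed 16-bit number of 2-byte halfwords,
     * stored 2 bytes into the 4-byte instruction -- which is why the
     * kernel patches ftrace_graph_caller + 2 with probe_kernel_write(). */
    #include <stdio.h>

    static unsigned short j_offset(unsigned long insn, unsigned long target)
    {
            return (unsigned short)((target - insn) / 2);
    }

    int main(void)
    {
            unsigned long graph_caller = 0x1000;        /* hypothetical */
            unsigned long graph_caller_end = 0x1020;    /* hypothetical */

            /* enabled: j .+4 falls through into the graph caller body */
            printf("enable  -> 0x%04x\n", j_offset(graph_caller, graph_caller + 4));
            /* disabled: jump straight to ftrace_graph_caller_end */
            printf("disable -> 0x%04x\n", j_offset(graph_caller, graph_caller_end));
            return 0;
    }
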
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index be6dbd9a81a7..07abe8d464d4 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -8,60 +8,73 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/ptrace.h>
 
 	.section .kprobes.text, "ax"
 
 ENTRY(ftrace_stub)
 	br	%r14
 
+#define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
+#define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
+#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
+#define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
+
 ENTRY(_mcount)
 	br	%r14
 
 ENTRY(ftrace_caller)
-	stm	%r2,%r5,16(%r15)
-	bras	%r1,1f
-0:	.long	ftrace_trace_function
-1:	st	%r14,56(%r15)
-	lr	%r0,%r15
-	ahi	%r15,-96
-	l	%r3,100(%r15)
-	la	%r2,0(%r14)
-	st	%r0,__SF_BACKCHAIN(%r15)
-	la	%r3,0(%r3)
-	ahi	%r2,-MCOUNT_INSN_SIZE
-	l	%r14,0b-0b(%r1)
-	l	%r14,0(%r14)
-	basr	%r14,%r14
+	.globl	ftrace_regs_caller
+	.set	ftrace_regs_caller,ftrace_caller
+	lgr	%r1,%r15
+	aghi	%r15,-STACK_FRAME_SIZE
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)
+	stmg	%r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	aghik	%r2,%r0,-MCOUNT_INSN_SIZE
+	lgrl	%r4,function_trace_op
+	lgrl	%r1,ftrace_trace_function
+#else
+	lgr	%r2,%r0
+	aghi	%r2,-MCOUNT_INSN_SIZE
+	larl	%r4,function_trace_op
+	lg	%r4,0(%r4)
+	larl	%r1,ftrace_trace_function
+	lg	%r1,0(%r1)
+#endif
+	lgr	%r3,%r14
+	la	%r5,STACK_PTREGS(%r15)
+	basr	%r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	l	%r2,100(%r15)
-	l	%r3,152(%r15)
-ENTRY(ftrace_graph_caller)
-# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# The j instruction gets runtime patched to a nop instruction.
 # See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-# bras	%r14,prepare_ftrace_return
-	bras	%r14,0f
-0:	st	%r2,100(%r15)
+# j	.+4
+ENTRY(ftrace_graph_caller)
+	j	ftrace_graph_caller_end
+	lg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
+	lg	%r3,(STACK_PTREGS_PSW+8)(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
+ftrace_graph_caller_end:
+	.globl	ftrace_graph_caller_end
 #endif
-	ahi	%r15,96
-	l	%r14,56(%r15)
-	lm	%r2,%r5,16(%r15)
-	br	%r14
+	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
+	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+	br	%r1
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 ENTRY(return_to_handler)
-	stm	%r2,%r5,16(%r15)
-	st	%r14,56(%r15)
-	lr	%r0,%r15
-	ahi	%r15,-96
-	st	%r0,__SF_BACKCHAIN(%r15)
-	bras	%r1,0f
-	.long	ftrace_return_to_handler
-0:	l	%r2,0b-0b(%r1)
-	basr	%r14,%r2
-	lr	%r14,%r2
-	ahi	%r15,96
-	lm	%r2,%r5,16(%r15)
+	stmg	%r2,%r5,32(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	brasl	%r14,ftrace_return_to_handler
+	aghi	%r15,STACK_FRAME_OVERHEAD
+	lgr	%r14,%r2
+	lmg	%r2,%r5,32(%r15)
 	br	%r14
 
 #endif
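
For orientation (my reading of the new ftrace_caller, not text from the patch): the register setup above hands the generic ftrace callback its four arguments -- %r2 derived from %r0 minus MCOUNT_INSN_SIZE (the traced function's ip), %r3 from %r14 (the caller's return address), %r4 = function_trace_op, %r5 = the pt_regs area built on the new stack frame -- which is why the same entry point can also serve as ftrace_regs_caller. A minimal stand-in showing the callback shape this maps onto, with simplified user-space types:

    /* Sketch only: mirrors the ftrace callback prototype that the assembly
     * above calls through %r1 (ftrace_trace_function). Struct types are
     * left opaque; real tracers get them from <linux/ftrace.h>. */
    #include <stddef.h>

    struct ftrace_ops;
    struct pt_regs;

    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs);

    static void demo_tracer(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
    {
            /* with a regs-saving caller, the traced function's registers
             * can be inspected (or modified) through regs */
            (void)ip; (void)parent_ip; (void)op; (void)regs;
    }

    int main(void)
    {
            ftrace_func_t fn = demo_tracer;

            fn(0x1000, 0x2000, NULL, NULL);     /* hypothetical addresses */
            return 0;
    }
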
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
deleted file mode 100644
index 07abe8d464d4..000000000000
--- a/arch/s390/kernel/mcount64.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright IBM Corp. 2008, 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/ftrace.h>
-#include <asm/ptrace.h>
-
-	.section .kprobes.text, "ax"
-
-ENTRY(ftrace_stub)
-	br	%r14
-
-#define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
-#define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
-#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
-#define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
-
-ENTRY(_mcount)
-	br	%r14
-
-ENTRY(ftrace_caller)
-	.globl	ftrace_regs_caller
-	.set	ftrace_regs_caller,ftrace_caller
-	lgr	%r1,%r15
-	aghi	%r15,-STACK_FRAME_SIZE
-	stg	%r1,__SF_BACKCHAIN(%r15)
-	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
-	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)
-	stmg	%r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	aghik	%r2,%r0,-MCOUNT_INSN_SIZE
-	lgrl	%r4,function_trace_op
-	lgrl	%r1,ftrace_trace_function
-#else
-	lgr	%r2,%r0
-	aghi	%r2,-MCOUNT_INSN_SIZE
-	larl	%r4,function_trace_op
-	lg	%r4,0(%r4)
-	larl	%r1,ftrace_trace_function
-	lg	%r1,0(%r1)
-#endif
-	lgr	%r3,%r14
-	la	%r5,STACK_PTREGS(%r15)
-	basr	%r14,%r1
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-# The j instruction gets runtime patched to a nop instruction.
-# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-# j	.+4
-ENTRY(ftrace_graph_caller)
-	j	ftrace_graph_caller_end
-	lg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
-	lg	%r3,(STACK_PTREGS_PSW+8)(%r15)
-	brasl	%r14,prepare_ftrace_return
-	stg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
-ftrace_graph_caller_end:
-	.globl	ftrace_graph_caller_end
-#endif
-	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
-	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
-	br	%r1
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-ENTRY(return_to_handler)
-	stmg	%r2,%r5,32(%r15)
-	lgr	%r1,%r15
-	aghi	%r15,-STACK_FRAME_OVERHEAD
-	stg	%r1,__SF_BACKCHAIN(%r15)
-	brasl	%r14,ftrace_return_to_handler
-	aghi	%r15,STACK_FRAME_OVERHEAD
-	lgr	%r14,%r2
-	lmg	%r2,%r5,32(%r15)
-	br	%r14
-
-#endif
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 650ecc83d7d7..001facfa5b74 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -388,10 +388,6 @@ do_file(char const *const fname)
 			"unrecognized ET_REL file: %s\n", fname);
 		fail_file();
 	}
-	if (w2(ehdr->e_machine) == EM_S390) {
-		reltype = R_390_32;
-		mcount_adjust_32 = -4;
-	}
 	if (w2(ehdr->e_machine) == EM_MIPS) {
 		reltype = R_MIPS_32;
 		is_fake_mcount32 = MIPS32_is_fake_mcount;
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 397b6b84e8c5..d4b665610d67 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -241,13 +241,6 @@ if ($arch eq "x86_64") {
     $objcopy .= " -O elf32-i386";
     $cc .= " -m32";
 
-} elsif ($arch eq "s390" && $bits == 32) {
-    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$";
-    $mcount_adjust = -4;
-    $alignment = 4;
-    $ld .= " -m elf_s390";
-    $cc .= " -m31";
-
 } elsif ($arch eq "s390" && $bits == 64) {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
     $mcount_adjust = -8;