diff options
Diffstat (limited to 'arch/s390/kernel/ftrace.c')
-rw-r--r-- | arch/s390/kernel/ftrace.c | 57 |
1 file changed, 40 insertions, 17 deletions
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index f908e42e11c4..fcb009d3edde 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Dynamic function tracer architecture backend. | 2 | * Dynamic function tracer architecture backend. |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2009 | 4 | * Copyright IBM Corp. 2009,2014 |
5 | * | 5 | * |
6 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | 6 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, |
7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | #include "entry.h" | 18 | #include "entry.h" |
19 | 19 | ||
20 | void mcount_replace_code(void); | ||
20 | void ftrace_disable_code(void); | 21 | void ftrace_disable_code(void); |
21 | void ftrace_enable_insn(void); | 22 | void ftrace_enable_insn(void); |
22 | 23 | ||
@@ -24,38 +25,50 @@ void ftrace_enable_insn(void); | |||
24 | /* | 25 | /* |
25 | * The 64-bit mcount code looks like this: | 26 | * The 64-bit mcount code looks like this: |
26 | * stg %r14,8(%r15) # offset 0 | 27 | * stg %r14,8(%r15) # offset 0 |
27 | * > larl %r1,<&counter> # offset 6 | 28 | * larl %r1,<&counter> # offset 6 |
28 | * > brasl %r14,_mcount # offset 12 | 29 | * brasl %r14,_mcount # offset 12 |
29 | * lg %r14,8(%r15) # offset 18 | 30 | * lg %r14,8(%r15) # offset 18 |
30 | * Total length is 24 bytes. The middle two instructions of the mcount | 31 | * Total length is 24 bytes. The complete mcount block initially gets replaced |
31 | * block get overwritten by ftrace_make_nop / ftrace_make_call. | 32 | * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop |
33 | * only patch the jg/lg instruction within the block. | ||
34 | * Note: we do not patch the first instruction to an unconditional branch, | ||
35 | * since that would break kprobes/jprobes. It is easier to leave the larl | ||
36 | * instruction in and only modify the second instruction. | ||
32 | * The 64-bit enabled ftrace code block looks like this: | 37 | * The 64-bit enabled ftrace code block looks like this: |
33 | * stg %r14,8(%r15) # offset 0 | 38 | * larl %r0,.+24 # offset 0 |
34 | * > lg %r1,__LC_FTRACE_FUNC # offset 6 | 39 | * > lg %r1,__LC_FTRACE_FUNC # offset 6 |
35 | * > lgr %r0,%r0 # offset 12 | 40 | * br %r1 # offset 12 |
36 | * > basr %r14,%r1 # offset 16 | 41 | * brcl 0,0 # offset 14 |
37 | * lg %r14,8(%15) # offset 18 | 42 | * brc 0,0 # offset 20 |
38 | * The return points of the mcount/ftrace function have the same offset 18. | 43 | * The ftrace function gets called with a non-standard C function call ABI |
39 | * The 64-bit disable ftrace code block looks like this: | 44 | * where r0 contains the return address. It is also expected that the called |
40 | * stg %r14,8(%r15) # offset 0 | 45 | * function only clobbers r0 and r1, but restores r2-r15. |
46 | * The return point of the ftrace function has offset 24, so execution | ||
47 | * continues behind the mcount block. | ||
48 | * larl %r0,.+24 # offset 0 | ||
41 | * > jg .+18 # offset 6 | 49 | * > jg .+18 # offset 6 |
42 | * > lgr %r0,%r0 # offset 12 | 50 | * br %r1 # offset 12 |
43 | * > basr %r14,%r1 # offset 16 | 51 | * brcl 0,0 # offset 14 |
44 | * lg %r14,8(%15) # offset 18 | 52 | * brc 0,0 # offset 20 |
45 | * The jg instruction branches to offset 24 to skip as many instructions | 53 | * The jg instruction branches to offset 24 to skip as many instructions |
46 | * as possible. | 54 | * as possible. |
47 | */ | 55 | */ |
48 | asm( | 56 | asm( |
49 | " .align 4\n" | 57 | " .align 4\n" |
58 | "mcount_replace_code:\n" | ||
59 | " larl %r0,0f\n" | ||
50 | "ftrace_disable_code:\n" | 60 | "ftrace_disable_code:\n" |
51 | " jg 0f\n" | 61 | " jg 0f\n" |
52 | " lgr %r0,%r0\n" | 62 | " br %r1\n" |
53 | " basr %r14,%r1\n" | 63 | " brcl 0,0\n" |
64 | " brc 0,0\n" | ||
54 | "0:\n" | 65 | "0:\n" |
55 | " .align 4\n" | 66 | " .align 4\n" |
56 | "ftrace_enable_insn:\n" | 67 | "ftrace_enable_insn:\n" |
57 | " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"); | 68 | " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"); |
58 | 69 | ||
70 | #define MCOUNT_BLOCK_SIZE 24 | ||
71 | #define MCOUNT_INSN_OFFSET 6 | ||
59 | #define FTRACE_INSN_SIZE 6 | 72 | #define FTRACE_INSN_SIZE 6 |
60 | 73 | ||
61 | #else /* CONFIG_64BIT */ | 74 | #else /* CONFIG_64BIT */ |
@@ -116,6 +129,16 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | |||
116 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | 129 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, |
117 | unsigned long addr) | 130 | unsigned long addr) |
118 | { | 131 | { |
132 | #ifdef CONFIG_64BIT | ||
133 | /* Initial replacement of the whole mcount block */ | ||
134 | if (addr == MCOUNT_ADDR) { | ||
135 | if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET, | ||
136 | mcount_replace_code, | ||
137 | MCOUNT_BLOCK_SIZE)) | ||
138 | return -EPERM; | ||
139 | return 0; | ||
140 | } | ||
141 | #endif | ||
119 | if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, | 142 | if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, |
120 | MCOUNT_INSN_SIZE)) | 143 | MCOUNT_INSN_SIZE)) |
121 | return -EPERM; | 144 | return -EPERM; |