Diffstat (limited to 'arch/s390/kernel/ftrace.c')
-rw-r--r--	arch/s390/kernel/ftrace.c | 238
1 file changed, 117 insertions(+), 121 deletions(-)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 6a83d0581317..78bdf0e5dff7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -4,7 +4,7 @@
  * Copyright IBM Corp. 2009
  *
  *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
+ *		Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/hardirq.h>
@@ -12,176 +12,144 @@
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 
+#ifdef CONFIG_64BIT
+#define MCOUNT_OFFSET_RET 12
+#else
+#define MCOUNT_OFFSET_RET 22
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 void ftrace_disable_code(void);
-void ftrace_disable_return(void);
-void ftrace_call_code(void);
-void ftrace_nop_code(void);
-
-#define FTRACE_INSN_SIZE	4
+void ftrace_enable_insn(void);
 
 #ifdef CONFIG_64BIT
-
+/*
+ * The 64-bit mcount code looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	larl	%r1,<&counter>		# offset 6
+ * >	brasl	%r14,_mcount		# offset 12
+ *	lg	%r14,8(%r15)		# offset 18
+ * Total length is 24 bytes. The middle two instructions of the mcount
+ * block get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 64-bit enabled ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%r15)		# offset 18
+ * The return points of the mcount/ftrace function have the same offset 18.
+ * The 64-bit disabled ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	jg	.+18			# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%r15)		# offset 18
+ * The jg instruction branches to offset 24 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
-	"	j	0f\n"
-	"	.word	0x0024\n"
-	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	lg	%r14,8(15)\n"
+	"	jg	0f\n"
 	"	lgr	%r0,%r0\n"
-	"0:\n");
-
-asm(
+	"	basr	%r14,%r1\n"
+	"0:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");
 
-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	stg	%r14,8(%r15)\n");
+#define FTRACE_INSN_SIZE	6
 
 #else /* CONFIG_64BIT */
-
+/*
+ * The 31-bit mcount code looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	bras	%r1,0f			# offset 4
+ * >	.long	_mcount			# offset 8
+ * >	.long	<&counter>		# offset 12
+ * > 0:	l	%r14,0(%r1)		# offset 16
+ * >	l	%r1,4(%r1)		# offset 20
+ *	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * Total length is 30 bytes. The twenty bytes starting from offset 4
+ * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 31-bit enabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ *   0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * The return points of the mcount/ftrace function have the same offset 26.
+ * The 31-bit disabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	j	.+26			# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ *   0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * The j instruction branches to offset 30 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
+	"	j	1f\n"
 	"	j	0f\n"
-	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	l	%r14,4(%r15)\n"
-	"	j	0f\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"0:\n");
-
-asm(
+	"	.fill	12,1,0x07\n"
+	"0:	basr	%r14,%r14\n"
+	"1:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");
 
-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	st	%r14,4(%r15)\n");
+#define FTRACE_INSN_SIZE	4
 
 #endif /* CONFIG_64BIT */
 
-static int ftrace_modify_code(unsigned long ip,
-			      void *old_code, int old_size,
-			      void *new_code, int new_size)
-{
-	unsigned char replaced[MCOUNT_INSN_SIZE];
-
-	/*
-	 * Note: Due to modules code can disappear and change.
-	 * We need to protect against faulting as well as code
-	 * changing. We do this by using the probe_kernel_*
-	 * functions.
-	 * This however is just a simple sanity check.
-	 */
-	if (probe_kernel_read(replaced, (void *)ip, old_size))
-		return -EFAULT;
-	if (memcmp(replaced, old_code, old_size) != 0)
-		return -EINVAL;
-	if (probe_kernel_write((void *)ip, new_code, new_size))
-		return -EPERM;
-	return 0;
-}
-
-static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
-				   unsigned long addr)
-{
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_disable_code, MCOUNT_INSN_SIZE);
-}
 
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	if (addr == MCOUNT_ADDR)
-		return ftrace_make_initial_nop(mod, rec, addr);
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_nop_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
+			       MCOUNT_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	return ftrace_modify_code(rec->ip,
-				  ftrace_nop_code, FTRACE_INSN_SIZE,
-				  ftrace_call_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
+			       FTRACE_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	ftrace_dyn_func = (unsigned long)func;
 	return 0;
 }
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	*(unsigned long *)data = 0;
+	*(unsigned long *) data = 0;
 	return 0;
 }
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-/*
- * Patch the kernel code at ftrace_graph_caller location:
- * The instruction there is branch relative on condition. The condition mask
- * is either all ones (always branch aka disable ftrace_graph_caller) or all
- * zeroes (nop aka enable ftrace_graph_caller).
- * Instruction format for brc is a7m4xxxx where m is the condition mask.
- */
-int ftrace_enable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa704;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa7f4;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - (ftrace_disable_return - ftrace_disable_code);
-}
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - MCOUNT_OFFSET_RET;
-}
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
 /*
  * Hook the return address and push it in the stack of return addresses
  * in current thread info.
  */
-unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
+					      unsigned long ip)
 {
 	struct ftrace_graph_ent trace;
 
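
The hunk above is the heart of the change: instead of the old read/compare/write cycle in ftrace_modify_code(), both ftrace_make_nop() and ftrace_make_call() now issue a single probe_kernel_write() of a fixed template, and re-enabling only has to restore the first instruction because the lgr/basr tail is shared between the enabled and disabled blocks. The user-space sketch below mirrors the 64-bit scheme on a plain byte buffer: memcpy() stands in for probe_kernel_write(), the opcodes are hand-assembled from the comment block above (the __LC_FTRACE_FUNC lowcore displacement is left as zero), and PATCH_OFFSET plus the sizes are read off that comment's offsets. The names are local to the sketch; this is an illustration, not the kernel's code.

	#include <stdio.h>
	#include <string.h>

	#define PATCH_OFFSET	  6	/* rec->ip: first patched byte of the block */
	#define MCOUNT_INSN_SIZE 12	/* disabling rewrites offsets 6..17 */
	#define FTRACE_INSN_SIZE  6	/* enabling rewrites only the lg at offset 6 */

	/* jg .+18 / lgr %r0,%r0 / basr %r14,%r1 -- the "disabled" template */
	static const unsigned char disable_code[MCOUNT_INSN_SIZE] = {
		0xc0, 0xf4, 0x00, 0x00, 0x00, 0x09,	/* jg .+18 (9 halfwords) */
		0xb9, 0x04, 0x00, 0x00,			/* lgr %r0,%r0 */
		0x0d, 0xe1,				/* basr %r14,%r1 */
	};

	/* lg %r1,__LC_FTRACE_FUNC -- displacement is a zero placeholder here */
	static const unsigned char enable_insn[FTRACE_INSN_SIZE] = {
		0xe3, 0x10, 0x00, 0x00, 0x00, 0x04,
	};

	static void dump(const char *tag, const unsigned char *p, size_t n)
	{
		size_t i;

		printf("%-9s", tag);
		for (i = 0; i < n; i++)
			printf(" %02x", p[i]);
		putchar('\n');
	}

	int main(void)
	{
		unsigned char block[24] = { 0 };	/* one 24-byte mcount block */
		unsigned char *ip = block + PATCH_OFFSET;

		/* ftrace_make_nop(): one 12-byte write installs the jg-based block */
		memcpy(ip, disable_code, MCOUNT_INSN_SIZE);
		dump("disabled:", block, sizeof(block));

		/* ftrace_make_call(): lgr/basr are already in place, so a single
		 * 6-byte write of the lg instruction re-enables tracing */
		memcpy(ip, enable_insn, FTRACE_INSN_SIZE);
		dump("enabled:", block, sizeof(block));
		return 0;
	}

The point the templates encode is that the nop/call round trip only ever rewrites bytes the templates themselves own, so the old read-back-and-compare sanity check becomes unnecessary.
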
@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
 		goto out;
 	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
 		goto out;
-	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
 	/* Only trace if the calling function expects to. */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
 		goto out;
 	}
-	parent = (unsigned long)return_to_handler;
+	parent = (unsigned long) return_to_handler;
 out:
 	return parent;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at the ftrace_graph_caller location. The instruction
+ * there is a branch relative and save to prepare_ftrace_return. To disable
+ * the call to prepare_ftrace_return we patch the bras offset to point
+ * directly after the instruction. To enable the call we calculate
+ * the original offset to prepare_ftrace_return and put it back.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short offset;
+
+	offset = ((void *) prepare_ftrace_return -
+		  (void *) ftrace_graph_caller) / 2;
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	static unsigned short offset = 0x0002;
+
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
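
The last hunk replaces the old trick of flipping the brc condition mask with patching the 16-bit immediate of the bras %r14,prepare_ftrace_return at ftrace_graph_caller. That immediate sits in bytes 2..3 of the 4-byte RI-format instruction and counts signed halfwords from the instruction address, so 0x0002 means "branch to .+4", i.e. straight past the bras. Below is a small sketch of that arithmetic with made-up addresses; on a real kernel both symbols are resolved by the linker and the 2-byte store goes through probe_kernel_write().

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical link-time addresses, 1 KiB apart */
		uintptr_t ftrace_graph_caller = 0x10000;
		uintptr_t prepare_ftrace_return = 0x10400;
		/* bras %r14,<imm16>: RI format, bytes a7 e5 iiii */
		unsigned char insn[4] = { 0xa7, 0xe5, 0x00, 0x00 };
		uint16_t offset;

		/* enable: distance from the bras itself, counted in halfwords */
		offset = (uint16_t)((prepare_ftrace_return - ftrace_graph_caller) / 2);
		insn[2] = offset >> 8;		/* stored big-endian, as on real s390, */
		insn[3] = offset & 0xff;	/* where the kernel just writes the u16 */
		printf("enabled:  %02x%02x %02x%02x (bras %%r14,.+%d)\n",
		       insn[0], insn[1], insn[2], insn[3], (int)offset * 2);

		/* disable: offset 0x0002 targets .+4, the very next instruction */
		offset = 0x0002;
		insn[2] = offset >> 8;
		insn[3] = offset & 0xff;
		printf("disabled: %02x%02x %02x%02x (bras %%r14,.+4)\n",
		       insn[0], insn[1], insn[2], insn[3]);
		return 0;
	}

Keeping the bras in place and only shrinking its offset means enabling and disabling is always the same 2-byte store at ftrace_graph_caller + 2, never a change of instruction or instruction length.
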