diff options
Diffstat (limited to 'arch/s390/kernel/ftrace.c')
-rw-r--r-- | arch/s390/kernel/ftrace.c | 260 |
1 files changed, 260 insertions, 0 deletions
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c new file mode 100644 index 000000000000..82ddfd3a75af --- /dev/null +++ b/arch/s390/kernel/ftrace.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * Dynamic function tracer architecture backend. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/hardirq.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/ftrace.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <trace/syscall.h> | ||
16 | #include <asm/lowcore.h> | ||
17 | |||
18 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
19 | |||
/*
 * Labels defined in the inline assembly templates below. They are
 * declared as functions so that their addresses can be taken and handed
 * to ftrace_modify_code() as instruction patterns.
 */
void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

/* Size in bytes of the single instruction patched at each call site. */
#define FTRACE_INSN_SIZE 4
26 | |||
27 | #ifdef CONFIG_64BIT | ||
28 | |||
/*
 * Instruction templates (64 bit) used to patch mcount call sites.
 *
 * ftrace_disable_code is the MCOUNT_INSN_SIZE-sized replacement for a
 * whole mcount block: its leading branch jumps to local label 0 past
 * the block, so the lg/basr call of the ftrace function is skipped
 * while tracing is disabled.
 * NOTE(review): "8(15)" below names register 15 without the %r prefix,
 * unlike "8(%r15)" in ftrace_call_code; both assemble to the same
 * base register, but confirm the inconsistency is intentional.
 */
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 0f\n"
" .word 0x0024\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n"
"ftrace_disable_return:\n"
" lg %r14,8(15)\n"
" lgr %r0,%r0\n"
"0:\n");

/*
 * FTRACE_INSN_SIZE template for a disabled call site: a relative branch
 * over the remainder of the mcount block (acts as a nop for tracing).
 */
asm(
" .align 4\n"
"ftrace_nop_code:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

/*
 * FTRACE_INSN_SIZE template for an enabled call site: stores the return
 * register %r14 on the stack (presumably the first instruction of the
 * original mcount block - confirm against the mcount asm).
 */
asm(
" .align 4\n"
"ftrace_call_code:\n"
" stg %r14,8(%r15)\n");
51 | #else /* CONFIG_64BIT */ | ||
52 | |||
/*
 * Instruction templates (31 bit) used to patch mcount call sites.
 * Layout mirrors the 64 bit variant above. The trailing "bcr 0,%r7"
 * instructions are two-byte no-ops, presumably padding the block out
 * to MCOUNT_INSN_SIZE - confirm against the 31 bit mcount code.
 */
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 0f\n"
" l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n"
"ftrace_disable_return:\n"
" l %r14,4(%r15)\n"
" j 0f\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
"0:\n");

/*
 * FTRACE_INSN_SIZE template for a disabled call site: a relative branch
 * over the remainder of the mcount block (acts as a nop for tracing).
 */
asm(
" .align 4\n"
"ftrace_nop_code:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

/*
 * FTRACE_INSN_SIZE template for an enabled call site: stores the return
 * register %r14 on the stack.
 */
asm(
" .align 4\n"
"ftrace_call_code:\n"
" st %r14,4(%r15)\n");
80 | #endif /* CONFIG_64BIT */ | ||
81 | |||
82 | static int ftrace_modify_code(unsigned long ip, | ||
83 | void *old_code, int old_size, | ||
84 | void *new_code, int new_size) | ||
85 | { | ||
86 | unsigned char replaced[MCOUNT_INSN_SIZE]; | ||
87 | |||
88 | /* | ||
89 | * Note: Due to modules code can disappear and change. | ||
90 | * We need to protect against faulting as well as code | ||
91 | * changing. We do this by using the probe_kernel_* | ||
92 | * functions. | ||
93 | * This however is just a simple sanity check. | ||
94 | */ | ||
95 | if (probe_kernel_read(replaced, (void *)ip, old_size)) | ||
96 | return -EFAULT; | ||
97 | if (memcmp(replaced, old_code, old_size) != 0) | ||
98 | return -EINVAL; | ||
99 | if (probe_kernel_write((void *)ip, new_code, new_size)) | ||
100 | return -EPERM; | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec, | ||
105 | unsigned long addr) | ||
106 | { | ||
107 | return ftrace_modify_code(rec->ip, | ||
108 | ftrace_call_code, FTRACE_INSN_SIZE, | ||
109 | ftrace_disable_code, MCOUNT_INSN_SIZE); | ||
110 | } | ||
111 | |||
112 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | ||
113 | unsigned long addr) | ||
114 | { | ||
115 | if (addr == MCOUNT_ADDR) | ||
116 | return ftrace_make_initial_nop(mod, rec, addr); | ||
117 | return ftrace_modify_code(rec->ip, | ||
118 | ftrace_call_code, FTRACE_INSN_SIZE, | ||
119 | ftrace_nop_code, FTRACE_INSN_SIZE); | ||
120 | } | ||
121 | |||
122 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
123 | { | ||
124 | return ftrace_modify_code(rec->ip, | ||
125 | ftrace_nop_code, FTRACE_INSN_SIZE, | ||
126 | ftrace_call_code, FTRACE_INSN_SIZE); | ||
127 | } | ||
128 | |||
129 | int ftrace_update_ftrace_func(ftrace_func_t func) | ||
130 | { | ||
131 | ftrace_dyn_func = (unsigned long)func; | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | int __init ftrace_dyn_arch_init(void *data) | ||
136 | { | ||
137 | *(unsigned long *)data = 0; | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
142 | |||
143 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
144 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
145 | /* | ||
146 | * Patch the kernel code at ftrace_graph_caller location: | ||
147 | * The instruction there is branch relative on condition. The condition mask | ||
148 | * is either all ones (always branch aka disable ftrace_graph_caller) or all | ||
149 | * zeroes (nop aka enable ftrace_graph_caller). | ||
150 | * Instruction format for brc is a7m4xxxx where m is the condition mask. | ||
151 | */ | ||
152 | int ftrace_enable_ftrace_graph_caller(void) | ||
153 | { | ||
154 | unsigned short opcode = 0xa704; | ||
155 | |||
156 | return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); | ||
157 | } | ||
158 | |||
159 | int ftrace_disable_ftrace_graph_caller(void) | ||
160 | { | ||
161 | unsigned short opcode = 0xa7f4; | ||
162 | |||
163 | return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); | ||
164 | } | ||
165 | |||
/*
 * Map the address reported by the patched mcount block back to the
 * block's first instruction. The subtraction of the two asm labels
 * (declared as functions; pointer arithmetic on them is a GNU
 * extension) yields the byte offset of ftrace_disable_return within
 * ftrace_disable_code.
 * NOTE(review): assumes addr corresponds to the ftrace_disable_return
 * position inside the block - confirm against the mcount asm.
 */
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}
170 | |||
171 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
172 | |||
/*
 * Non-dynamic variant: adjust by the fixed offset of the return point
 * within the static mcount code.
 * NOTE(review): semantics of MCOUNT_OFFSET_RET presumed from its name -
 * confirm against its definition in asm/ftrace.h.
 */
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}
177 | |||
178 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
179 | |||
180 | /* | ||
181 | * Hook the return address and push it in the stack of return addresses | ||
182 | * in current thread info. | ||
183 | */ | ||
184 | unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) | ||
185 | { | ||
186 | struct ftrace_graph_ent trace; | ||
187 | |||
188 | /* Nmi's are currently unsupported. */ | ||
189 | if (unlikely(in_nmi())) | ||
190 | goto out; | ||
191 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | ||
192 | goto out; | ||
193 | if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY) | ||
194 | goto out; | ||
195 | trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; | ||
196 | /* Only trace if the calling function expects to. */ | ||
197 | if (!ftrace_graph_entry(&trace)) { | ||
198 | current->curr_ret_stack--; | ||
199 | goto out; | ||
200 | } | ||
201 | parent = (unsigned long)return_to_handler; | ||
202 | out: | ||
203 | return parent; | ||
204 | } | ||
205 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
206 | |||
207 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
208 | |||
209 | extern unsigned long __start_syscalls_metadata[]; | ||
210 | extern unsigned long __stop_syscalls_metadata[]; | ||
211 | extern unsigned int sys_call_table[]; | ||
212 | |||
213 | static struct syscall_metadata **syscalls_metadata; | ||
214 | |||
215 | struct syscall_metadata *syscall_nr_to_meta(int nr) | ||
216 | { | ||
217 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) | ||
218 | return NULL; | ||
219 | |||
220 | return syscalls_metadata[nr]; | ||
221 | } | ||
222 | |||
223 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | ||
224 | { | ||
225 | struct syscall_metadata *start; | ||
226 | struct syscall_metadata *stop; | ||
227 | char str[KSYM_SYMBOL_LEN]; | ||
228 | |||
229 | start = (struct syscall_metadata *)__start_syscalls_metadata; | ||
230 | stop = (struct syscall_metadata *)__stop_syscalls_metadata; | ||
231 | kallsyms_lookup(syscall, NULL, NULL, NULL, str); | ||
232 | |||
233 | for ( ; start < stop; start++) { | ||
234 | if (start->name && !strcmp(start->name + 3, str + 3)) | ||
235 | return start; | ||
236 | } | ||
237 | return NULL; | ||
238 | } | ||
239 | |||
/*
 * Build the syscall-number -> metadata table used by
 * syscall_nr_to_meta(). Safe to call more than once: a static reference
 * counter lets only the first caller perform the initialization, and is
 * rolled back on every path that did not complete it, so a failed
 * allocation can be retried by a later caller.
 */
void arch_init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	int i;
	static atomic_t refs;

	/* First caller (0 -> 1) initializes; everyone else backs out. */
	if (atomic_inc_return(&refs) != 1)
		goto out;
	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
				    GFP_KERNEL);
	if (!syscalls_metadata)
		goto out;
	for (i = 0; i < NR_syscalls; i++) {
		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
		syscalls_metadata[i] = meta;
	}
	return;
out:
	/* Undo the increment so refs reflects only successful setup. */
	atomic_dec(&refs);
}
260 | #endif | ||