author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit		ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree		644b88f8a71896307d71438e9b3af49126ffb22b /arch/sh/kernel/ftrace.c
parent		43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent		3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/sh/kernel/ftrace.c')
-rw-r--r--	arch/sh/kernel/ftrace.c	227
1 files changed, 145 insertions, 82 deletions
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 2c48e267256e..30e13196d35b 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -62,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return ftrace_replaced_code;
 }
 
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU,
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: an IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code; this is done in an atomic operation.
+ * 3) Write the code.
+ * 4) Clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write,
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code,
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void clear_mod_flag(void)
+{
+	int old = atomic_read(&nmi_running);
+
+	for (;;) {
+		int new = old & ~MOD_CODE_WRITE_FLAG;
+
+		if (old == new)
+			break;
+
+		old = atomic_cmpxchg(&nmi_running, old, new);
+	}
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU can be writing to mod_code_status
+	 * (and the code itself).
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+	/* if we fail, then kill any new writers */
+	if (mod_code_status)
+		clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+		smp_rmb();
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+	/* Must have previous changes seen before executions */
+	smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing nmi_running */
+	smp_mb();
+	atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+	nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+	if (!atomic_read(&nmi_running))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_read(&nmi_running));
+
+	nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_mb();
+
+	wait_for_nmi_and_set_mod_flag();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_mb();
+
+	clear_mod_flag();
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 			      unsigned char *new_code)
 {
@@ -86,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;
 
 	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
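
The comment block introduced above packs two things into the single atomic word nmi_running: a count of in-flight NMI handlers in the low bits, and a "write pending" flag in bit 31. A rough user-space model of that scheme, using C11 atomics in place of the kernel's atomic_t helpers (all names below are illustrative, not kernel API), looks like this:

#include <stdatomic.h>
#include <sched.h>

#define MOD_WRITE_FLAG (1u << 31)

/* Bit 31: a write is pending; bits 0..30: number of handlers in flight. */
static atomic_uint nmi_state;

/* Writer side: the CAS succeeds only when the handler count is zero,
 * so the flag is never raised while a handler is mid-flight. */
static void wait_and_set_flag(void)
{
	unsigned int expected = 0;

	while (!atomic_compare_exchange_weak(&nmi_state, &expected,
					     MOD_WRITE_FLAG)) {
		expected = 0;	/* CAS stored the value it saw; retry from 0 */
		sched_yield();	/* stand-in for the kernel's cpu_relax() */
	}
}

static void clear_flag(void)
{
	atomic_fetch_and(&nmi_state, ~MOD_WRITE_FLAG);
}

/* Handler entry: bump the count; if a write is pending, help perform it.
 * The write is idempotent: every racer stores the same bytes. */
static void handler_enter(void (*do_write)(void))
{
	if (atomic_fetch_add(&nmi_state, 1) & MOD_WRITE_FLAG)
		do_write();
}

static void handler_exit(void)
{
	atomic_fetch_sub(&nmi_state, 1);
}

Here wait_and_set_flag() mirrors wait_for_nmi_and_set_mod_flag() above, and clear_flag() compresses the cmpxchg loop of clear_mod_flag() into a single fetch-and. C11's default sequentially consistent ordering stands in for the explicit smp_mb()/smp_rmb() barriers in the kernel code.
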
@@ -255,84 +399,3 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	}
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#ifdef CONFIG_FTRACE_SYSCALLS
-
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
-extern unsigned long *sys_call_table;
-
-static struct syscall_metadata **syscalls_metadata;
-
-static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
-{
-	struct syscall_metadata *start;
-	struct syscall_metadata *stop;
-	char str[KSYM_SYMBOL_LEN];
-
-
-	start = (struct syscall_metadata *)__start_syscalls_metadata;
-	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
-	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
-
-	for ( ; start < stop; start++) {
-		if (start->name && !strcmp(start->name, str))
-			return start;
-	}
-
-	return NULL;
-}
-
-struct syscall_metadata *syscall_nr_to_meta(int nr)
-{
-	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
-		return NULL;
-
-	return syscalls_metadata[nr];
-}
-
-int syscall_name_to_nr(char *name)
-{
-	int i;
-
-	if (!syscalls_metadata)
-		return -1;
-	for (i = 0; i < NR_syscalls; i++)
-		if (syscalls_metadata[i])
-			if (!strcmp(syscalls_metadata[i]->name, name))
-				return i;
-	return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
-	syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
-{
-	syscalls_metadata[num]->exit_id = id;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
-{
-	int i;
-	struct syscall_metadata *meta;
-	unsigned long **psys_syscall_table = &sys_call_table;
-
-	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
-				    FTRACE_SYSCALL_MAX, GFP_KERNEL);
-	if (!syscalls_metadata) {
-		WARN_ON(1);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
-		meta = find_syscall_meta(psys_syscall_table[i]);
-		syscalls_metadata[i] = meta;
-	}
-
-	return 0;
-}
-arch_initcall(arch_init_ftrace_syscalls);
-#endif /* CONFIG_FTRACE_SYSCALLS */
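
The deletion above drops the arch-local glue that bound each sys_call_table entry to its tracing metadata by symbol name; in later kernels this matching lives in generic tracing code, with the architecture supplying only syscall addresses. A toy, self-contained model of what arch_init_ftrace_syscalls() did, with static tables standing in for the linker section and for kallsyms_lookup() (all names here are hypothetical), is:

#include <stdio.h>
#include <string.h>

struct meta {
	const char *name;	/* handler symbol, e.g. "sys_read" */
	int nr;			/* syscall number, filled in at init */
};

/* Stand-in for the __start/__stop_syscalls_metadata linker section. */
static struct meta metadata[] = {
	{ "sys_read",  -1 },
	{ "sys_write", -1 },
};

/* Stand-in for kallsyms_lookup(): table index ("address") -> symbol name. */
static const char *sym_table[] = {
	"sys_restart_syscall", "sys_read", "sys_write",
};

/* Model of find_syscall_meta(): match a metadata record by symbol name. */
static struct meta *find_meta(const char *sym)
{
	size_t i;

	for (i = 0; i < sizeof(metadata) / sizeof(metadata[0]); i++)
		if (!strcmp(metadata[i].name, sym))
			return &metadata[i];
	return NULL;
}

int main(void)
{
	size_t nr;

	/* Model of arch_init_ftrace_syscalls(): walk the syscall table
	 * and bind each entry to its metadata record. */
	for (nr = 0; nr < sizeof(sym_table) / sizeof(sym_table[0]); nr++) {
		struct meta *m = find_meta(sym_table[nr]);

		if (m) {
			m->nr = (int)nr;
			printf("syscall %zu -> %s\n", nr, m->name);
		}
	}
	return 0;
}

Entries with no matching metadata (here, sys_restart_syscall) are simply left unbound, just as the deleted code stored NULL into syscalls_metadata[i].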