Diffstat (limited to 'arch/sparc64/kernel/kprobes.c')
-rw-r--r--  arch/sparc64/kernel/kprobes.c | 593 ----------------
1 file changed, 0 insertions(+), 593 deletions(-)

diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
deleted file mode 100644
index 201a6e547e4a..000000000000
--- a/arch/sparc64/kernel/kprobes.c
+++ /dev/null
@@ -1,593 +0,0 @@
/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and the interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to the "remembered" regs->tnpc stored above,
 *   and restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */
41 | |||
42 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | ||
43 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | ||
44 | |||
45 | struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; | ||
46 | |||
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
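        /* Slot zero gets a copy of the original instruction and slot
         * one gets a breakpoint, so after single-stepping the copy we
         * trap again immediately and can fix up the machine state.
         */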
        p->ainsn.insn[0] = *p->addr;
        flushi(&p->ainsn.insn[0]);

        p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
        flushi(&p->ainsn.insn[1]);

        p->opcode = *p->addr;
        return 0;
}
58 | |||
59 | void __kprobes arch_arm_kprobe(struct kprobe *p) | ||
60 | { | ||
61 | *p->addr = BREAKPOINT_INSTRUCTION; | ||
62 | flushi(p->addr); | ||
63 | } | ||
64 | |||
65 | void __kprobes arch_disarm_kprobe(struct kprobe *p) | ||
66 | { | ||
67 | *p->addr = p->opcode; | ||
68 | flushi(p->addr); | ||
69 | } | ||
70 | |||
71 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | ||
72 | { | ||
73 | kcb->prev_kprobe.kp = kprobe_running(); | ||
74 | kcb->prev_kprobe.status = kcb->kprobe_status; | ||
75 | kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc; | ||
76 | kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil; | ||
77 | } | ||
78 | |||
79 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | ||
80 | { | ||
81 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; | ||
82 | kcb->kprobe_status = kcb->prev_kprobe.status; | ||
83 | kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc; | ||
84 | kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil; | ||
85 | } | ||
86 | |||
87 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | ||
88 | struct kprobe_ctlblk *kcb) | ||
89 | { | ||
90 | __get_cpu_var(current_kprobe) = p; | ||
91 | kcb->kprobe_orig_tnpc = regs->tnpc; | ||
92 | kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); | ||
93 | } | ||
94 | |||
95 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
96 | struct kprobe_ctlblk *kcb) | ||
97 | { | ||
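        /* Disable interrupts by raising the PIL field in the saved
         * %tstate; the original PIL bits were saved by
         * set_current_kprobe() and are restored once the single-step
         * completes.
         */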
        regs->tstate |= TSTATE_PIL;

        /* Single-step inline if it is a breakpoint instruction.  */
        if (p->opcode == BREAKPOINT_INSTRUCTION) {
                regs->tpc = (unsigned long) p->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
        } else {
                regs->tpc = (unsigned long) &p->ainsn.insn[0];
                regs->tnpc = (unsigned long) &p->ainsn.insn[1];
        }
}
109 | |||
110 | static int __kprobes kprobe_handler(struct pt_regs *regs) | ||
111 | { | ||
112 | struct kprobe *p; | ||
113 | void *addr = (void *) regs->tpc; | ||
114 | int ret = 0; | ||
115 | struct kprobe_ctlblk *kcb; | ||
116 | |||
117 | /* | ||
118 | * We don't want to be preempted for the entire | ||
119 | * duration of kprobe processing | ||
120 | */ | ||
121 | preempt_disable(); | ||
122 | kcb = get_kprobe_ctlblk(); | ||
123 | |||
124 | if (kprobe_running()) { | ||
125 | p = get_kprobe(addr); | ||
126 | if (p) { | ||
127 | if (kcb->kprobe_status == KPROBE_HIT_SS) { | ||
128 | regs->tstate = ((regs->tstate & ~TSTATE_PIL) | | ||
129 | kcb->kprobe_orig_tstate_pil); | ||
130 | goto no_kprobe; | ||
131 | } | ||
132 | /* We have reentered the kprobe_handler(), since | ||
133 | * another probe was hit while within the handler. | ||
134 | * We here save the original kprobes variables and | ||
135 | * just single step on the instruction of the new probe | ||
136 | * without calling any user handlers. | ||
137 | */ | ||
138 | save_previous_kprobe(kcb); | ||
139 | set_current_kprobe(p, regs, kcb); | ||
140 | kprobes_inc_nmissed_count(p); | ||
141 | kcb->kprobe_status = KPROBE_REENTER; | ||
142 | prepare_singlestep(p, regs, kcb); | ||
143 | return 1; | ||
144 | } else { | ||
145 | if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { | ||
146 | /* The breakpoint instruction was removed by | ||
147 | * another cpu right after we hit, no further | ||
148 | * handling of this interrupt is appropriate | ||
149 | */ | ||
150 | ret = 1; | ||
151 | goto no_kprobe; | ||
152 | } | ||
153 | p = __get_cpu_var(current_kprobe); | ||
154 | if (p->break_handler && p->break_handler(p, regs)) | ||
155 | goto ss_probe; | ||
156 | } | ||
157 | goto no_kprobe; | ||
158 | } | ||
159 | |||
160 | p = get_kprobe(addr); | ||
161 | if (!p) { | ||
162 | if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { | ||
163 | /* | ||
164 | * The breakpoint instruction was removed right | ||
165 | * after we hit it. Another cpu has removed | ||
166 | * either a probepoint or a debugger breakpoint | ||
167 | * at this address. In either case, no further | ||
168 | * handling of this interrupt is appropriate. | ||
169 | */ | ||
170 | ret = 1; | ||
171 | } | ||
172 | /* Not one of ours: let kernel handle it */ | ||
173 | goto no_kprobe; | ||
174 | } | ||
175 | |||
176 | set_current_kprobe(p, regs, kcb); | ||
177 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
178 | if (p->pre_handler && p->pre_handler(p, regs)) | ||
179 | return 1; | ||
180 | |||
181 | ss_probe: | ||
182 | prepare_singlestep(p, regs, kcb); | ||
183 | kcb->kprobe_status = KPROBE_HIT_SS; | ||
184 | return 1; | ||
185 | |||
186 | no_kprobe: | ||
187 | preempt_enable_no_resched(); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | /* If INSN is a relative control transfer instruction, | ||
192 | * return the corrected branch destination value. | ||
193 | * | ||
194 | * regs->tpc and regs->tnpc still hold the values of the | ||
195 | * program counters at the time of trap due to the execution | ||
196 | * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1] | ||
197 | * | ||
198 | */ | ||
199 | static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p, | ||
200 | struct pt_regs *regs) | ||
201 | { | ||
202 | unsigned long real_pc = (unsigned long) p->addr; | ||
203 | |||
204 | /* Branch not taken, no mods necessary. */ | ||
205 | if (regs->tnpc == regs->tpc + 0x4UL) | ||
206 | return real_pc + 0x8UL; | ||
207 | |||
208 | /* The three cases are call, branch w/prediction, | ||
209 | * and traditional branch. | ||
210 | */ | ||
        if ((insn & 0xc0000000) == 0x40000000 ||
            (insn & 0xc1c00000) == 0x00400000 ||
            (insn & 0xc1c00000) == 0x00800000) {
                unsigned long ainsn_addr;

                ainsn_addr = (unsigned long) &p->ainsn.insn[0];

                /* The instruction did all the work for us
                 * already, just apply the offset to the correct
                 * instruction location.
                 */
                return (real_pc + (regs->tnpc - ainsn_addr));
        }

        /* It is jmpl or some other absolute PC modification
         * instruction; leave NPC as-is.
         */
        return regs->tnpc;
}
230 | |||
231 | /* If INSN is an instruction which writes it's PC location | ||
232 | * into a destination register, fix that up. | ||
233 | */ | ||
234 | static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn, | ||
235 | unsigned long real_pc) | ||
236 | { | ||
237 | unsigned long *slot = NULL; | ||
238 | |||
239 | /* Simplest case is 'call', which always uses %o7 */ | ||
240 | if ((insn & 0xc0000000) == 0x40000000) { | ||
241 | slot = ®s->u_regs[UREG_I7]; | ||
242 | } | ||
243 | |||
244 | /* 'jmpl' encodes the register inside of the opcode */ | ||
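        /* JMPL is format 3: op (bits 31:30) == 10 and op3 (bits 24:19)
         * == 0x38; the destination register rd lives in bits 29:25.
         */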
        if ((insn & 0xc1f80000) == 0x81c00000) {
                unsigned long rd = ((insn >> 25) & 0x1f);

                if (rd <= 15) {
                        slot = &regs->u_regs[rd];
                } else {
                        /* Hard case, it goes onto the stack.  */
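                        /* Locals and ins (rd 16-31) live in the register
                         * window; flushw_all() forces the windows out to
                         * their stack save areas (%l0-%l7 followed by
                         * %i0-%i7), where we can patch the saved value.
                         */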
                        flushw_all();

                        rd -= 16;
                        slot = (unsigned long *)
                                (regs->u_regs[UREG_FP] + STACK_BIAS);
                        slot += rd;
                }
        }
        if (slot != NULL)
                *slot = real_pc;
}
263 | |||
264 | /* | ||
265 | * Called after single-stepping. p->addr is the address of the | ||
266 | * instruction which has been replaced by the breakpoint | ||
267 | * instruction. To avoid the SMP problems that can occur when we | ||
268 | * temporarily put back the original opcode to single-step, we | ||
269 | * single-stepped a copy of the instruction. The address of this | ||
270 | * copy is &p->ainsn.insn[0]. | ||
271 | * | ||
272 | * This function prepares to return from the post-single-step | ||
273 | * breakpoint trap. | ||
274 | */ | ||
275 | static void __kprobes resume_execution(struct kprobe *p, | ||
276 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) | ||
277 | { | ||
278 | u32 insn = p->ainsn.insn[0]; | ||
279 | |||
280 | regs->tnpc = relbranch_fixup(insn, p, regs); | ||
281 | |||
282 | /* This assignment must occur after relbranch_fixup() */ | ||
283 | regs->tpc = kcb->kprobe_orig_tnpc; | ||
284 | |||
285 | retpc_fixup(regs, insn, (unsigned long) p->addr); | ||
286 | |||
287 | regs->tstate = ((regs->tstate & ~TSTATE_PIL) | | ||
288 | kcb->kprobe_orig_tstate_pil); | ||
289 | } | ||
290 | |||
291 | static int __kprobes post_kprobe_handler(struct pt_regs *regs) | ||
292 | { | ||
293 | struct kprobe *cur = kprobe_running(); | ||
294 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
295 | |||
296 | if (!cur) | ||
297 | return 0; | ||
298 | |||
299 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { | ||
300 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
301 | cur->post_handler(cur, regs, 0); | ||
302 | } | ||
303 | |||
304 | resume_execution(cur, regs, kcb); | ||
305 | |||
        /* Restore the original saved kprobe variables and continue.  */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}
317 | |||
318 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | ||
319 | { | ||
320 | struct kprobe *cur = kprobe_running(); | ||
321 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
322 | const struct exception_table_entry *entry; | ||
323 | |||
324 | switch(kcb->kprobe_status) { | ||
325 | case KPROBE_HIT_SS: | ||
326 | case KPROBE_REENTER: | ||
327 | /* | ||
328 | * We are here because the instruction being single | ||
329 | * stepped caused a page fault. We reset the current | ||
330 | * kprobe and the tpc points back to the probe address | ||
331 | * and allow the page fault handler to continue as a | ||
332 | * normal page fault. | ||
333 | */ | ||
334 | regs->tpc = (unsigned long)cur->addr; | ||
335 | regs->tnpc = kcb->kprobe_orig_tnpc; | ||
336 | regs->tstate = ((regs->tstate & ~TSTATE_PIL) | | ||
337 | kcb->kprobe_orig_tstate_pil); | ||
338 | if (kcb->kprobe_status == KPROBE_REENTER) | ||
339 | restore_previous_kprobe(kcb); | ||
340 | else | ||
341 | reset_current_kprobe(); | ||
342 | preempt_enable_no_resched(); | ||
343 | break; | ||
344 | case KPROBE_HIT_ACTIVE: | ||
345 | case KPROBE_HIT_SSDONE: | ||
346 | /* | ||
347 | * We increment the nmissed count for accounting, | ||
348 | * we can also use npre/npostfault count for accouting | ||
349 | * these specific fault cases. | ||
350 | */ | ||
351 | kprobes_inc_nmissed_count(cur); | ||
352 | |||
353 | /* | ||
354 | * We come here because instructions in the pre/post | ||
355 | * handler caused the page_fault, this could happen | ||
356 | * if handler tries to access user space by | ||
357 | * copy_from_user(), get_user() etc. Let the | ||
358 | * user-specified handler try to fix it first. | ||
359 | */ | ||
360 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
361 | return 1; | ||
362 | |||
363 | /* | ||
364 | * In case the user-specified fault handler returned | ||
365 | * zero, try to fix up. | ||
366 | */ | ||
367 | |||
368 | entry = search_exception_tables(regs->tpc); | ||
369 | if (entry) { | ||
370 | regs->tpc = entry->fixup; | ||
371 | regs->tnpc = regs->tpc + 4; | ||
372 | return 1; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * fixup_exception() could not handle it, | ||
377 | * Let do_page_fault() fix it. | ||
378 | */ | ||
379 | break; | ||
380 | default: | ||
381 | break; | ||
382 | } | ||
383 | |||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | /* | ||
388 | * Wrapper routine to for handling exceptions. | ||
389 | */ | ||
390 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | ||
391 | unsigned long val, void *data) | ||
392 | { | ||
393 | struct die_args *args = (struct die_args *)data; | ||
394 | int ret = NOTIFY_DONE; | ||
395 | |||
396 | if (args->regs && user_mode(args->regs)) | ||
397 | return ret; | ||
398 | |||
399 | switch (val) { | ||
400 | case DIE_DEBUG: | ||
401 | if (kprobe_handler(args->regs)) | ||
402 | ret = NOTIFY_STOP; | ||
403 | break; | ||
404 | case DIE_DEBUG_2: | ||
405 | if (post_kprobe_handler(args->regs)) | ||
406 | ret = NOTIFY_STOP; | ||
407 | break; | ||
408 | default: | ||
409 | break; | ||
410 | } | ||
411 | return ret; | ||
412 | } | ||
413 | |||
414 | asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, | ||
415 | struct pt_regs *regs) | ||
416 | { | ||
417 | BUG_ON(trap_level != 0x170 && trap_level != 0x171); | ||
418 | |||
419 | if (user_mode(regs)) { | ||
420 | local_irq_enable(); | ||
421 | bad_trap(regs, trap_level); | ||
422 | return; | ||
423 | } | ||
424 | |||
425 | /* trap_level == 0x170 --> ta 0x70 | ||
426 | * trap_level == 0x171 --> ta 0x71 | ||
427 | */ | ||
428 | if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2, | ||
429 | (trap_level == 0x170) ? "debug" : "debug_2", | ||
430 | regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP) | ||
431 | bad_trap(regs, trap_level); | ||
432 | } | ||
433 | |||
434 | /* Jprobes support. */ | ||
435 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | ||
436 | { | ||
437 | struct jprobe *jp = container_of(p, struct jprobe, kp); | ||
438 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
439 | |||
440 | memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs)); | ||
441 | |||
442 | regs->tpc = (unsigned long) jp->entry; | ||
443 | regs->tnpc = ((unsigned long) jp->entry) + 0x4UL; | ||
444 | regs->tstate |= TSTATE_PIL; | ||
445 | |||
446 | return 1; | ||
447 | } | ||
448 | |||
449 | void __kprobes jprobe_return(void) | ||
450 | { | ||
451 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
452 | register unsigned long orig_fp asm("g1"); | ||
453 | |||
454 | orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP]; | ||
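        /* Pop register windows with 'restore' until %sp comes back up
         * to the frame that was current when the jprobe fired, then
         * execute the "ta 0x70" breakpoint; longjmp_break_handler()
         * recognizes its address and restores the saved registers.
         */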
        __asm__ __volatile__("\n"
                "1:     cmp %%sp, %0\n\t"
                "blu,a,pt %%xcc, 1b\n\t"
                " restore\n\t"
                ".globl jprobe_return_trap_instruction\n"
                "jprobe_return_trap_instruction:\n\t"
                "ta 0x70"
                : /* no outputs */
                : "r" (orig_fp));
}
465 | |||
466 | extern void jprobe_return_trap_instruction(void); | ||
467 | |||
468 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | ||
469 | { | ||
470 | u32 *addr = (u32 *) regs->tpc; | ||
471 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
472 | |||
473 | if (addr == (u32 *) jprobe_return_trap_instruction) { | ||
474 | memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs)); | ||
475 | preempt_enable_no_resched(); | ||
476 | return 1; | ||
477 | } | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | /* The value stored in the return address register is actually 2 | ||
482 | * instructions before where the callee will return to. | ||
483 | * Sequences usually look something like this | ||
484 | * | ||
485 | * call some_function <--- return register points here | ||
486 | * nop <--- call delay slot | ||
487 | * whatever <--- where callee returns to | ||
488 | * | ||
489 | * To keep trampoline_probe_handler logic simpler, we normalize the | ||
490 | * value kept in ri->ret_addr so we don't need to keep adjusting it | ||
491 | * back and forth. | ||
492 | */ | ||
493 | void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | ||
494 | struct pt_regs *regs) | ||
495 | { | ||
496 | ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8); | ||
497 | |||
498 | /* Replace the return addr with trampoline addr */ | ||
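        /* The callee returns to (return register + 8), so pointing the
         * return register 8 bytes before the trampoline makes the
         * return land exactly on kretprobe_trampoline.
         */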
        regs->u_regs[UREG_RETPC] =
                ((unsigned long)kretprobe_trampoline) - 8;
}

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a
         * given task either because multiple functions in the call path
         * have a return probe installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         * - instances are always inserted at the head of the list
         * - when multiple return probes are registered for the same
         *   function, the first instance's ret_addr will point to the
         *   real return address, and all the rest will point to
         *   kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address.  Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and we have re-enabled preemption).
         */
        return 1;
}

void kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline:\n"
                     "\tnop\n"
                     "\tnop\n");
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return 1;

        return 0;
}