 arch/ia64/kernel/kprobes.c | 103
 arch/ia64/kernel/process.c |  16
 include/asm-ia64/kprobes.h |  13
 3 files changed, 125 insertions(+), 7 deletions(-)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 5978823d5c63..c97e18e634ca 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -290,6 +290,94 @@ static inline void set_current_kprobe(struct kprobe *p)
 	current_kprobe = p;
 }
 
+static void kretprobe_trampoline(void)
+{
+}
+
+/*
+ * At this point the target function has been tricked into
+ * returning into our trampoline.  Look up the associated instance
+ * and then:
+ *    - call the handler function
+ *    - clean up by marking the instance as unused
+ *    - long jump back to the original return address
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long orig_ret_address = 0;
+	unsigned long trampoline_address =
+		((struct fnptr *)kretprobe_trampoline)->ip;
+
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have a
+	 * return probe installed on them, and/or more than one return probe
+	 * was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->cr_iip = orig_ret_address;
+
+	unlock_kprobes();
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we have handled unlocking
+	 * and re-enabling preemption.
+	 */
+	return 1;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *)regs->b0;
+
+		/* Replace the return addr with trampoline addr */
+		regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -492,8 +580,8 @@ static int pre_kprobes_handler(struct die_args *args)
 	if (p->pre_handler && p->pre_handler(p, regs))
 		/*
 		 * Our pre-handler is specifically requesting that we just
-		 * do a return. This is handling the case where the
-		 * pre-handler is really our special jprobe pre-handler.
+		 * do a return. This is used for both the jprobe pre-handler
+		 * and the kretprobe trampoline.
 		 */
 		return 1;
 
@@ -599,3 +687,14 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	*regs = jprobe_saved_regs;
 	return 1;
 }
+
+static struct kprobe trampoline_p = {
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+	trampoline_p.addr =
+		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
+	return register_kprobe(&trampoline_p);
+}
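
[Not part of the patch -- editor's sketch.] With the arch hooks above in
place, a return probe is driven entirely through the generic kretprobe API
(register_kretprobe()/unregister_kretprobe()) added by the companion
arch-independent patch.  The module below only illustrates how that API
would be exercised on ia64: the names (ret_handler, my_kretprobe,
kretprobe_example_*) and the choice of sys_open as a target are assumptions,
and the target symbol must be resolvable by the module on the running
kernel.  Note the function-descriptor dereference, mirroring what
arch_init() does for kretprobe_trampoline.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>

/* Called on return from the probed function; ri->ret_addr is the real
 * return address that arch_prepare_kretprobe() saved before planting
 * the trampoline. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk("probed function returned to %p\n", ri->ret_addr);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler   = ret_handler,
	.maxactive = 4,		/* instances that may be live at once */
};

static int __init kretprobe_example_init(void)
{
	/* On ia64 a function pointer is a descriptor, so take ->ip to get
	 * the actual entry address.  sys_open is purely illustrative. */
	my_kretprobe.kp.addr =
		(kprobe_opcode_t *)((struct fnptr *)sys_open)->ip;
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
	printk("missed %d probe hits\n", my_kretprobe.nmissed);
}

module_init(kretprobe_example_init);
module_exit(kretprobe_example_exit);
MODULE_LICENSE("GPL");
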
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index ebb71f3d6d19..6e35bff05d59 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -27,6 +27,7 @@
 #include <linux/efi.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/kprobes.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -707,6 +708,13 @@ kernel_thread_helper (int (*fn)(void *), void *arg)
 void
 flush_thread (void)
 {
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function; it will be disabled by kprobe_flush_task() if you do.
+	 */
+	kprobe_flush_task(current);
+
 	/* drop floating-point and debug-register state if it exists: */
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
@@ -721,6 +729,14 @@ flush_thread (void)
 void
 exit_thread (void)
 {
+
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function; it will be disabled by kprobe_flush_task() if you do.
+	 */
+	kprobe_flush_task(current);
+
 	ia64_drop_fpu(current);
 #ifdef CONFIG_PERFMON
 	/* if needed, stop monitoring and flush state to perfmon context */
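
[Not part of the patch -- editor's sketch.] kprobe_flush_task() itself lives
in the arch-independent kprobes code added by the companion patch.  Roughly,
it is expected to walk the exiting task's hash bucket and recycle every
return-probe instance still owned by that task (probed functions that will
now never return), using the same helpers the ia64 code above relies on
(kretprobe_inst_table_head(), recycle_rp_inst()).  The excerpt-style sketch
below reconstructs that shape; the kprobe_lock locking and the exact helper
signatures are assumptions, not quotes from the generic code.

/* Sketch only -- the real implementation is generic kernel/kprobes.c code. */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		/* Return the instance to its kretprobe's free list. */
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
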
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 25d8b1edfcba..bf36a32e37e4 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -64,6 +64,8 @@ typedef struct _bundle {
 
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 
+#define ARCH_SUPPORTS_KRETPROBES
+
 #define SLOT0_OPCODE_SHIFT	(37)
 #define SLOT1_p1_OPCODE_SHIFT	(37 - (64-46))
 #define SLOT2_OPCODE_SHIFT	(37)
@@ -95,11 +97,6 @@ struct arch_specific_insn {
 };
 
 /* ia64 does not need this */
-static inline void jprobe_return(void)
-{
-}
-
-/* ia64 does not need this */
 static inline void arch_copy_kprobe(struct kprobe *p)
 {
 }
@@ -107,6 +104,12 @@ static inline void arch_copy_kprobe(struct kprobe *p)
 #ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 		unsigned long val, void *data);
+
+/* ia64 does not need this */
+static inline void jprobe_return(void)
+{
+}
+
 #else			/* !CONFIG_KPROBES */
 static inline int kprobe_exceptions_notify(struct notifier_block *self,
 		unsigned long val, void *data)
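
[Not part of the patch -- editor's note.] ARCH_SUPPORTS_KRETPROBES is the
switch the arch-independent layer keys off: on architectures that define it
(as ia64 now does), register_kretprobe() does real work; elsewhere it
collapses to a stub returning -ENOSYS.  The excerpt-style sketch below shows
that gating under stated assumptions: the generic pre-handler name
(pre_handler_kretprobe), the free_rp_inst() helper, and the free/used
instance list fields are taken on faith from the companion generic patch.

#ifdef ARCH_SUPPORTS_KRETPROBES
int register_kretprobe(struct kretprobe *rp)
{
	int i;

	/* Every hit on rp->kp runs the generic return-probe pre-handler,
	 * which in turn calls arch_prepare_kretprobe() above. */
	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate the instances that get_free_rp_inst() hands out. */
	if (rp->maxactive <= 0)
		rp->maxactive = NR_CPUS;
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		struct kretprobe_instance *inst =
			kmalloc(sizeof(*inst), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	return register_kprobe(&rp->kp);
}
#else	/* !ARCH_SUPPORTS_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
#endif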