path: root/arch/i386/kernel/kprobes.c
author		Hien Nguyen <hien@us.ibm.com>	2005-06-23 03:09:19 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-23 12:45:21 -0400
commit		b94cce926b2b902b79380ccba370d6f9f2980de0 (patch)
tree		da2680b1ec36eae6423ba446d09284d2642ae82b /arch/i386/kernel/kprobes.c
parent		2fa389c5eb8c97d621653184d2adf5fdbd4a3167 (diff)
[PATCH] kprobes: function-return probes
This patch adds function-return probes to kprobes for the i386 architecture.
This enables you to establish a handler to be run when a function returns.

1. API

Two new functions are added to kprobes:

    int register_kretprobe(struct kretprobe *rp);
    void unregister_kretprobe(struct kretprobe *rp);

2. Registration and unregistration

2.1 Register

To register a function-return probe, the user populates the following fields
in a kretprobe object and calls register_kretprobe() with the kretprobe
address as an argument:

kp.addr - the function's address

handler - this function is run after the ret instruction executes, but
before control returns to the return address in the caller.

maxactive - the maximum number of instances of the probed function that can
be active concurrently. For example, if the function is non-recursive and is
called with a spinlock or mutex held, maxactive = 1 should be enough. If the
function is non-recursive and can never relinquish the CPU (e.g., via a
semaphore or preemption), NR_CPUS should be enough. maxactive is used to
determine how many kretprobe_instance objects to allocate for this particular
probed function. If maxactive <= 0, it is set to a default value: max(10,
2 * NR_CPUS) if CONFIG_PREEMPT is enabled, otherwise NR_CPUS.

For example:

    struct kretprobe rp;
    rp.kp.addr = /* entry-point address */
    rp.handler = /* return-probe handler */
    rp.maxactive = /* e.g., 1 or NR_CPUS, or 0; see the explanation above */
    register_kretprobe(&rp);

The following field may also be of interest:

nmissed - initialized to zero when the function-return probe is registered,
and incremented every time the probed function is entered but no
kretprobe_instance object is available for establishing the function-return
probe (i.e., because maxactive was set too low).

2.2 Unregister

To unregister a function-return probe, the user calls unregister_kretprobe()
with the same kretprobe object as registered previously. If a probed function
is running when the return probe is unregistered, the function will return as
expected, but the handler won't be run.

3. Limitations

3.1 This patch supports only the i386 architecture, but patches for x86_64
and ppc64 are anticipated soon.

3.2 Return probes operate by replacing the return address on the stack (or in
a known register, such as the lr register on ppc). This may cause
__builtin_return_address(0), when invoked from the return-probed function, to
return the address of the return-probe trampoline.

3.3 This implementation uses the "Multiprobes at an address" feature in
2.6.12-rc3-mm3.

3.4 Due to a limitation in multi-probes, you cannot currently establish a
return probe and a jprobe on the same function. A patch to remove this
limitation is being tested.

This feature is required by SystemTap (http://sourceware.org/systemtap), and
reflects ideas contributed by several SystemTap developers, including
Will Cohen and Ananth Mavinakayanahalli.

Signed-off-by: Hien Nguyen <hien@us.ibm.com>
Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
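[Editorial illustration, not part of the patch: a minimal sketch of how a module might use the API described above, assuming a 2.6-era build environment. The handler signature follows the way trampoline_probe_handler() in the diff below invokes rp->handler(ri, regs); the module name, probed_func(), and the printk text are made up for the example.]

    /*
     * Illustrative sketch only -- not part of this patch. It probes the
     * module's own probed_func(), so no external symbol lookup is needed.
     */
    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/kprobes.h>

    /* A local function to probe; noinline so the direct call isn't inlined away. */
    static noinline int probed_func(int x)
    {
            return x + 1;
    }

    /* Runs after probed_func()'s ret, before control reaches the caller. */
    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            printk(KERN_INFO "probed_func returned, eax=%lx\n", regs->eax);
            return 0;
    }

    static struct kretprobe my_kretprobe = {
            .handler = ret_handler,
            .maxactive = 0,         /* <= 0: let kprobes pick the default */
    };

    static int __init kretprobe_example_init(void)
    {
            int ret;

            my_kretprobe.kp.addr = (kprobe_opcode_t *) probed_func;
            ret = register_kretprobe(&my_kretprobe);
            if (ret < 0)
                    return ret;

            probed_func(1);         /* trigger the return probe once */
            return 0;
    }

    static void __exit kretprobe_example_exit(void)
    {
            unregister_kretprobe(&my_kretprobe);
            printk(KERN_INFO "missed %d kretprobe instances\n",
                   my_kretprobe.nmissed);
    }

    module_init(kretprobe_example_init);
    module_exit(kretprobe_example_exit);
    MODULE_LICENSE("GPL");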
Diffstat (limited to 'arch/i386/kernel/kprobes.c')
-rw-r--r--	arch/i386/kernel/kprobes.c	102
1 file changed, 101 insertions(+), 1 deletion(-)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 59ff9b455069..048f754bbe23 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -23,6 +23,9 @@
  * Rusty Russell).
  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  *		interface to access function arguments.
+ * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ *		<prasanna@in.ibm.com> added function-return probes.
  */
 
 #include <linux/config.h>
@@ -91,6 +94,53 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+	return ((struct thread_info *) (((unsigned long) ptr) &
+					(~(THREAD_SIZE -1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	unsigned long *sara = (unsigned long *)&regs->esp;
+	struct kretprobe_instance *ri;
+	static void *orig_ret_addr;
+
+	/*
+	 * Save the return address when the return probe hits
+	 * the first time, and use it to populate the (krprobe
+	 * instance)->ret_addr for subsequent return probes at
+	 * the same addrress since stack address would have
+	 * the kretprobe_trampoline by then.
+	 */
+	if (((void*) *sara) != kretprobe_trampoline)
+		orig_ret_addr = (void*) *sara;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->stack_addr = sara;
+		ri->ret_addr = orig_ret_addr;
+		add_rp_inst(ri);
+		/* Replace the return addr with trampoline addr */
+		*sara = (unsigned long) &kretprobe_trampoline;
+	} else {
+		rp->nmissed++;
+	}
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk, spinlock_t *kp_lock)
+{
+	unsigned long flags = 0;
+	struct kretprobe_instance *ri;
+	spin_lock_irqsave(kp_lock, flags);
+	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+		*((unsigned long *)(ri->stack_addr)) =
+					(unsigned long) ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+	spin_unlock_irqrestore(kp_lock, flags);
+}
+
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled thorough out this function.
@@ -184,6 +234,55 @@ no_kprobe:
 }
 
 /*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+ void kretprobe_trampoline_holder(void)
+ {
+	asm volatile (  ".global kretprobe_trampoline\n"
+			"kretprobe_trampoline: \n"
+			"nop\n");
+ }
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = kretprobe_inst_table_head(tsk);
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara && ri->rp) {
+			if (ri->rp->handler)
+				ri->rp->handler(ri, regs);
+		}
+	}
+	return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+						unsigned long flags)
+{
+	struct kretprobe_instance *ri;
+	/* RA already popped */
+	unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+
+	while ((ri = get_rp_inst(sara))) {
+		regs->eip = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+	regs->eflags &= ~TF_MASK;
+}
+
+/*
  * Called after single-stepping. p->addr is the address of the
  * instruction whose first byte has been replaced by the "int 3"
  * instruction. To avoid the SMP problems that can occur when we
@@ -266,7 +365,8 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 	if (current_kprobe->post_handler)
 		current_kprobe->post_handler(current_kprobe, regs, 0);
 
-	resume_execution(current_kprobe, regs);
+	if (current_kprobe->post_handler != trampoline_post_handler)
+		resume_execution(current_kprobe, regs);
 	regs->eflags |= kprobe_saved_eflags;
 
 	unlock_kprobes();