aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorHien Nguyen <hien@us.ibm.com>2005-06-23 03:09:19 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-06-23 12:45:21 -0400
commitb94cce926b2b902b79380ccba370d6f9f2980de0 (patch)
treeda2680b1ec36eae6423ba446d09284d2642ae82b /kernel
parent2fa389c5eb8c97d621653184d2adf5fdbd4a3167 (diff)
[PATCH] kprobes: function-return probes
This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. 
If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operate by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probe trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/kprobes.c213
1 files changed, 208 insertions, 5 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 037142b72a49..692fbf75ab49 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -27,6 +27,9 @@
27 * interface to access function arguments. 27 * interface to access function arguments.
28 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes 28 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
29 * exceptions notifier to be first on the priority list. 29 * exceptions notifier to be first on the priority list.
30 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
31 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
32 * <prasanna@in.ibm.com> added function-return probes.
30 */ 33 */
31#include <linux/kprobes.h> 34#include <linux/kprobes.h>
32#include <linux/spinlock.h> 35#include <linux/spinlock.h>
@@ -41,6 +44,7 @@
41#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) 44#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
42 45
43static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 46static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
47static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
44 48
45unsigned int kprobe_cpu = NR_CPUS; 49unsigned int kprobe_cpu = NR_CPUS;
46static DEFINE_SPINLOCK(kprobe_lock); 50static DEFINE_SPINLOCK(kprobe_lock);
@@ -78,7 +82,7 @@ struct kprobe *get_kprobe(void *addr)
78 * Aggregate handlers for multiple kprobes support - these handlers 82 * Aggregate handlers for multiple kprobes support - these handlers
79 * take care of invoking the individual kprobe handlers on p->list 83 * take care of invoking the individual kprobe handlers on p->list
80 */ 84 */
81int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 85static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
82{ 86{
83 struct kprobe *kp; 87 struct kprobe *kp;
84 88
@@ -92,8 +96,8 @@ int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
92 return 0; 96 return 0;
93} 97}
94 98
95void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 99static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
96 unsigned long flags) 100 unsigned long flags)
97{ 101{
98 struct kprobe *kp; 102 struct kprobe *kp;
99 103
@@ -107,7 +111,8 @@ void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
107 return; 111 return;
108} 112}
109 113
110int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr) 114static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
115 int trapnr)
111{ 116{
112 /* 117 /*
113 * if we faulted "during" the execution of a user specified 118 * if we faulted "during" the execution of a user specified
@@ -120,6 +125,135 @@ int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
120 return 0; 125 return 0;
121} 126}
122 127
128struct kprobe trampoline_p = {
129 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
130 .pre_handler = trampoline_probe_handler,
131 .post_handler = trampoline_post_handler
132};
133
134struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
135{
136 struct hlist_node *node;
137 struct kretprobe_instance *ri;
138 hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
139 return ri;
140 return NULL;
141}
142
143static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
144{
145 struct hlist_node *node;
146 struct kretprobe_instance *ri;
147 hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
148 return ri;
149 return NULL;
150}
151
152struct kretprobe_instance *get_rp_inst(void *sara)
153{
154 struct hlist_head *head;
155 struct hlist_node *node;
156 struct task_struct *tsk;
157 struct kretprobe_instance *ri;
158
159 tsk = arch_get_kprobe_task(sara);
160 head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
161 hlist_for_each_entry(ri, node, head, hlist) {
162 if (ri->stack_addr == sara)
163 return ri;
164 }
165 return NULL;
166}
167
168void add_rp_inst(struct kretprobe_instance *ri)
169{
170 struct task_struct *tsk;
171 /*
172 * Remove rp inst off the free list -
173 * Add it back when probed function returns
174 */
175 hlist_del(&ri->uflist);
176 tsk = arch_get_kprobe_task(ri->stack_addr);
177 /* Add rp inst onto table */
178 INIT_HLIST_NODE(&ri->hlist);
179 hlist_add_head(&ri->hlist,
180 &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
181
182 /* Also add this rp inst to the used list. */
183 INIT_HLIST_NODE(&ri->uflist);
184 hlist_add_head(&ri->uflist, &ri->rp->used_instances);
185}
186
187void recycle_rp_inst(struct kretprobe_instance *ri)
188{
189 /* remove rp inst off the rprobe_inst_table */
190 hlist_del(&ri->hlist);
191 if (ri->rp) {
192 /* remove rp inst off the used list */
193 hlist_del(&ri->uflist);
194 /* put rp inst back onto the free list */
195 INIT_HLIST_NODE(&ri->uflist);
196 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
197 } else
198 /* Unregistering */
199 kfree(ri);
200}
201
202struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
203{
204 return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
205}
206
207struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
208{
209 struct task_struct *tsk;
210 struct hlist_head *head;
211 struct hlist_node *node;
212 struct kretprobe_instance *ri;
213
214 head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
215
216 hlist_for_each_entry(ri, node, head, hlist) {
217 tsk = arch_get_kprobe_task(ri->stack_addr);
218 if (tsk == tk)
219 return ri;
220 }
221 return NULL;
222}
223
224/*
225 * This function is called from do_exit or do_execv when task tk's stack is
226 * about to be recycled. Recycle any function-return probe instances
227 * associated with this task. These represent probed functions that have
228 * been called but may never return.
229 */
230void kprobe_flush_task(struct task_struct *tk)
231{
232 arch_kprobe_flush_task(tk, &kprobe_lock);
233}
234
235/*
236 * This kprobe pre_handler is registered with every kretprobe. When probe
237 * hits it will set up the return probe.
238 */
239static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
240{
241 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
242
243 /*TODO: consider to only swap the RA after the last pre_handler fired */
244 arch_prepare_kretprobe(rp, regs);
245 return 0;
246}
247
248static inline void free_rp_inst(struct kretprobe *rp)
249{
250 struct kretprobe_instance *ri;
251 while ((ri = get_free_rp_inst(rp)) != NULL) {
252 hlist_del(&ri->uflist);
253 kfree(ri);
254 }
255}
256
123/* 257/*
124 * Fill in the required fields of the "manager kprobe". Replace the 258 * Fill in the required fields of the "manager kprobe". Replace the
125 * earlier kprobe in the hlist with the manager kprobe 259 * earlier kprobe in the hlist with the manager kprobe
@@ -257,16 +391,82 @@ void unregister_jprobe(struct jprobe *jp)
257 unregister_kprobe(&jp->kp); 391 unregister_kprobe(&jp->kp);
258} 392}
259 393
394#ifdef ARCH_SUPPORTS_KRETPROBES
395
396int register_kretprobe(struct kretprobe *rp)
397{
398 int ret = 0;
399 struct kretprobe_instance *inst;
400 int i;
401
402 rp->kp.pre_handler = pre_handler_kretprobe;
403
404 /* Pre-allocate memory for max kretprobe instances */
405 if (rp->maxactive <= 0) {
406#ifdef CONFIG_PREEMPT
407 rp->maxactive = max(10, 2 * NR_CPUS);
408#else
409 rp->maxactive = NR_CPUS;
410#endif
411 }
412 INIT_HLIST_HEAD(&rp->used_instances);
413 INIT_HLIST_HEAD(&rp->free_instances);
414 for (i = 0; i < rp->maxactive; i++) {
415 inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
416 if (inst == NULL) {
417 free_rp_inst(rp);
418 return -ENOMEM;
419 }
420 INIT_HLIST_NODE(&inst->uflist);
421 hlist_add_head(&inst->uflist, &rp->free_instances);
422 }
423
424 rp->nmissed = 0;
425 /* Establish function entry probe point */
426 if ((ret = register_kprobe(&rp->kp)) != 0)
427 free_rp_inst(rp);
428 return ret;
429}
430
431#else /* ARCH_SUPPORTS_KRETPROBES */
432
433int register_kretprobe(struct kretprobe *rp)
434{
435 return -ENOSYS;
436}
437
438#endif /* ARCH_SUPPORTS_KRETPROBES */
439
440void unregister_kretprobe(struct kretprobe *rp)
441{
442 unsigned long flags;
443 struct kretprobe_instance *ri;
444
445 unregister_kprobe(&rp->kp);
446 /* No race here */
447 spin_lock_irqsave(&kprobe_lock, flags);
448 free_rp_inst(rp);
449 while ((ri = get_used_rp_inst(rp)) != NULL) {
450 ri->rp = NULL;
451 hlist_del(&ri->uflist);
452 }
453 spin_unlock_irqrestore(&kprobe_lock, flags);
454}
455
260static int __init init_kprobes(void) 456static int __init init_kprobes(void)
261{ 457{
262 int i, err = 0; 458 int i, err = 0;
263 459
264 /* FIXME allocate the probe table, currently defined statically */ 460 /* FIXME allocate the probe table, currently defined statically */
265 /* initialize all list heads */ 461 /* initialize all list heads */
266 for (i = 0; i < KPROBE_TABLE_SIZE; i++) 462 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
267 INIT_HLIST_HEAD(&kprobe_table[i]); 463 INIT_HLIST_HEAD(&kprobe_table[i]);
464 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
465 }
268 466
269 err = register_die_notifier(&kprobe_exceptions_nb); 467 err = register_die_notifier(&kprobe_exceptions_nb);
468 /* Register the trampoline probe for return probe */
469 register_kprobe(&trampoline_p);
270 return err; 470 return err;
271} 471}
272 472
@@ -277,3 +477,6 @@ EXPORT_SYMBOL_GPL(unregister_kprobe);
277EXPORT_SYMBOL_GPL(register_jprobe); 477EXPORT_SYMBOL_GPL(register_jprobe);
278EXPORT_SYMBOL_GPL(unregister_jprobe); 478EXPORT_SYMBOL_GPL(unregister_jprobe);
279EXPORT_SYMBOL_GPL(jprobe_return); 479EXPORT_SYMBOL_GPL(jprobe_return);
480EXPORT_SYMBOL_GPL(register_kretprobe);
481EXPORT_SYMBOL_GPL(unregister_kretprobe);
482