Diffstat (limited to 'kernel/kprobes.c'):
 kernel/kprobes.c | 213 ++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 208 insertions(+), 5 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 037142b72a49..692fbf75ab49 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -27,6 +27,9 @@
  * interface to access function arguments.
  * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  *		exceptions notifier to be first on the priority list.
+ * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ *		<prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
@@ -41,6 +44,7 @@
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
+static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
@@ -78,7 +82,7 @@ struct kprobe *get_kprobe(void *addr)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
@@ -92,8 +96,8 @@ int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
-			unsigned long flags)
+static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+			unsigned long flags)
 {
 	struct kprobe *kp;
 
@@ -107,7 +111,8 @@ void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	return;
 }
 
-int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+				int trapnr)
 {
 	/*
 	 * if we faulted "during" the execution of a user specified
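The aggr_* handlers made static in the hunks above implement the "manager kprobe" dispatch described in the comment: when several kprobes share one address, a single aggregate probe fires and invokes each user handler on p->list in turn, stopping early if a pre-handler claims the event. The following stand-alone user-space sketch illustrates that dispatch discipline only; the names (aggr_pre, handlers[]) and the fixed array standing in for the kernel's linked list are illustrative assumptions, not kernel API.

    /* User-space sketch of aggregate dispatch; not kernel code. */
    #include <stdio.h>

    #define MAX_HANDLERS 4

    typedef int (*pre_handler_t)(void *regs);

    static pre_handler_t handlers[MAX_HANDLERS];
    static int nr_handlers;

    /* Stands in for aggr_pre_handler(): run every registered handler;
     * a non-zero return means that handler took over, so stop early. */
    static int aggr_pre(void *regs)
    {
        int i;
        for (i = 0; i < nr_handlers; i++) {
            if (handlers[i] && handlers[i](regs))
                return 1;
        }
        return 0;
    }

    static int h1(void *regs) { printf("h1 fired\n"); return 0; }
    static int h2(void *regs) { printf("h2 fired\n"); return 0; }

    int main(void)
    {
        handlers[nr_handlers++] = h1;
        handlers[nr_handlers++] = h2;
        aggr_pre(NULL);	/* prints "h1 fired" then "h2 fired" */
        return 0;
    }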
@@ -120,6 +125,135 @@ int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
 	return 0;
 }
 
+struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler,
+	.post_handler = trampoline_post_handler
+};
+
+struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+struct kretprobe_instance *get_rp_inst(void *sara)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara)
+			return ri;
+	}
+	return NULL;
+}
+
+void add_rp_inst(struct kretprobe_instance *ri)
+{
+	struct task_struct *tsk;
+	/*
+	 * Remove rp inst off the free list -
+	 * Add it back when probed function returns
+	 */
+	hlist_del(&ri->uflist);
+	tsk = arch_get_kprobe_task(ri->stack_addr);
+	/* Add rp inst onto table */
+	INIT_HLIST_NODE(&ri->hlist);
+	hlist_add_head(&ri->hlist,
+			&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
+
+	/* Also add this rp inst to the used list. */
+	INIT_HLIST_NODE(&ri->uflist);
+	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+}
+
+void recycle_rp_inst(struct kretprobe_instance *ri)
+{
+	/* remove rp inst off the kretprobe_inst_table */
+	hlist_del(&ri->hlist);
+	if (ri->rp) {
+		/* remove rp inst off the used list */
+		hlist_del(&ri->uflist);
+		/* put rp inst back onto the free list */
+		INIT_HLIST_NODE(&ri->uflist);
+		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
+	} else
+		/* Unregistering */
+		kfree(ri);
+}
+
+struct hlist_head *kretprobe_inst_table_head(struct task_struct *tsk)
+{
+	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+}
+
+struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
+{
+	struct task_struct *tsk;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+
+	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		tsk = arch_get_kprobe_task(ri->stack_addr);
+		if (tsk == tk)
+			return ri;
+	}
+	return NULL;
+}
+
+/*
+ * This function is called from do_exit or do_execve when task tk's stack is
+ * about to be recycled. Recycle any function-return probe instances
+ * associated with this task. These represent probed functions that have
+ * been called but may never return.
+ */
+void kprobe_flush_task(struct task_struct *tk)
+{
+	arch_kprobe_flush_task(tk, &kprobe_lock);
+}
+
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When the
+ * probe hits, it sets up the return probe.
+ */
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+
+	/* TODO: consider swapping the RA only after the last pre_handler has fired */
+	arch_prepare_kretprobe(rp, regs);
+	return 0;
+}
+
+static inline void free_rp_inst(struct kretprobe *rp)
+{
+	struct kretprobe_instance *ri;
+	while ((ri = get_free_rp_inst(rp)) != NULL) {
+		hlist_del(&ri->uflist);
+		kfree(ri);
+	}
+}
+
 /*
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
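Taken together, the functions added in this hunk manage a per-kretprobe instance pool: free_instances holds pre-allocated kretprobe_instance objects, add_rp_inst() moves one onto used_instances and into the task-hashed kretprobe_inst_table when a probed function is entered, and recycle_rp_inst() returns it to the free list when the function returns (or frees it during unregistration). Below is a minimal stand-alone user-space sketch of the same take/recycle discipline under simplifying assumptions: plain singly linked lists replace hlists, and the miss accounting that the kernel presumably does in the arch-level arch_prepare_kretprobe() (this file only resets rp->nmissed) is folded into pool_take() for brevity.

    /* User-space sketch of the kretprobe instance pool; not kernel code. */
    #include <stdio.h>
    #include <stdlib.h>

    struct instance {
        struct instance *next;	/* link on the free or the used list */
        void *stack_addr;	/* where the probed return address lived */
    };

    struct pool {
        struct instance *free_list;
        struct instance *used_list;
        int nmissed;		/* entries seen while the pool was empty */
    };

    /* Entry side, like add_rp_inst(): move an instance from free to used. */
    static struct instance *pool_take(struct pool *p, void *sara)
    {
        struct instance *ri = p->free_list;

        if (!ri) {
            p->nmissed++;	/* kernel: rp->nmissed++, in arch code */
            return NULL;
        }
        p->free_list = ri->next;
        ri->stack_addr = sara;
        ri->next = p->used_list;
        p->used_list = ri;
        return ri;
    }

    /* Return side, like recycle_rp_inst(): move it back to the free list. */
    static void pool_recycle(struct pool *p, struct instance *ri)
    {
        struct instance **pp;

        for (pp = &p->used_list; *pp; pp = &(*pp)->next) {
            if (*pp == ri) {
                *pp = ri->next;
                break;
            }
        }
        ri->next = p->free_list;
        p->free_list = ri;
    }

    int main(void)
    {
        struct pool p = { NULL, NULL, 0 };
        struct instance *ri;
        int i;

        /* Like register_kretprobe(): pre-allocate maxactive (= 2). */
        for (i = 0; i < 2; i++) {
            ri = malloc(sizeof(*ri));
            ri->next = p.free_list;
            p.free_list = ri;
        }

        ri = pool_take(&p, (void *)0x1000);	/* first probed call enters */
        pool_take(&p, (void *)0x2000);		/* second probed call enters */
        pool_take(&p, (void *)0x3000);		/* pool exhausted: a miss */
        pool_recycle(&p, ri);			/* first probed call returns */

        printf("missed: %d\n", p.nmissed);	/* prints "missed: 1" */
        return 0;
    }

Recycling instances instead of freeing them keeps the return path allocation-free, which is presumably why the pool is sized up front by maxactive rather than grown on demand.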
@@ -257,16 +391,82 @@ void unregister_jprobe(struct jprobe *jp)
 	unregister_kprobe(&jp->kp);
 }
 
+#ifdef ARCH_SUPPORTS_KRETPROBES
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	int ret = 0;
+	struct kretprobe_instance *inst;
+	int i;
+
+	rp->kp.pre_handler = pre_handler_kretprobe;
+
+	/* Pre-allocate memory for max kretprobe instances */
+	if (rp->maxactive <= 0) {
+#ifdef CONFIG_PREEMPT
+		rp->maxactive = max(10, 2 * NR_CPUS);
+#else
+		rp->maxactive = NR_CPUS;
+#endif
+	}
+	INIT_HLIST_HEAD(&rp->used_instances);
+	INIT_HLIST_HEAD(&rp->free_instances);
+	for (i = 0; i < rp->maxactive; i++) {
+		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+		if (inst == NULL) {
+			free_rp_inst(rp);
+			return -ENOMEM;
+		}
+		INIT_HLIST_NODE(&inst->uflist);
+		hlist_add_head(&inst->uflist, &rp->free_instances);
+	}
+
+	rp->nmissed = 0;
+	/* Establish function entry probe point */
+	if ((ret = register_kprobe(&rp->kp)) != 0)
+		free_rp_inst(rp);
+	return ret;
+}
+
+#else /* ARCH_SUPPORTS_KRETPROBES */
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	return -ENOSYS;
+}
+
+#endif /* ARCH_SUPPORTS_KRETPROBES */
+
+void unregister_kretprobe(struct kretprobe *rp)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+
+	unregister_kprobe(&rp->kp);
+	/* No race here */
+	spin_lock_irqsave(&kprobe_lock, flags);
+	free_rp_inst(rp);
+	while ((ri = get_used_rp_inst(rp)) != NULL) {
+		ri->rp = NULL;
+		hlist_del(&ri->uflist);
+	}
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
-	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
+		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
+	}
 
 	err = register_die_notifier(&kprobe_exceptions_nb);
+	/* Register the trampoline probe for return probe */
+	register_kprobe(&trampoline_p);
 	return err;
 }
 
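For orientation, here is roughly how a module would consume the interface added above. This is a hedged sketch, not code from this patch: the probed symbol do_fork, the use of kallsyms_lookup_name() to resolve it, and the (struct kretprobe_instance *, struct pt_regs *) handler signature are assumptions drawn from how kretprobes are documented elsewhere and may not match this exact revision.

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <linux/kallsyms.h>

    /* Runs via kretprobe_trampoline when the probed function returns. */
    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
        printk(KERN_INFO "probed function returned\n");
        return 0;
    }

    static struct kretprobe my_kretprobe = {
        .handler = ret_handler,
        .maxactive = 20,	/* at most 20 outstanding probed calls */
    };

    static int __init my_init(void)
    {
        /* Hypothetical target; any non-inlined kernel function would do. */
        my_kretprobe.kp.addr =
            (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
        if (!my_kretprobe.kp.addr)
            return -EINVAL;
        return register_kretprobe(&my_kretprobe);
    }

    static void __exit my_exit(void)
    {
        unregister_kretprobe(&my_kretprobe);
        /* nmissed counts entries that found free_instances empty. */
        printk(KERN_INFO "missed %d probe instances\n",
               my_kretprobe.nmissed);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");	/* the exports below are GPL-only */

Note that register_kretprobe() installs pre_handler_kretprobe as the entry handler itself, so the caller supplies only the return-side handler; on architectures without ARCH_SUPPORTS_KRETPROBES the registration simply fails with -ENOSYS.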
@@ -277,3 +477,6 @@ EXPORT_SYMBOL_GPL(unregister_kprobe);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
 EXPORT_SYMBOL_GPL(jprobe_return);
+EXPORT_SYMBOL_GPL(register_kretprobe);
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
+