Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	288
1 files changed, 266 insertions, 22 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 037142b72a49..334f37472c56 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -27,6 +27,9 @@
  * interface to access function arguments.
  * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  * exceptions notifier to be first on the priority list.
+ * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
@@ -41,6 +44,7 @@
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
+static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
@@ -78,22 +82,23 @@ struct kprobe *get_kprobe(void *addr)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->pre_handler) {
 			curr_kprobe = kp;
-			kp->pre_handler(kp, regs);
-			curr_kprobe = NULL;
+			if (kp->pre_handler(kp, regs))
+				return 1;
 		}
+		curr_kprobe = NULL;
 	}
 	return 0;
 }
 
-void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 			unsigned long flags)
 {
 	struct kprobe *kp;
 
@@ -107,7 +112,8 @@ void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	return;
 }
 
-int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+				int trapnr)
 {
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -120,19 +126,191 @@ int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
 	return 0;
 }
 
+static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe *kp = curr_kprobe;
+	if (curr_kprobe && kp->break_handler) {
+		if (kp->break_handler(kp, regs)) {
+			curr_kprobe = NULL;
+			return 1;
+		}
+	}
+	curr_kprobe = NULL;
+	return 0;
+}
+
+struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler,
+	.post_handler = trampoline_post_handler
+};
+
+struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+struct kretprobe_instance *get_rp_inst(void *sara)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara)
+			return ri;
+	}
+	return NULL;
+}
+
+void add_rp_inst(struct kretprobe_instance *ri)
+{
+	struct task_struct *tsk;
+	/*
+	 * Remove rp inst off the free list -
+	 * Add it back when probed function returns
+	 */
+	hlist_del(&ri->uflist);
+	tsk = arch_get_kprobe_task(ri->stack_addr);
+	/* Add rp inst onto table */
+	INIT_HLIST_NODE(&ri->hlist);
+	hlist_add_head(&ri->hlist,
+			&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
+
+	/* Also add this rp inst to the used list. */
+	INIT_HLIST_NODE(&ri->uflist);
+	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+}
+
+void recycle_rp_inst(struct kretprobe_instance *ri)
+{
+	/* remove rp inst off the rprobe_inst_table */
+	hlist_del(&ri->hlist);
+	if (ri->rp) {
+		/* remove rp inst off the used list */
+		hlist_del(&ri->uflist);
+		/* put rp inst back onto the free list */
+		INIT_HLIST_NODE(&ri->uflist);
+		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
+	} else
+		/* Unregistering */
+		kfree(ri);
+}
+
+struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
+{
+	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+}
+
+struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
+{
+	struct task_struct *tsk;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+
+	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		tsk = arch_get_kprobe_task(ri->stack_addr);
+		if (tsk == tk)
+			return ri;
+	}
+	return NULL;
+}
+
+/*
+ * This function is called from do_exit or do_execv when task tk's stack is
+ * about to be recycled. Recycle any function-return probe instances
+ * associated with this task. These represent probed functions that have
+ * been called but may never return.
+ */
+void kprobe_flush_task(struct task_struct *tk)
+{
+	unsigned long flags = 0;
+	spin_lock_irqsave(&kprobe_lock, flags);
+	arch_kprobe_flush_task(tk);
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When probe
+ * hits it will set up the return probe.
+ */
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+
+	/*TODO: consider to only swap the RA after the last pre_handler fired */
+	arch_prepare_kretprobe(rp, regs);
+	return 0;
+}
+
+static inline void free_rp_inst(struct kretprobe *rp)
+{
+	struct kretprobe_instance *ri;
+	while ((ri = get_free_rp_inst(rp)) != NULL) {
+		hlist_del(&ri->uflist);
+		kfree(ri);
+	}
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+/*
+* Add the new probe to old_p->list. Fail if this is the
+* second jprobe at the address - two jprobes can't coexist
+*/
+static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	struct kprobe *kp;
+
+	if (p->break_handler) {
+		list_for_each_entry(kp, &old_p->list, list) {
+			if (kp->break_handler)
+				return -EEXIST;
+		}
+		list_add_tail(&p->list, &old_p->list);
+	} else
+		list_add(&p->list, &old_p->list);
+	return 0;
+}
+
 /*
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
 static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	copy_kprobe(p, ap);
 	ap->addr = p->addr;
-	ap->opcode = p->opcode;
-	memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
-
 	ap->pre_handler = aggr_pre_handler;
 	ap->post_handler = aggr_post_handler;
 	ap->fault_handler = aggr_fault_handler;
+	ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
 	list_add(&p->list, &ap->list);
@@ -153,16 +331,16 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap;
 
-	if (old_p->break_handler || p->break_handler) {
-		ret = -EEXIST;	/* kprobe and jprobe can't (yet) coexist */
-	} else if (old_p->pre_handler == aggr_pre_handler) {
-		list_add(&p->list, &old_p->list);
+	if (old_p->pre_handler == aggr_pre_handler) {
+		copy_kprobe(old_p, p);
+		ret = add_new_kprobe(old_p, p);
 	} else {
 		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
-		list_add(&p->list, &ap->list);
+		copy_kprobe(ap, p);
+		ret = add_new_kprobe(ap, p);
 	}
 	return ret;
 }
@@ -170,10 +348,8 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
 /* kprobe removal house-keeping routines */
 static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 {
-	*p->addr = p->opcode;
+	arch_disarm_kprobe(p);
 	hlist_del(&p->hlist);
-	flush_icache_range((unsigned long) p->addr,
-			(unsigned long) p->addr + sizeof(kprobe_opcode_t));
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 	arch_remove_kprobe(p);
 }
@@ -200,6 +376,7 @@ int register_kprobe(struct kprobe *p)
 	}
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
+	p->nmissed = 0;
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
@@ -210,10 +387,8 @@ int register_kprobe(struct kprobe *p)
 	hlist_add_head(&p->hlist,
 			&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	p->opcode = *p->addr;
-	*p->addr = BREAKPOINT_INSTRUCTION;
-	flush_icache_range((unsigned long) p->addr,
-			(unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	arch_arm_kprobe(p);
+
 out:
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 rm_kprobe:
@@ -257,16 +432,82 @@ void unregister_jprobe(struct jprobe *jp)
 	unregister_kprobe(&jp->kp);
 }
 
+#ifdef ARCH_SUPPORTS_KRETPROBES
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	int ret = 0;
+	struct kretprobe_instance *inst;
+	int i;
+
+	rp->kp.pre_handler = pre_handler_kretprobe;
+
+	/* Pre-allocate memory for max kretprobe instances */
+	if (rp->maxactive <= 0) {
+#ifdef CONFIG_PREEMPT
+		rp->maxactive = max(10, 2 * NR_CPUS);
+#else
+		rp->maxactive = NR_CPUS;
+#endif
+	}
+	INIT_HLIST_HEAD(&rp->used_instances);
+	INIT_HLIST_HEAD(&rp->free_instances);
+	for (i = 0; i < rp->maxactive; i++) {
+		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+		if (inst == NULL) {
+			free_rp_inst(rp);
+			return -ENOMEM;
+		}
+		INIT_HLIST_NODE(&inst->uflist);
+		hlist_add_head(&inst->uflist, &rp->free_instances);
+	}
+
+	rp->nmissed = 0;
+	/* Establish function entry probe point */
+	if ((ret = register_kprobe(&rp->kp)) != 0)
+		free_rp_inst(rp);
+	return ret;
+}
+
+#else /* ARCH_SUPPORTS_KRETPROBES */
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	return -ENOSYS;
+}
+
+#endif /* ARCH_SUPPORTS_KRETPROBES */
+
+void unregister_kretprobe(struct kretprobe *rp)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+
+	unregister_kprobe(&rp->kp);
+	/* No race here */
+	spin_lock_irqsave(&kprobe_lock, flags);
+	free_rp_inst(rp);
+	while ((ri = get_used_rp_inst(rp)) != NULL) {
+		ri->rp = NULL;
+		hlist_del(&ri->uflist);
+	}
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
-	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
+		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
+	}
 
 	err = register_die_notifier(&kprobe_exceptions_nb);
+	/* Register the trampoline probe for return probe */
+	register_kprobe(&trampoline_p);
 	return err;
 }
 
@@ -277,3 +518,6 @@ EXPORT_SYMBOL_GPL(unregister_kprobe);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
 EXPORT_SYMBOL_GPL(jprobe_return);
+EXPORT_SYMBOL_GPL(register_kretprobe);
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
+
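
For context on the API this patch exports, the sketch below shows how a client module might drive register_kretprobe()/unregister_kretprobe(). It is a minimal illustration, not part of the patch: the .handler field and its signature (struct kretprobe_instance *, struct pt_regs *) come from include/linux/kprobes.h, which is not in this diff, and the probed symbol "do_fork", the module name, and the use of kallsyms_lookup_name() to resolve the probe address are illustrative assumptions only.

/*
 * Hypothetical kretprobe client (illustrative sketch, not part of this patch).
 * Assumes the kretprobe handler type declared in include/linux/kprobes.h:
 * int (*)(struct kretprobe_instance *, struct pt_regs *).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int ret_count;

/* Runs via the kretprobe trampoline each time the probed function returns. */
static int example_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ret_count++;
	return 0;
}

static struct kretprobe example_rp = {
	.handler   = example_ret_handler,
	.maxactive = 20,	/* kretprobe_instance objects to pre-allocate */
};

static int __init rp_example_init(void)
{
	/* Resolve the probe address; requires CONFIG_KALLSYMS (illustrative). */
	example_rp.kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!example_rp.kp.addr)
		return -EINVAL;
	/* register_kretprobe() sets kp.pre_handler and pre-allocates instances. */
	return register_kretprobe(&example_rp);
}

static void __exit rp_example_exit(void)
{
	unregister_kretprobe(&example_rp);
	printk(KERN_INFO "probed function returned %d times, missed %d probes\n",
	       ret_count, example_rp.nmissed);
}

module_init(rp_example_init);
module_exit(rp_example_exit);
MODULE_LICENSE("GPL");

On an architecture without ARCH_SUPPORTS_KRETPROBES, the register_kretprobe() stub added by this patch returns -ENOSYS, so such a module would simply fail to load.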