Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	360
1 file changed, 337 insertions(+), 23 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 037142b72a49..90c0e82b650c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -27,12 +27,16 @@
  * interface to access function arguments.
  * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  *		exceptions notifier to be first on the priority list.
 + * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 + *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 + *		<prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <asm/kdebug.h>
@@ -41,11 +45,112 @@
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
+static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
 static struct kprobe *curr_kprobe;
 
+/*
+ * kprobe->ainsn.insn points to the copy of the instruction to be
+ * single-stepped. x86_64, POWER4 and above have no-exec support and
+ * stepping on the instruction on a vmalloced/kmalloced/data page
+ * is a recipe for disaster
+ */
+#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+
+struct kprobe_insn_page {
+	struct hlist_node hlist;
+	kprobe_opcode_t *insns;		/* Page of instruction slots */
+	char slot_used[INSNS_PER_PAGE];
+	int nused;
+};
+
+static struct hlist_head kprobe_insn_pages;
+
+/**
+ * get_insn_slot() - Find a slot on an executable page for an instruction.
+ * We allocate an executable page if there's no room on existing ones.
+ */
+kprobe_opcode_t *get_insn_slot(void)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos;
+
+	hlist_for_each(pos, &kprobe_insn_pages) {
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->nused < INSNS_PER_PAGE) {
+			int i;
+			for (i = 0; i < INSNS_PER_PAGE; i++) {
+				if (!kip->slot_used[i]) {
+					kip->slot_used[i] = 1;
+					kip->nused++;
+					return kip->insns + (i * MAX_INSN_SIZE);
+				}
+			}
+			/* Surprise!  No unused slots.  Fix kip->nused. */
+			kip->nused = INSNS_PER_PAGE;
+		}
+	}
+
+	/* All out of space.  Need to allocate a new page. Use slot 0.*/
+	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+	if (!kip) {
+		return NULL;
+	}
+
+	/*
+	 * Use module_alloc so this page is within +/- 2GB of where the
+	 * kernel image and loaded module images reside. This is required
+	 * so x86_64 can correctly handle the %rip-relative fixups.
+	 */
+	kip->insns = module_alloc(PAGE_SIZE);
+	if (!kip->insns) {
+		kfree(kip);
+		return NULL;
+	}
+	INIT_HLIST_NODE(&kip->hlist);
+	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+	memset(kip->slot_used, 0, INSNS_PER_PAGE);
+	kip->slot_used[0] = 1;
+	kip->nused = 1;
+	return kip->insns;
+}
+
+void free_insn_slot(kprobe_opcode_t *slot)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos;
+
+	hlist_for_each(pos, &kprobe_insn_pages) {
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->insns <= slot &&
+		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
+			int i = (slot - kip->insns) / MAX_INSN_SIZE;
+			kip->slot_used[i] = 0;
+			kip->nused--;
+			if (kip->nused == 0) {
+				/*
+				 * Page is no longer in use.  Free it unless
+				 * it's the last one.  We keep the last one
+				 * so as not to have to set it up again the
+				 * next time somebody inserts a probe.
+				 */
+				hlist_del(&kip->hlist);
+				if (hlist_empty(&kprobe_insn_pages)) {
+					INIT_HLIST_NODE(&kip->hlist);
+					hlist_add_head(&kip->hlist,
+						       &kprobe_insn_pages);
+				} else {
+					module_free(NULL, kip->insns);
+					kfree(kip);
+				}
+			}
+			return;
+		}
+	}
+}
+
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)
 {
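
The slot allocator above is consumed by the arch layer, which copies the
probed instruction into an executable slot so the out-of-line single-step
never runs from a vmalloc'ed/kmalloc'ed/data page. A minimal sketch of the
intended call pattern (the hook names arch_prepare_kprobe() and
arch_remove_kprobe() are real; the bodies here are an illustration, not the
exact arch code):

	int arch_prepare_kprobe(struct kprobe *p)
	{
		/* Carve out an executable slot for the out-of-line copy. */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		/* Copy the original instruction into the slot; the
		 * single-step will run from here, not from p->addr. */
		memcpy(p->ainsn.insn, p->addr,
		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		return 0;
	}

	void arch_remove_kprobe(struct kprobe *p)
	{
		/* Return the slot; the last backing page stays cached. */
		free_insn_slot(p->ainsn.insn);
	}
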
@@ -78,22 +183,23 @@ struct kprobe *get_kprobe(void *addr)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->pre_handler) {
 			curr_kprobe = kp;
-			kp->pre_handler(kp, regs);
-			curr_kprobe = NULL;
+			if (kp->pre_handler(kp, regs))
+				return 1;
 		}
+		curr_kprobe = NULL;
 	}
 	return 0;
 }
 
-void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	unsigned long flags)
 {
 	struct kprobe *kp;
 
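
Note the changed contract above: a nonzero return from a pre_handler now
short-circuits the aggregate and is propagated to the arch trap handler,
signalling that the handler has redirected execution (as a jprobe's
setjmp-style pre-handler does) and the original instruction must not be
single-stepped. A hedged illustration of a handler relying on that contract
(i386-style pt_regs field; my_entry_stub is hypothetical):

	static int takeover_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		/* Divert the probed thread, then return nonzero to
		 * suppress single-stepping of the original instruction. */
		regs->eip = (unsigned long) my_entry_stub;	/* hypothetical */
		return 1;
	}
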
@@ -107,7 +213,8 @@ void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	return;
 }
 
-int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+			      int trapnr)
 {
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -120,19 +227,159 @@ int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
 	return 0;
 }
 
+static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe *kp = curr_kprobe;
+	if (curr_kprobe && kp->break_handler) {
+		if (kp->break_handler(kp, regs)) {
+			curr_kprobe = NULL;
+			return 1;
+		}
+	}
+	curr_kprobe = NULL;
+	return 0;
+}
+
+struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+void add_rp_inst(struct kretprobe_instance *ri)
+{
+	/*
+	 * Remove rp inst off the free list -
+	 * Add it back when probed function returns
+	 */
+	hlist_del(&ri->uflist);
+
+	/* Add rp inst onto table */
+	INIT_HLIST_NODE(&ri->hlist);
+	hlist_add_head(&ri->hlist,
+			&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);
+
+	/* Also add this rp inst to the used list. */
+	INIT_HLIST_NODE(&ri->uflist);
+	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+}
+
+void recycle_rp_inst(struct kretprobe_instance *ri)
+{
+	/* remove rp inst off the rprobe_inst_table */
+	hlist_del(&ri->hlist);
+	if (ri->rp) {
+		/* remove rp inst off the used list */
+		hlist_del(&ri->uflist);
+		/* put rp inst back onto the free list */
+		INIT_HLIST_NODE(&ri->uflist);
+		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
+	} else
+		/* Unregistering */
+		kfree(ri);
+}
+
+struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
+{
+	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+}
+
+/*
+ * This function is called from exit_thread or flush_thread when task tk's
+ * stack is being recycled so that we can recycle any function-return probe
+ * instances associated with this task. These left over instances represent
+ * probed functions that have been called but will never return.
+ */
+void kprobe_flush_task(struct task_struct *tk)
+{
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&kprobe_lock, flags);
+	head = kretprobe_inst_table_head(current);
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task == tk)
+			recycle_rp_inst(ri);
+	}
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When probe
+ * hits it will set up the return probe.
+ */
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+
+	/*TODO: consider to only swap the RA after the last pre_handler fired */
+	arch_prepare_kretprobe(rp, regs);
+	return 0;
+}
+
+static inline void free_rp_inst(struct kretprobe *rp)
+{
+	struct kretprobe_instance *ri;
+	while ((ri = get_free_rp_inst(rp)) != NULL) {
+		hlist_del(&ri->uflist);
+		kfree(ri);
+	}
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+/*
+ * Add the new probe to old_p->list. Fail if this is the
+ * second jprobe at the address - two jprobes can't coexist
+ */
+static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	struct kprobe *kp;
+
+	if (p->break_handler) {
+		list_for_each_entry(kp, &old_p->list, list) {
+			if (kp->break_handler)
+				return -EEXIST;
+		}
+		list_add_tail(&p->list, &old_p->list);
+	} else
+		list_add(&p->list, &old_p->list);
+	return 0;
+}
+
 /*
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
 static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	copy_kprobe(p, ap);
 	ap->addr = p->addr;
-	ap->opcode = p->opcode;
-	memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
-
 	ap->pre_handler = aggr_pre_handler;
 	ap->post_handler = aggr_post_handler;
 	ap->fault_handler = aggr_fault_handler;
+	ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
 	list_add(&p->list, &ap->list);
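
An instance's life cycle, per the helpers above: it starts on
rp->free_instances; on a hit, add_rp_inst() moves it into
kretprobe_inst_table (hashed by task) and onto rp->used_instances; when the
probed function returns, or the task exits first, recycle_rp_inst() puts it
back on the free list. The actual return-address swap happens in
arch_prepare_kretprobe(); roughly what the i386 side of this series does
(a sketch, with i386 field names):

	void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
	{
		struct kretprobe_instance *ri;
		/* At function entry, the return address sits at the top of
		 * the interrupted stack, which is where &regs->esp points
		 * for a same-privilege trap on i386. */
		unsigned long *sara = (unsigned long *) &regs->esp;

		if ((ri = get_free_rp_inst(rp)) != NULL) {
			ri->rp = rp;
			ri->task = current;
			ri->ret_addr = (kprobe_opcode_t *) *sara;
			/* Replace the real return address with the
			 * trampoline; its probe fires on return and
			 * invokes rp->handler. */
			*sara = (unsigned long) &kretprobe_trampoline;
			add_rp_inst(ri);
		} else
			rp->nmissed++;
	}
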
@@ -153,16 +400,16 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap;
 
-	if (old_p->break_handler || p->break_handler) {
-		ret = -EEXIST;	/* kprobe and jprobe can't (yet) coexist */
-	} else if (old_p->pre_handler == aggr_pre_handler) {
-		list_add(&p->list, &old_p->list);
+	if (old_p->pre_handler == aggr_pre_handler) {
+		copy_kprobe(old_p, p);
+		ret = add_new_kprobe(old_p, p);
 	} else {
 		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
-		list_add(&p->list, &ap->list);
+		copy_kprobe(ap, p);
+		ret = add_new_kprobe(ap, p);
 	}
 	return ret;
 }
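
For callers nothing changes here: registering a second probe at an
already-probed address transparently chains it under the aggregate. A
hypothetical usage fragment (handler, counter, and addr are illustrative):

	static int count_hits(struct kprobe *p, struct pt_regs *regs)
	{
		nhits++;		/* hypothetical counter */
		return 0;		/* 0: proceed with the single-step */
	}

	static struct kprobe kp1 = { .pre_handler = count_hits };
	static struct kprobe kp2 = { .pre_handler = count_hits };

	static int __init probe_init(void)
	{
		kp1.addr = kp2.addr = addr;	/* same probe point */
		register_kprobe(&kp1);		/* error handling elided */
		return register_kprobe(&kp2);	/* folds both under a manager kprobe */
	}
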
@@ -170,10 +417,8 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
 /* kprobe removal house-keeping routines */
 static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 {
-	*p->addr = p->opcode;
+	arch_disarm_kprobe(p);
 	hlist_del(&p->hlist);
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 	arch_remove_kprobe(p);
 }
@@ -200,6 +445,7 @@ int register_kprobe(struct kprobe *p)
 	}
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
+	p->nmissed = 0;
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
@@ -210,10 +456,8 @@ int register_kprobe(struct kprobe *p)
 	hlist_add_head(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	p->opcode = *p->addr;
-	*p->addr = BREAKPOINT_INSTRUCTION;
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	arch_arm_kprobe(p);
+
 out:
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 rm_kprobe:
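
The breakpoint insertion that was open-coded here, and its removal
counterpart in cleanup_kprobe() above, now sit behind arch_arm_kprobe() and
arch_disarm_kprobe(). Reconstructed from the lines these two hunks delete,
the hooks amount to the following (the real definitions live in the arch
trees):

	void arch_arm_kprobe(struct kprobe *p)
	{
		/* Plant the breakpoint over the saved opcode. */
		*p->addr = BREAKPOINT_INSTRUCTION;
		flush_icache_range((unsigned long) p->addr,
				   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
	}

	void arch_disarm_kprobe(struct kprobe *p)
	{
		/* Restore the original opcode. */
		*p->addr = p->opcode;
		flush_icache_range((unsigned long) p->addr,
				   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
	}
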
@@ -257,16 +501,83 @@ void unregister_jprobe(struct jprobe *jp)
 	unregister_kprobe(&jp->kp);
 }
 
+#ifdef ARCH_SUPPORTS_KRETPROBES
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	int ret = 0;
+	struct kretprobe_instance *inst;
+	int i;
+
+	rp->kp.pre_handler = pre_handler_kretprobe;
+
+	/* Pre-allocate memory for max kretprobe instances */
+	if (rp->maxactive <= 0) {
+#ifdef CONFIG_PREEMPT
+		rp->maxactive = max(10, 2 * NR_CPUS);
+#else
+		rp->maxactive = NR_CPUS;
+#endif
+	}
+	INIT_HLIST_HEAD(&rp->used_instances);
+	INIT_HLIST_HEAD(&rp->free_instances);
+	for (i = 0; i < rp->maxactive; i++) {
+		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+		if (inst == NULL) {
+			free_rp_inst(rp);
+			return -ENOMEM;
+		}
+		INIT_HLIST_NODE(&inst->uflist);
+		hlist_add_head(&inst->uflist, &rp->free_instances);
+	}
+
+	rp->nmissed = 0;
+	/* Establish function entry probe point */
+	if ((ret = register_kprobe(&rp->kp)) != 0)
+		free_rp_inst(rp);
+	return ret;
+}
+
+#else /* ARCH_SUPPORTS_KRETPROBES */
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	return -ENOSYS;
+}
+
+#endif /* ARCH_SUPPORTS_KRETPROBES */
+
+void unregister_kretprobe(struct kretprobe *rp)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+
+	unregister_kprobe(&rp->kp);
+	/* No race here */
+	spin_lock_irqsave(&kprobe_lock, flags);
+	free_rp_inst(rp);
+	while ((ri = get_used_rp_inst(rp)) != NULL) {
+		ri->rp = NULL;
+		hlist_del(&ri->uflist);
+	}
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
-	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
+		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
+	}
+
+	err = arch_init();
+	if (!err)
+		err = register_die_notifier(&kprobe_exceptions_nb);
 
-	err = register_die_notifier(&kprobe_exceptions_nb);
 	return err;
 }
 
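
Putting the new interface together, a client looks roughly like this
hypothetical module fragment; the handler signature matches the
kretprobe_handler_t this series adds to <linux/kprobes.h>, eax is the i386
return-value register, and target_func is illustrative:

	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
	{
		/* On i386, regs->eax holds the probed function's return value. */
		printk("%p returned %lx\n", ri->rp->kp.addr, regs->eax);
		return 0;
	}

	static struct kretprobe my_rp = {
		.handler   = ret_handler,
		.maxactive = 20,	/* <= 0 would pick the defaults above */
	};

	static int __init rp_init(void)
	{
		my_rp.kp.addr = (kprobe_opcode_t *) target_func;	/* hypothetical */
		return register_kretprobe(&my_rp);
	}

	static void __exit rp_exit(void)
	{
		/* Live instances are orphaned (ri->rp = NULL) and freed on return. */
		unregister_kretprobe(&my_rp);
	}
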
@@ -277,3 +588,6 @@ EXPORT_SYMBOL_GPL(unregister_kprobe);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
 EXPORT_SYMBOL_GPL(jprobe_return);
+EXPORT_SYMBOL_GPL(register_kretprobe);
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
+