about summary refs log tree commit diff stats
path: root/kernel/kprobes.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c  59
1 file changed, 50 insertions, 9 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1fbf466a29aa..3f57dfdc8f92 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,11 +47,17 @@
47 47
48static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 48static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
49static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 49static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
50static atomic_t kprobe_count;
50 51
51DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ 52DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
52DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ 53DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
53static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 54static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
54 55
56static struct notifier_block kprobe_page_fault_nb = {
57 .notifier_call = kprobe_exceptions_notify,
58 .priority = 0x7fffffff /* we need to notified first */
59};
60
55#ifdef __ARCH_WANT_KPROBES_INSN_SLOT 61#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
56/* 62/*
57 * kprobe->ainsn.insn points to the copy of the instruction to be 63 * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -368,16 +374,15 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
368*/ 374*/
369static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p) 375static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
370{ 376{
371 struct kprobe *kp;
372
373 if (p->break_handler) { 377 if (p->break_handler) {
374 list_for_each_entry_rcu(kp, &old_p->list, list) { 378 if (old_p->break_handler)
375 if (kp->break_handler) 379 return -EEXIST;
376 return -EEXIST;
377 }
378 list_add_tail_rcu(&p->list, &old_p->list); 380 list_add_tail_rcu(&p->list, &old_p->list);
381 old_p->break_handler = aggr_break_handler;
379 } else 382 } else
380 list_add_rcu(&p->list, &old_p->list); 383 list_add_rcu(&p->list, &old_p->list);
384 if (p->post_handler && !old_p->post_handler)
385 old_p->post_handler = aggr_post_handler;
381 return 0; 386 return 0;
382} 387}
383 388
@@ -388,11 +393,14 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
388static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 393static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
389{ 394{
390 copy_kprobe(p, ap); 395 copy_kprobe(p, ap);
396 flush_insn_slot(ap);
391 ap->addr = p->addr; 397 ap->addr = p->addr;
392 ap->pre_handler = aggr_pre_handler; 398 ap->pre_handler = aggr_pre_handler;
393 ap->post_handler = aggr_post_handler;
394 ap->fault_handler = aggr_fault_handler; 399 ap->fault_handler = aggr_fault_handler;
395 ap->break_handler = aggr_break_handler; 400 if (p->post_handler)
401 ap->post_handler = aggr_post_handler;
402 if (p->break_handler)
403 ap->break_handler = aggr_break_handler;
396 404
397 INIT_LIST_HEAD(&ap->list); 405 INIT_LIST_HEAD(&ap->list);
398 list_add_rcu(&p->list, &ap->list); 406 list_add_rcu(&p->list, &ap->list);
@@ -464,6 +472,8 @@ static int __kprobes __register_kprobe(struct kprobe *p,
464 old_p = get_kprobe(p->addr); 472 old_p = get_kprobe(p->addr);
465 if (old_p) { 473 if (old_p) {
466 ret = register_aggr_kprobe(old_p, p); 474 ret = register_aggr_kprobe(old_p, p);
475 if (!ret)
476 atomic_inc(&kprobe_count);
467 goto out; 477 goto out;
468 } 478 }
469 479
@@ -474,6 +484,10 @@ static int __kprobes __register_kprobe(struct kprobe *p,
474 hlist_add_head_rcu(&p->hlist, 484 hlist_add_head_rcu(&p->hlist,
475 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 485 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
476 486
487 if (atomic_add_return(1, &kprobe_count) == \
488 (ARCH_INACTIVE_KPROBE_COUNT + 1))
489 register_page_fault_notifier(&kprobe_page_fault_nb);
490
477 arch_arm_kprobe(p); 491 arch_arm_kprobe(p);
478 492
479out: 493out:
@@ -536,14 +550,40 @@ valid_p:
536 kfree(old_p); 550 kfree(old_p);
537 } 551 }
538 arch_remove_kprobe(p); 552 arch_remove_kprobe(p);
553 } else {
554 mutex_lock(&kprobe_mutex);
555 if (p->break_handler)
556 old_p->break_handler = NULL;
557 if (p->post_handler){
558 list_for_each_entry_rcu(list_p, &old_p->list, list){
559 if (list_p->post_handler){
560 cleanup_p = 2;
561 break;
562 }
563 }
564 if (cleanup_p == 0)
565 old_p->post_handler = NULL;
566 }
567 mutex_unlock(&kprobe_mutex);
539 } 568 }
569
570 /* Call unregister_page_fault_notifier()
571 * if no probes are active
572 */
573 mutex_lock(&kprobe_mutex);
574 if (atomic_add_return(-1, &kprobe_count) == \
575 ARCH_INACTIVE_KPROBE_COUNT)
576 unregister_page_fault_notifier(&kprobe_page_fault_nb);
577 mutex_unlock(&kprobe_mutex);
578 return;
540} 579}
541 580
542static struct notifier_block kprobe_exceptions_nb = { 581static struct notifier_block kprobe_exceptions_nb = {
543 .notifier_call = kprobe_exceptions_notify, 582 .notifier_call = kprobe_exceptions_notify,
544 .priority = 0x7fffffff /* we need to notified first */ 583 .priority = 0x7fffffff /* we need to be notified first */
545}; 584};
546 585
586
547int __kprobes register_jprobe(struct jprobe *jp) 587int __kprobes register_jprobe(struct jprobe *jp)
548{ 588{
549 /* Todo: Verify probepoint is a function entry point */ 589 /* Todo: Verify probepoint is a function entry point */
@@ -652,6 +692,7 @@ static int __init init_kprobes(void)
652 INIT_HLIST_HEAD(&kprobe_table[i]); 692 INIT_HLIST_HEAD(&kprobe_table[i]);
653 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 693 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
654 } 694 }
695 atomic_set(&kprobe_count, 0);
655 696
656 err = arch_init_kprobes(); 697 err = arch_init_kprobes();
657 if (!err) 698 if (!err)