author	Christoph Hellwig <hch@lst.de>	2007-10-16 04:24:07 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:50 -0400
commit	74a0b5762713a26496db72eac34fbbed46f20fce (patch)
tree	4a14df7c07ebc16283454f33713519a0e10b5c43 /kernel
parent	d5a7430ddcdb598261d70f7eb1bf450b5be52085 (diff)
x86: optimize page faults like all other architectures and kill notifier cruft
x86(-64) are the last architectures still using the page fault notifier cruft for the kprobes page fault hook. This patch converts them to proper direct calls, and removes the now unused page fault notifier bits as well as the cruft in kprobes.c that was related to this mess.

I know Andi didn't really like this, but all other architecture maintainers agreed that the direct calls are much better; besides the obvious cruft removal, a common way of dealing with kprobes across architectures is important as well.

[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc64]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Andi Kleen <ak@suse.de>
Cc: <linux-arch@vger.kernel.org>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
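For readers who do not have the arch side of the series at hand (the diffstat below is limited to 'kernel', so the fault-handler hunks are not shown on this page), the "direct call" shape the message describes looks roughly like the following sketch. This is illustrative only: the helper name notify_page_fault(), the user_mode() check and the use of trap number 14 reflect the usual x86 pattern and are not quoted from this diff. kprobe_running() and kprobe_fault_handler() are the existing kprobes hooks from <linux/kprobes.h>.

/* Sketch only: what an arch page fault handler does instead of going
 * through a registered page fault notifier chain.
 */
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() relies on smp_processor_id(), so keep
	 * preemption disabled, and only check faults taken in kernel mode.
	 */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;	/* fault consumed by kprobes */
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;	/* kprobes not configured, nothing to do */
}
#endif

/* The fault handler then short-circuits early:
 *
 *	if (notify_page_fault(regs))
 *		return;
 */

Compared with the notifier chain, this avoids walking a generic notifier list (with its 0x7fffffff priority trick) on every page fault and lets each architecture hook kprobes in the same, obvious place.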
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kprobes.c	39
1 file changed, 3 insertions(+), 36 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4b8a4493c541..f9798ff7899f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -64,7 +64,6 @@
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static atomic_t kprobe_count;
 
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobe_enabled;
@@ -73,11 +72,6 @@ DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
-static struct notifier_block kprobe_page_fault_nb = {
-	.notifier_call = kprobe_exceptions_notify,
-	.priority = 0x7fffffff /* we need to be notified first */
-};
-
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -556,8 +550,6 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
-		if (!ret)
-			atomic_inc(&kprobe_count);
 		goto out;
 	}
 
@@ -569,13 +561,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (kprobe_enabled) {
-		if (atomic_add_return(1, &kprobe_count) == \
-				(ARCH_INACTIVE_KPROBE_COUNT + 1))
-			register_page_fault_notifier(&kprobe_page_fault_nb);
-
+	if (kprobe_enabled)
 		arch_arm_kprobe(p);
-	}
+
 out:
 	mutex_unlock(&kprobe_mutex);
 
@@ -658,16 +646,6 @@ valid_p:
 		}
 		mutex_unlock(&kprobe_mutex);
 	}
-
-	/* Call unregister_page_fault_notifier()
-	 * if no probes are active
-	 */
-	mutex_lock(&kprobe_mutex);
-	if (atomic_add_return(-1, &kprobe_count) == \
-				ARCH_INACTIVE_KPROBE_COUNT)
-		unregister_page_fault_notifier(&kprobe_page_fault_nb);
-	mutex_unlock(&kprobe_mutex);
-	return;
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -815,7 +793,6 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
-	atomic_set(&kprobe_count, 0);
 
 	/* By default, kprobes are enabled */
 	kprobe_enabled = true;
@@ -921,13 +898,6 @@ static void __kprobes enable_all_kprobes(void)
 	if (kprobe_enabled)
 		goto already_enabled;
 
-	/*
-	 * Re-register the page fault notifier only if there are any
-	 * active probes at the time of enabling kprobes globally
-	 */
-	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
-		register_page_fault_notifier(&kprobe_page_fault_nb);
-
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
@@ -968,10 +938,7 @@ static void __kprobes disable_all_kprobes(void)
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();
-
-	mutex_lock(&kprobe_mutex);
-	/* Unconditionally unregister the page_fault notifier */
-	unregister_page_fault_notifier(&kprobe_page_fault_nb);
+	return;
 
 already_disabled:
 	mutex_unlock(&kprobe_mutex);