author      Srinivasa Ds <srinivasa@in.ibm.com>              2008-04-28 05:14:26 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2008-04-28 11:58:32 -0400
commit      3d8d996e0ca5b4093203d3f050b0f70b5c949ae8 (patch)
tree        2b19d3a47bf723c3bf6ff7c8a0d90078feaee08c /kernel
parent      0341a4d0fdd2a0a3d9e2bb3a9afef9f8292c8502 (diff)
kprobes: prevent probing of preempt_schedule()
Prohibit users from probing preempt_schedule(). One way to keep users from
probing a function is to mark it __kprobes, but that does not work for
functions that are already placed in a different section, such as
preempt_schedule() (which belongs to the __sched section). For such
functions we instead use a blacklist to refuse probe registration.
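For context, a rough sketch of why the __kprobes marker cannot be applied
here: both markers work by placing the function in a dedicated text section,
and a function can only live in one section. The definitions below are
approximate reconstructions from the kernel headers of this era, not part of
this commit.

	/* approximate: both annotations pin the function to a text section */
	#define __kprobes	__attribute__((__section__(".kprobes.text")))
	#define __sched	__attribute__((__section__(".sched.text")))

	/* preempt_schedule() is already emitted into .sched.text */
	asmlinkage void __sched preempt_schedule(void);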
With the blacklist approach, we record each blacklisted function's start
address and size in the kprobe_blacklist structure, then check the
user-specified address against the start and end of the blacklisted
function. Any attempt to register a probe on a blacklisted function is
rejected.
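For reference, the blacklist entry type this refers to is added to
include/linux/kprobes.h by the same patch (not visible in the kernel/-only
diff below); it looks roughly like this, with start_addr and range filled in
at init time from kallsyms:

	struct kprobe_blackpoint {
		const char *name;		/* symbol to blacklist */
		unsigned long start_addr;	/* resolved at boot */
		unsigned long range;		/* symbol size from kallsyms */
	};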
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Srinivasa DS <srinivasa@in.ibm.com>
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Jim Keniston <jkenisto@us.ibm.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/kprobes.c   52
1 files changed, 52 insertions, 0 deletions
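As an illustration only (a hypothetical test module, not part of this
commit), a kprobe registration on preempt_schedule() is now expected to fail
with -EINVAL once the probe address is matched against the blacklist:

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/kprobes.h>

	/* hypothetical demo; the module name is made up for illustration */
	static struct kprobe kp = {
		.symbol_name = "preempt_schedule", /* resolved by register_kprobe() */
	};

	static int __init blacklist_demo_init(void)
	{
		int ret = register_kprobe(&kp);

		printk(KERN_INFO "register_kprobe(preempt_schedule) = %d\n", ret);
		if (ret == 0)		/* not expected with this patch applied */
			unregister_kprobe(&kp);
		return 0;
	}

	static void __exit blacklist_demo_exit(void)
	{
		/* nothing left registered; nothing to clean up */
	}

	module_init(blacklist_demo_init);
	module_exit(blacklist_demo_exit);
	MODULE_LICENSE("GPL");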
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fcfb580c3afc..f02a4311768b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -72,6 +72,18 @@ DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+/*
+ * Normally, functions that we'd want to prohibit kprobes in, are marked
+ * __kprobes. But, there are cases where such functions already belong to
+ * a different section (__sched for preempt_schedule)
+ *
+ * For such cases, we now have a blacklist
+ */
+struct kprobe_blackpoint kprobe_blacklist[] = {
+	{"preempt_schedule",},
+	{NULL}    /* Terminator */
+};
+
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -492,9 +504,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
+	struct kprobe_blackpoint *kb;
+
 	if (addr >= (unsigned long)__kprobes_text_start &&
 	    addr < (unsigned long)__kprobes_text_end)
 		return -EINVAL;
+	/*
+	 * If there exists a kprobe_blacklist, verify and
+	 * fail any probe registration in the prohibited area
+	 */
+	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+		if (kb->start_addr) {
+			if (addr >= kb->start_addr &&
+			    addr < (kb->start_addr + kb->range))
+				return -EINVAL;
+		}
+	}
 	return 0;
 }
 
@@ -811,6 +836,11 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
+	unsigned long offset = 0, size = 0;
+	char *modname, namebuf[128];
+	const char *symbol_name;
+	void *addr;
+	struct kprobe_blackpoint *kb;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
@@ -819,6 +849,28 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
 
+	/*
+	 * Lookup and populate the kprobe_blacklist.
+	 *
+	 * Unlike the kretprobe blacklist, we'll need to determine
+	 * the range of addresses that belong to the said functions,
+	 * since a kprobe need not necessarily be at the beginning
+	 * of a function.
+	 */
+	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+		kprobe_lookup_name(kb->name, addr);
+		if (!addr)
+			continue;
+
+		kb->start_addr = (unsigned long)addr;
+		symbol_name = kallsyms_lookup(kb->start_addr,
+				&size, &offset, &modname, namebuf);
+		if (!symbol_name)
+			kb->range = 0;
+		else
+			kb->range = size;
+	}
+
 	if (kretprobe_blacklist_size) {
 		/* lookup the function address from its name */
 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {