author     Christoph Hellwig <hch@lst.de>                        2007-05-08 03:34:13 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 14:15:19 -0400
commit     6f716acd5fa20ae6a35ab29ae37fa9189e839ed5
tree       abefdcd4a24de1879d1bafcf4c827d10db561bc8 /kernel/kprobes.c
parent     b0bb501651b467096723dcfcf4565d910a2aadf8
kprobes: codingstyle cleanups
Remove superfluous braces and fix indentation as well as comments.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
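For readers unfamiliar with the style being applied, the sketch below shows the same rules in miniature: no braces around single-statement branches, assignments pulled out of the if () condition and tested separately, and block comments opened and closed on their own lines. It is an illustrative standalone program with made-up names (bump(), val), not code taken from kernel/kprobes.c:

#include <stdio.h>
#include <stdlib.h>

/*
 * Block comments open and close on their own lines, with the text on
 * " * " lines in between, matching the layout the patch converts to.
 */
static int bump(int *p)
{
        if (!p)                 /* single statement: no braces needed */
                return -1;

        *p += 1;
        return 0;
}

int main(void)
{
        int *val;
        int ret;

        val = malloc(sizeof(*val));
        if (!val)
                return EXIT_FAILURE;
        *val = 41;

        ret = bump(val);        /* assignment split out of the if (), then tested */
        if (ret)
                return EXIT_FAILURE;

        printf("%d\n", *val);
        free(val);
        return EXIT_SUCCESS;
}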
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--   kernel/kprobes.c | 55
1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 993452324a1f..22857003a65b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -133,7 +133,7 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
         struct kprobe_insn_page *kip;
         struct hlist_node *pos;
 
-      retry:
+ retry:
         hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                 if (kip->nused < INSNS_PER_PAGE) {
                         int i;
@@ -155,9 +155,8 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
         }
         /* All out of space. Need to allocate a new page. Use slot 0. */
         kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
-        if (!kip) {
+        if (!kip)
                 return NULL;
-        }
 
         /*
          * Use module_alloc so this page is within +/- 2GB of where the
@@ -246,9 +245,9 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
                         break;
                 }
         }
-        if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
+
+        if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                 collect_garbage_slots();
-        }
 }
 #endif
 
@@ -314,7 +313,6 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                         reset_kprobe_instance();
                 }
         }
-        return;
 }
 
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
@@ -533,8 +531,8 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
-        if (addr >= (unsigned long)__kprobes_text_start
-                && addr < (unsigned long)__kprobes_text_end)
+        if (addr >= (unsigned long)__kprobes_text_start &&
+            addr < (unsigned long)__kprobes_text_end)
                 return -EINVAL;
         return 0;
 }
@@ -561,19 +559,24 @@ static int __kprobes __register_kprobe(struct kprobe *p,
                 return -EINVAL;
         p->addr = (kprobe_opcode_t *)(((char *)p->addr)+ p->offset);
 
-        if ((!kernel_text_address((unsigned long) p->addr)) ||
-                in_kprobes_functions((unsigned long) p->addr))
+        if (!kernel_text_address((unsigned long) p->addr) ||
+            in_kprobes_functions((unsigned long) p->addr))
                 return -EINVAL;
 
         p->mod_refcounted = 0;
-        /* Check are we probing a module */
-        if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+
+        /*
+         * Check if are we probing a module.
+         */
+        probed_mod = module_text_address((unsigned long) p->addr);
+        if (probed_mod) {
                 struct module *calling_mod = module_text_address(called_from);
-                /* We must allow modules to probe themself and
-                 * in this case avoid incrementing the module refcount,
-                 * so as to allow unloading of self probing modules.
+                /*
+                 * We must allow modules to probe themself and in this case
+                 * avoid incrementing the module refcount, so as to allow
+                 * unloading of self probing modules.
                  */
-                if (calling_mod && (calling_mod != probed_mod)) {
+                if (calling_mod && calling_mod != probed_mod) {
                         if (unlikely(!try_module_get(probed_mod)))
                                 return -EINVAL;
                         p->mod_refcounted = 1;
@@ -591,7 +594,8 @@ static int __kprobes __register_kprobe(struct kprobe *p,
                 goto out;
         }
 
-        if ((ret = arch_prepare_kprobe(p)) != 0)
+        ret = arch_prepare_kprobe(p);
+        if (ret)
                 goto out;
 
         INIT_HLIST_NODE(&p->hlist);
@@ -614,8 +618,7 @@ out:
 
 int __kprobes register_kprobe(struct kprobe *p)
 {
-        return __register_kprobe(p,
-                (unsigned long)__builtin_return_address(0));
+        return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_kprobe(struct kprobe *p)
@@ -639,9 +642,9 @@ void __kprobes unregister_kprobe(struct kprobe *p)
                 return;
         }
 valid_p:
-        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
-                (p->list.next == &old_p->list) &&
-                (p->list.prev == &old_p->list))) {
+        if (old_p == p ||
+            (old_p->pre_handler == aggr_pre_handler &&
+             p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
                 /* Only probe on the hash list */
                 arch_disarm_kprobe(p);
                 hlist_del_rcu(&old_p->hlist);
@@ -654,9 +657,11 @@ valid_p:
         mutex_unlock(&kprobe_mutex);
 
         synchronize_sched();
-        if (p->mod_refcounted &&
-            (mod = module_text_address((unsigned long)p->addr)))
-                module_put(mod);
+        if (p->mod_refcounted) {
+                mod = module_text_address((unsigned long)p->addr);
+                if (mod)
+                        module_put(mod);
+        }
 
         if (cleanup_p) {
                 if (p != old_p) {