Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	137
1 file changed, 79 insertions, 58 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3bb71e63a37e..3ea6325228da 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,10 +48,11 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-static DEFINE_SPINLOCK(kprobe_lock);	/* Protects kprobe_table */
+DECLARE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
  * single-stepped. x86_64, POWER4 and above have no-exec support and
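A note on the primitive introduced above: in this kernel generation, DECLARE_MUTEX() builds a struct semaphore initialized to a count of 1 (the dedicated struct mutex type arrived later), acquired with down() and released with up(). Unlike the spinlock it replaces, down() may sleep, which is what later hunks rely on for GFP_KERNEL allocation and module_put() under the lock. A minimal usage sketch, with a hypothetical example_mutex and critical section:

#include <asm/semaphore.h>

static DECLARE_MUTEX(example_mutex);	/* semaphore with count 1 */

static void example_update(void)
{
	down(&example_mutex);	/* may sleep; no IRQ flags to save/restore */
	/* ... modify a shared structure such as a hash table ... */
	up(&example_mutex);
}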
@@ -151,6 +152,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
 		}
 	}
 }
+#endif
 
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
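The new #ifdef makes the instruction-slot allocator opt-in rather than unconditional. An architecture that single-steps out of line defines __ARCH_WANT_KPROBES_INSN_SLOT in its asm/kprobes.h and calls get_insn_slot()/free_insn_slot() from its arch_prepare_kprobe()/arch_remove_kprobe(); an architecture that executes the probed instruction in place leaves the macro undefined, and the allocator above compiles away. A hypothetical arch header as a sketch (the architecture name and struct layout are illustrative):

/* include/asm-foo/kprobes.h -- hypothetical architecture */
#define __ARCH_WANT_KPROBES_INSN_SLOT

struct arch_specific_insn {
	/* out-of-line copy of the probed instruction,
	 * allocated with get_insn_slot() */
	kprobe_opcode_t *insn;
};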
@@ -165,7 +167,7 @@ static inline void reset_kprobe_instance(void)
 
 /*
  * This routine is called either:
- *	- under the kprobe_lock spinlock - during kprobe_[un]register()
+ *	- under the kprobe_mutex - during kprobe_[un]register()
  *				OR
  *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
  */
@@ -418,7 +420,6 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spin_lock
  */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 					  struct kprobe *p)
@@ -430,7 +431,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 		copy_kprobe(old_p, p);
 		ret = add_new_kprobe(old_p, p);
 	} else {
-		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
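On the allocation change just above: kzalloc(size, flags) is equivalent to kcalloc(1, size, flags), so the substantive change is the gfp mask. GFP_ATOMIC was forced while this ran under the kprobe_lock spinlock with interrupts disabled; under the sleepable kprobe_mutex, GFP_KERNEL can block and reclaim instead of failing under memory pressure, which is also why the TODO deleted in the previous hunk is now moot. Side by side:

/* old: inside spin_lock_irqsave(), sleeping forbidden */
ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);

/* new: under kprobe_mutex, the allocator may sleep */
ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);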
@@ -440,25 +441,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 	return ret;
 }
 
-/* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
-{
-	arch_disarm_kprobe(p);
-	hlist_del_rcu(&p->hlist);
-	spin_unlock_irqrestore(&kprobe_lock, flags);
-	arch_remove_kprobe(p);
-}
-
-static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
-		struct kprobe *p, unsigned long flags)
-{
-	list_del_rcu(&p->list);
-	if (list_empty(&old_p->list))
-		cleanup_kprobe(old_p, flags);
-	else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
-}
-
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
 	if (addr >= (unsigned long)__kprobes_text_start
@@ -467,33 +449,44 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
 	return 0;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+static int __kprobes __register_kprobe(struct kprobe *p,
+	unsigned long called_from)
 {
 	int ret = 0;
-	unsigned long flags = 0;
 	struct kprobe *old_p;
-	struct module *mod;
+	struct module *probed_mod;
 
 	if ((!kernel_text_address((unsigned long) p->addr)) ||
 		in_kprobes_functions((unsigned long) p->addr))
 		return -EINVAL;
 
-	if ((mod = module_text_address((unsigned long) p->addr)) &&
-			(unlikely(!try_module_get(mod))))
-		return -EINVAL;
-
-	if ((ret = arch_prepare_kprobe(p)) != 0)
-		goto rm_kprobe;
+	p->mod_refcounted = 0;
+	/* Check are we probing a module */
+	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+		struct module *calling_mod = module_text_address(called_from);
+		/* We must allow modules to probe themself and
+		 * in this case avoid incrementing the module refcount,
+		 * so as to allow unloading of self probing modules.
+		 */
+		if (calling_mod && (calling_mod != probed_mod)) {
+			if (unlikely(!try_module_get(probed_mod)))
+				return -EINVAL;
+			p->mod_refcounted = 1;
+		} else
+			probed_mod = NULL;
+	}
 
 	p->nmissed = 0;
-	spin_lock_irqsave(&kprobe_lock, flags);
+	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
 	}
 
-	arch_copy_kprobe(p);
+	if ((ret = arch_prepare_kprobe(p)) != 0)
+		goto out;
+
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
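The called_from plumbing above lets a module probe its own text without pinning itself in memory: module_text_address() maps both the probed address and the registration call site to their owning modules, and try_module_get() is taken only when they differ. A hypothetical self-probing module as a sketch (all names illustrative): the register_kprobe() call site sits in module text, so __builtin_return_address(0) resolves to the same module as p->addr, calling_mod equals probed_mod, mod_refcounted stays 0, and the module can still be unloaded once its exit path unregisters the probe.

#include <linux/module.h>
#include <linux/kprobes.h>

static noinline int my_traced(int x)	/* function we probe */
{
	return x + 1;
}

static int my_pre(struct kprobe *kp, struct pt_regs *regs)
{
	return 0;	/* continue with the original instruction */
}

static struct kprobe my_kp = {
	.addr		= (kprobe_opcode_t *)my_traced,
	.pre_handler	= my_pre,
};

static int __init my_init(void)
{
	return register_kprobe(&my_kp);	/* self-probe: no refcount taken */
}

static void __exit my_exit(void)
{
	unregister_kprobe(&my_kp);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");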
@@ -501,40 +494,66 @@ int __kprobes register_kprobe(struct kprobe *p)
 	arch_arm_kprobe(p);
 
 out:
-	spin_unlock_irqrestore(&kprobe_lock, flags);
-rm_kprobe:
-	if (ret == -EEXIST)
-		arch_remove_kprobe(p);
-	if (ret && mod)
-		module_put(mod);
+	up(&kprobe_mutex);
+
+	if (ret && probed_mod)
+		module_put(probed_mod);
 	return ret;
 }
 
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	return __register_kprobe(p,
+		(unsigned long)__builtin_return_address(0));
+}
+
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
-	unsigned long flags;
-	struct kprobe *old_p;
 	struct module *mod;
+	struct kprobe *old_p, *list_p;
+	int cleanup_p;
 
-	spin_lock_irqsave(&kprobe_lock, flags);
+	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
-	if (old_p) {
-		/* cleanup_*_kprobe() does the spin_unlock_irqrestore */
-		if (old_p->pre_handler == aggr_pre_handler)
-			cleanup_aggr_kprobe(old_p, p, flags);
-		else
-			cleanup_kprobe(p, flags);
+	if (unlikely(!old_p)) {
+		up(&kprobe_mutex);
+		return;
+	}
+	if (p != old_p) {
+		list_for_each_entry_rcu(list_p, &old_p->list, list)
+			if (list_p == p)
+			/* kprobe p is a valid probe */
+				goto valid_p;
+		up(&kprobe_mutex);
+		return;
+	}
+valid_p:
+	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
+		(p->list.next == &old_p->list) &&
+		(p->list.prev == &old_p->list))) {
+		/* Only probe on the hash list */
+		arch_disarm_kprobe(p);
+		hlist_del_rcu(&old_p->hlist);
+		cleanup_p = 1;
+	} else {
+		list_del_rcu(&p->list);
+		cleanup_p = 0;
+	}
 
-		synchronize_sched();
+	up(&kprobe_mutex);
 
-		if ((mod = module_text_address((unsigned long)p->addr)))
-			module_put(mod);
+	synchronize_sched();
+	if (p->mod_refcounted &&
+		(mod = module_text_address((unsigned long)p->addr)))
+		module_put(mod);
 
-		if (old_p->pre_handler == aggr_pre_handler &&
-				list_empty(&old_p->list))
+	if (cleanup_p) {
+		if (p != old_p) {
+			list_del_rcu(&p->list);
 			kfree(old_p);
-	} else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
+		}
+		arch_remove_kprobe(p);
+	}
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
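The rewritten unregister path replaces the deleted cleanup_kprobe()/cleanup_aggr_kprobe() helpers with an unpublish, wait, reclaim sequence. Kprobe handlers run with preemption disabled, so synchronize_sched() serves as the RCU grace period here: once it returns, no CPU can still be inside a handler that found the probe through the hash table, and only then are the kprobe freed and the instruction slot released. Condensed from the hunk above (aggregate and error cases omitted):

down(&kprobe_mutex);
hlist_del_rcu(&old_p->hlist);	/* 1. unpublish: new hits can't find it */
up(&kprobe_mutex);

synchronize_sched();		/* 2. wait out handlers already running */

kfree(old_p);			/* 3. reclaim is now safe */
arch_remove_kprobe(p);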
@@ -548,7 +567,8 @@ int __kprobes register_jprobe(struct jprobe *jp)
 	jp->kp.pre_handler = setjmp_pre_handler;
 	jp->kp.break_handler = longjmp_break_handler;
 
-	return register_kprobe(&jp->kp);
+	return __register_kprobe(&jp->kp,
+		(unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
@@ -588,7 +608,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
 	rp->nmissed = 0;
 	/* Establish function entry probe point */
-	if ((ret = register_kprobe(&rp->kp)) != 0)
+	if ((ret = __register_kprobe(&rp->kp,
+		(unsigned long)__builtin_return_address(0))) != 0)
 		free_rp_inst(rp);
 	return ret;
 }
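A closing observation on the registration wrappers: each evaluates __builtin_return_address(0) at its own entry point instead of simply calling register_kprobe(). Captured one frame deeper inside a shared helper, the return address would always land in kprobes.c, so the calling_mod test in __register_kprobe() could never match a module registering a jprobe or kretprobe on itself. Illustrative contrast (the second form is a deliberately broken variant, not in the patch):

/* correct: called_from is the jprobe user's call site */
return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));

/* broken: called_from would point into kprobes.c itself,
 * defeating the self-probing-module check */
return register_kprobe(&jp->kp);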