Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	157
1 file changed, 101 insertions(+), 56 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5beda378cc75..3ea6325228da 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,10 +48,11 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-static DEFINE_SPINLOCK(kprobe_lock);	/* Protects kprobe_table */
+DECLARE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
  * single-stepped. x86_64, POWER4 and above have no-exec support and
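Note: the spinlock that used to guard kprobe_table is replaced by a semaphore used as a mutex (this kernel predates struct mutex), so the registration path may sleep; kretprobe_lock stays a spinlock because it is taken from the probe fast path. A minimal sketch of the DECLARE_MUTEX()/down()/up() idiom, with hypothetical names:

#include <asm/semaphore.h>

/* Illustration only: a binary semaphore acting as a sleeping lock, the
 * same idiom used for kprobe_mutex above. */
static DECLARE_MUTEX(example_mutex);

static void example_update(void)
{
	down(&example_mutex);	/* may sleep, so GFP_KERNEL allocations are fine */
	/* ... modify data protected by example_mutex ... */
	up(&example_mutex);
}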
@@ -151,6 +152,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
 		}
 	}
 }
+#endif
 
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
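Note: the slot allocator for out-of-line single-step copies (get_insn_slot()/free_insn_slot() above) is now built only when the architecture defines __ARCH_WANT_KPROBES_INSN_SLOT. Roughly, an opting-in architecture backs kprobe->ainsn.insn with a slot from this pool in its arch_prepare_kprobe(); the sketch below is illustrative only and omits the per-arch instruction fix-ups:

#include <linux/kprobes.h>
#include <linux/string.h>

/* Sketch of the arch side: the arch puts
 *	#define __ARCH_WANT_KPROBES_INSN_SLOT
 * in its asm/kprobes.h and then uses the shared allocator. */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	return 0;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	free_insn_slot(p->ainsn.insn);
}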
@@ -165,7 +167,7 @@ static inline void reset_kprobe_instance(void)
 
 /*
  * This routine is called either:
- * - under the kprobe_lock spinlock - during kprobe_[un]register()
+ * - under the kprobe_mutex - during kprobe_[un]register()
  * OR
  * - with preemption disabled - from arch/xxx/kernel/kprobes.c
  */
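Note: the comment documents the two lock-free access modes for kprobe_table: writers hold kprobe_mutex, readers run with preemption disabled under RCU. get_kprobe() in this file follows the usual RCU hash-lookup pattern, roughly:

struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}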
@@ -246,6 +248,19 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return ret;
 }
 
+/* Walks the list and increments nmissed count for multiprobe case */
+void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
+{
+	struct kprobe *kp;
+	if (p->pre_handler != aggr_pre_handler) {
+		p->nmissed++;
+	} else {
+		list_for_each_entry_rcu(kp, &p->list, list)
+			kp->nmissed++;
+	}
+	return;
+}
+
 /* Called with kretprobe_lock held */
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
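Note: kprobes_inc_nmissed_count() is a new helper for arch code: when a hit cannot be serviced (for example the breakpoint fires while another probe is already being handled on the same CPU), the miss is charged either to the single kprobe or to every kprobe chained behind the aggregate handler. Consumers simply read the counter from struct kprobe; a hypothetical reporting helper:

#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustration only: nmissed accumulates hits that were skipped, e.g.
 * reentrant hits accounted via kprobes_inc_nmissed_count(). */
static void example_report_missed(struct kprobe *kp)
{
	if (kp->nmissed)
		printk(KERN_INFO "kprobe at %p missed %lu hits\n",
		       kp->addr, kp->nmissed);
}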
@@ -399,16 +414,12 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	INIT_LIST_HEAD(&ap->list);
 	list_add_rcu(&p->list, &ap->list);
 
-	INIT_HLIST_NODE(&ap->hlist);
-	hlist_del_rcu(&p->hlist);
-	hlist_add_head_rcu(&ap->hlist,
-		&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+	hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spin_lock
 */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
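Note: add_aggr_kprobe() used to unhook the original probe and then re-add the aggregate probe as two separate RCU list operations, leaving a window in which a lock-free lookup could find neither; hlist_replace_rcu() swaps the nodes in a single publish step, so concurrent readers always see one of the two. A sketch of the primitive on a hypothetical structure:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	int			key;
	struct hlist_node	node;
};

/* Replace old_item with new_item in whatever hash bucket old_item is on.
 * RCU readers walking the bucket see either the old or the new element,
 * never a bucket with the entry missing. */
static void example_replace(struct item *old_item, struct item *new_item)
{
	new_item->key = old_item->key;
	hlist_replace_rcu(&old_item->node, &new_item->node);
	synchronize_rcu();	/* wait for readers before freeing the old one */
	kfree(old_item);
}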
@@ -420,7 +431,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 		copy_kprobe(old_p, p);
 		ret = add_new_kprobe(old_p, p);
 	} else {
-		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
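Note: because registration now runs under kprobe_mutex rather than inside spin_lock_irqsave(), the aggregate kprobe can be allocated with GFP_KERNEL, and kzalloc() is the idiomatic replacement for the kcalloc(1, ...) form. A hypothetical helper showing the equivalence:

#include <linux/slab.h>
#include <linux/kprobes.h>

/* Illustration only: one zeroed object, allocated in sleepable context. */
static struct kprobe *example_alloc_aggr(void)
{
	/* was: kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC) */
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}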
@@ -430,25 +441,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 	return ret;
 }
 
-/* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
-{
-	arch_disarm_kprobe(p);
-	hlist_del_rcu(&p->hlist);
-	spin_unlock_irqrestore(&kprobe_lock, flags);
-	arch_remove_kprobe(p);
-}
-
-static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
-		struct kprobe *p, unsigned long flags)
-{
-	list_del_rcu(&p->list);
-	if (list_empty(&old_p->list))
-		cleanup_kprobe(old_p, flags);
-	else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
-}
-
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
 	if (addr >= (unsigned long)__kprobes_text_start
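Note: the cleanup_kprobe()/cleanup_aggr_kprobe() helpers had the spinlock release baked into them; with the mutex they disappear and unregister_kprobe() below does the unlinking inline. The surrounding context, in_kprobes_functions(), rejects probes placed on the kprobes infrastructure itself: functions marked __kprobes are collected into the .kprobes.text section, bounded by __kprobes_text_start/__kprobes_text_end. A sketch of that mechanism, with a renamed macro to keep it clearly illustrative:

/* The real __kprobes marker in the kprobes headers of this era expands to
 * a section attribute along these lines: */
#define __example_kprobes	__attribute__((__section__(".kprobes.text")))

/* Any function marked this way falls inside the range checked by
 * in_kprobes_functions(), so register_kprobe() on it returns -EINVAL. */
void __example_kprobes example_never_probed(void)
{
}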
@@ -457,26 +449,44 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
 	return 0;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+static int __kprobes __register_kprobe(struct kprobe *p,
+	unsigned long called_from)
 {
 	int ret = 0;
-	unsigned long flags = 0;
 	struct kprobe *old_p;
+	struct module *probed_mod;
 
-	if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
-		return ret;
-	if ((ret = arch_prepare_kprobe(p)) != 0)
-		goto rm_kprobe;
+	if ((!kernel_text_address((unsigned long) p->addr)) ||
+		in_kprobes_functions((unsigned long) p->addr))
+		return -EINVAL;
+
+	p->mod_refcounted = 0;
+	/* Check are we probing a module */
+	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+		struct module *calling_mod = module_text_address(called_from);
+		/* We must allow modules to probe themself and
+		 * in this case avoid incrementing the module refcount,
+		 * so as to allow unloading of self probing modules.
+		 */
+		if (calling_mod && (calling_mod != probed_mod)) {
+			if (unlikely(!try_module_get(probed_mod)))
+				return -EINVAL;
+			p->mod_refcounted = 1;
+		} else
+			probed_mod = NULL;
+	}
 
 	p->nmissed = 0;
-	spin_lock_irqsave(&kprobe_lock, flags);
+	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
 	}
 
-	arch_copy_kprobe(p);
+	if ((ret = arch_prepare_kprobe(p)) != 0)
+		goto out;
+
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
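Note: __register_kprobe() is the new common backend. It refuses addresses outside kernel or module text, and when the probed address lies in a module other than the caller's it pins that module with try_module_get() (recorded in mod_refcounted) so the probed code cannot be unloaded while the probe is armed; a module probing itself deliberately skips the reference so it can still be removed. Registration through the public wrapper looks roughly like the module sketch below (names and the address-by-parameter scheme are hypothetical; in this kernel the caller must fill in kp.addr itself):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static unsigned long probe_address;	/* e.g. taken from System.map */
module_param(probe_address, ulong, 0);

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "hit probe at %p\n", p->addr);
	return 0;		/* let the probed instruction run normally */
}

static struct kprobe example_kp = {
	.pre_handler = example_pre,
};

static int __init example_init(void)
{
	example_kp.addr = (kprobe_opcode_t *)probe_address;
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");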
@@ -484,33 +494,66 @@ int __kprobes register_kprobe(struct kprobe *p)
 	arch_arm_kprobe(p);
 
 out:
-	spin_unlock_irqrestore(&kprobe_lock, flags);
-rm_kprobe:
-	if (ret == -EEXIST)
-		arch_remove_kprobe(p);
+	up(&kprobe_mutex);
+
+	if (ret && probed_mod)
+		module_put(probed_mod);
 	return ret;
 }
 
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	return __register_kprobe(p,
+		(unsigned long)__builtin_return_address(0));
+}
+
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
-	unsigned long flags;
-	struct kprobe *old_p;
+	struct module *mod;
+	struct kprobe *old_p, *list_p;
+	int cleanup_p;
 
-	spin_lock_irqsave(&kprobe_lock, flags);
+	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
-	if (old_p) {
-		/* cleanup_*_kprobe() does the spin_unlock_irqrestore */
-		if (old_p->pre_handler == aggr_pre_handler)
-			cleanup_aggr_kprobe(old_p, p, flags);
-		else
-			cleanup_kprobe(p, flags);
-
-		synchronize_sched();
-		if (old_p->pre_handler == aggr_pre_handler &&
-				list_empty(&old_p->list))
+	if (unlikely(!old_p)) {
+		up(&kprobe_mutex);
+		return;
+	}
+	if (p != old_p) {
+		list_for_each_entry_rcu(list_p, &old_p->list, list)
+			if (list_p == p)
+			/* kprobe p is a valid probe */
+				goto valid_p;
+		up(&kprobe_mutex);
+		return;
+	}
+valid_p:
+	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
+		(p->list.next == &old_p->list) &&
+		(p->list.prev == &old_p->list))) {
+		/* Only probe on the hash list */
+		arch_disarm_kprobe(p);
+		hlist_del_rcu(&old_p->hlist);
+		cleanup_p = 1;
+	} else {
+		list_del_rcu(&p->list);
+		cleanup_p = 0;
+	}
+
+	up(&kprobe_mutex);
+
+	synchronize_sched();
+	if (p->mod_refcounted &&
+	    (mod = module_text_address((unsigned long)p->addr)))
+		module_put(mod);
+
+	if (cleanup_p) {
+		if (p != old_p) {
+			list_del_rcu(&p->list);
 			kfree(old_p);
-	} else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
+		}
+		arch_remove_kprobe(p);
+	}
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
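Note: unregister_kprobe() now decides, under kprobe_mutex, whether p is the only probe left at the address (or the sole child of an aggregate), in which case the hash entry is disarmed and removed, or whether only p's own list entry has to go; the module reference taken at registration is dropped only after synchronize_sched(), which guarantees that no CPU can still be executing the probed path. The ordering it relies on for the simple single-probe case, sketched as a hypothetical helper:

#include <linux/kprobes.h>
#include <linux/rcupdate.h>

/* Illustration of the teardown ordering (not part of the patch). */
static void example_teardown(struct kprobe *p)
{
	down(&kprobe_mutex);
	arch_disarm_kprobe(p);		/* restore the original instruction */
	hlist_del_rcu(&p->hlist);	/* unpublish from the lock-free hash */
	up(&kprobe_mutex);

	/* Lookups run with preemption disabled, so synchronize_sched()
	 * ensures every CPU has passed a quiescent state and no handler
	 * still references p before arch resources are freed. */
	synchronize_sched();
	arch_remove_kprobe(p);
}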
@@ -524,7 +567,8 @@ int __kprobes register_jprobe(struct jprobe *jp)
 	jp->kp.pre_handler = setjmp_pre_handler;
 	jp->kp.break_handler = longjmp_break_handler;
 
-	return register_kprobe(&jp->kp);
+	return __register_kprobe(&jp->kp,
+		(unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
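Note: register_jprobe() and register_kretprobe() call __register_kprobe() directly, passing their caller's address via __builtin_return_address(0); that way the self-probing check is made against the module that called the public API rather than against kernel/kprobes.c. For reference, a minimal jprobe under this era's API (the probed function and names are only examples, and .kp.addr must be set before registering):

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* A jprobe handler mirrors the probed function's signature and must end
 * with jprobe_return(); do_fork()'s 2.6-era signature is used purely as
 * an example here. */
static long example_jdo_fork(unsigned long clone_flags, unsigned long stack_start,
			     struct pt_regs *regs, unsigned long stack_size,
			     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "fork: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();
	return 0;	/* never reached */
}

static struct jprobe example_jp = {
	.entry = JPROBE_ENTRY(example_jdo_fork),
	/* .kp.addr = address of do_fork, resolved by the caller */
};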
@@ -564,7 +608,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
 	rp->nmissed = 0;
 	/* Establish function entry probe point */
-	if ((ret = register_kprobe(&rp->kp)) != 0)
+	if ((ret = __register_kprobe(&rp->kp,
+		(unsigned long)__builtin_return_address(0))) != 0)
 		free_rp_inst(rp);
 	return ret;
 }
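Note: the same treatment applies to kretprobes; the entry probe established here is what later diverts the return address so the handler runs when the probed function returns. A minimal return-probe sketch under this API, names hypothetical:

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Return-probe handler: runs when the probed function returns. */
static int example_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "probed function returned to %p\n", ri->ret_addr);
	return 0;
}

static struct kretprobe example_rp = {
	.handler   = example_ret_handler,
	.maxactive = 20,	/* max concurrently tracked activations */
	/* .kp.addr = entry address of the probed function, set before
	 * calling register_kretprobe(&example_rp) */
};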