Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/kprobes.c | 91
 1 files changed, 43 insertions, 48 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3897630d2335..f14ccd35e9b6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,7 +48,7 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-static DEFINE_SPINLOCK(kprobe_lock);    /* Protects kprobe_table */
+static DECLARE_MUTEX(kprobe_mutex);     /* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);        /* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
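The hunk above replaces the IRQ-disabling spinlock that protected kprobe_table with a semaphore used as a mutex (DECLARE_MUTEX / down() / up(), the idiom of this kernel vintage). The point is that the register/unregister paths may now sleep while holding the lock, which the later hunks rely on when they call arch_prepare_kprobe() and arch_remove_kprobe() under it. A minimal user-space sketch of the idea, using POSIX threads as a stand-in for the kernel primitives; probe_table_mutex, slow_prepare() and register_probe() are illustrative names, not from this patch:

/*
 * Sketch only: a user-space analogue (POSIX threads, not the kernel API) of
 * why kprobe_table now needs a sleeping lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static pthread_mutex_t probe_table_mutex = PTHREAD_MUTEX_INITIALIZER;
static int registered_probes;           /* stand-in for kprobe_table state */

/* Stand-in for arch_prepare_kprobe(): allocates, so it may block.  Under the
 * old spin_lock_irqsave() this kind of work was not allowed inside the lock. */
static int slow_prepare(void)
{
        void *insn_slot = malloc(64);

        if (!insn_slot)
                return -1;
        usleep(1000);                   /* pretend this takes a while */
        free(insn_slot);
        return 0;
}

static int register_probe(void)
{
        int ret;

        pthread_mutex_lock(&probe_table_mutex);    /* like down(&kprobe_mutex) */
        ret = slow_prepare();                      /* sleeping here is fine    */
        if (ret == 0)
                registered_probes++;
        pthread_mutex_unlock(&probe_table_mutex);  /* like up(&kprobe_mutex)   */
        return ret;
}

int main(void)
{
        if (register_probe() == 0)
                printf("registered, total = %d\n", registered_probes);
        return 0;
}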
@@ -167,7 +167,7 @@ static inline void reset_kprobe_instance(void)
 
 /*
  * This routine is called either:
- *      - under the kprobe_lock spinlock - during kprobe_[un]register()
+ *      - under the kprobe_mutex - during kprobe_[un]register()
  *                      OR
  *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
  */
@@ -420,7 +420,6 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spin_lock
  */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                           struct kprobe *p)
@@ -442,25 +441,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
         return ret;
 }
 
-/* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
-{
-        arch_disarm_kprobe(p);
-        hlist_del_rcu(&p->hlist);
-        spin_unlock_irqrestore(&kprobe_lock, flags);
-        arch_remove_kprobe(p);
-}
-
-static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
-                                       struct kprobe *p, unsigned long flags)
-{
-        list_del_rcu(&p->list);
-        if (list_empty(&old_p->list))
-                cleanup_kprobe(old_p, flags);
-        else
-                spin_unlock_irqrestore(&kprobe_lock, flags);
-}
-
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
         if (addr >= (unsigned long)__kprobes_text_start
@@ -472,7 +452,6 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
 int __kprobes register_kprobe(struct kprobe *p)
 {
         int ret = 0;
-        unsigned long flags = 0;
         struct kprobe *old_p;
         struct module *mod;
 
@@ -484,18 +463,17 @@ int __kprobes register_kprobe(struct kprobe *p)
             (unlikely(!try_module_get(mod))))
                 return -EINVAL;
 
-        if ((ret = arch_prepare_kprobe(p)) != 0)
-                goto rm_kprobe;
-
         p->nmissed = 0;
-        spin_lock_irqsave(&kprobe_lock, flags);
+        down(&kprobe_mutex);
         old_p = get_kprobe(p->addr);
         if (old_p) {
                 ret = register_aggr_kprobe(old_p, p);
                 goto out;
         }
 
-        arch_copy_kprobe(p);
+        if ((ret = arch_prepare_kprobe(p)) != 0)
+                goto out;
+
         INIT_HLIST_NODE(&p->hlist);
         hlist_add_head_rcu(&p->hlist,
                        &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
@@ -503,10 +481,8 @@ int __kprobes register_kprobe(struct kprobe *p)
         arch_arm_kprobe(p);
 
 out:
-        spin_unlock_irqrestore(&kprobe_lock, flags);
-rm_kprobe:
-        if (ret == -EEXIST)
-                arch_remove_kprobe(p);
+        up(&kprobe_mutex);
+
         if (ret && mod)
                 module_put(mod);
         return ret;
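The two hunks above rework register_kprobe(): the lookup of an existing probe now comes first, arch_prepare_kprobe() is called only for a genuinely new probe and under kprobe_mutex, and every failure falls through to the single out: label that drops the mutex, which is why the old rm_kprobe:/-EEXIST unwind could go away. A condensed sketch of that control flow under the same locking discipline; table, lookup(), aggregate() and prepare() are placeholders, not the kernel functions:

/*
 * Sketch only: the "look up first, prepare second, one unlock point" ordering
 * of the reworked register_kprobe(), with pthreads and placeholder helpers
 * instead of the kernel API.
 */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct probe { void *addr; struct probe *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct probe *table;                 /* stand-in for kprobe_table */

static struct probe *lookup(void *addr)     /* like get_kprobe() */
{
        struct probe *p;

        for (p = table; p; p = p->next)
                if (p->addr == addr)
                        return p;
        return NULL;
}

static int aggregate(struct probe *old, struct probe *p)  /* like register_aggr_kprobe() */
{
        (void)old;
        (void)p;
        return 0;
}

static int prepare(struct probe *p)         /* like arch_prepare_kprobe(): may sleep, may fail */
{
        return p->addr ? 0 : -EINVAL;
}

int register_probe(struct probe *p)
{
        struct probe *old;
        int ret;

        pthread_mutex_lock(&table_lock);    /* down(&kprobe_mutex) */
        old = lookup(p->addr);
        if (old) {
                ret = aggregate(old, p);    /* nothing was prepared yet, so
                                               there is nothing to undo here */
                goto out;
        }
        ret = prepare(p);                   /* only for a new probe, under the lock */
        if (ret)
                goto out;
        p->next = table;                    /* like hlist_add_head_rcu() */
        table = p;
out:
        pthread_mutex_unlock(&table_lock);  /* up(&kprobe_mutex): single exit point */
        return ret;
}

int main(void)
{
        static struct probe p1 = { .addr = (void *)0x1000 };
        static struct probe p2 = { .addr = (void *)0x1000 };

        /* the second registration at the same address takes the aggregate path */
        return register_probe(&p1) || register_probe(&p2);
}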
@@ -514,29 +490,48 @@ rm_kprobe:
 
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
-        unsigned long flags;
-        struct kprobe *old_p;
         struct module *mod;
+        struct kprobe *old_p, *cleanup_p;
 
-        spin_lock_irqsave(&kprobe_lock, flags);
+        down(&kprobe_mutex);
         old_p = get_kprobe(p->addr);
-        if (old_p) {
-                /* cleanup_*_kprobe() does the spin_unlock_irqrestore */
-                if (old_p->pre_handler == aggr_pre_handler)
-                        cleanup_aggr_kprobe(old_p, p, flags);
-                else
-                        cleanup_kprobe(p, flags);
+        if (unlikely(!old_p)) {
+                up(&kprobe_mutex);
+                return;
+        }
 
-                synchronize_sched();
+        if ((old_p->pre_handler == aggr_pre_handler) &&
+            (p->list.next == &old_p->list) &&
+            (p->list.prev == &old_p->list)) {
+                /* Only one element in the aggregate list */
+                arch_disarm_kprobe(p);
+                hlist_del_rcu(&old_p->hlist);
+                cleanup_p = old_p;
+        } else if (old_p == p) {
+                /* Only one kprobe element in the hash list */
+                arch_disarm_kprobe(p);
+                hlist_del_rcu(&p->hlist);
+                cleanup_p = p;
+        } else {
+                list_del_rcu(&p->list);
+                cleanup_p = NULL;
+        }
 
-                if ((mod = module_text_address((unsigned long)p->addr)))
-                        module_put(mod);
+        up(&kprobe_mutex);
 
-                if (old_p->pre_handler == aggr_pre_handler &&
-                                list_empty(&old_p->list))
+        synchronize_sched();
+        if ((mod = module_text_address((unsigned long)p->addr)))
+                module_put(mod);
+
+        if (cleanup_p) {
+                if (cleanup_p->pre_handler == aggr_pre_handler) {
+                        list_del_rcu(&p->list);
                         kfree(old_p);
-        } else
-                spin_unlock_irqrestore(&kprobe_lock, flags);
+                }
+                down(&kprobe_mutex);
+                arch_remove_kprobe(p);
+                up(&kprobe_mutex);
+        }
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
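The rewritten unregister_kprobe() decides, still under kprobe_mutex, which of three cases applies (last kprobe on an aggregate list, sole kprobe at the address, or one of several on the aggregate), unlinks accordingly, drops the mutex, waits out in-flight handlers with synchronize_sched(), and only then frees the aggregate and calls arch_remove_kprobe(). The test (p->list.next == &old_p->list) && (p->list.prev == &old_p->list) simply says "p is the only entry on old_p's circular list". A self-contained sketch of that check with a tiny local list type (modelled on, but not, <linux/list.h>):

/*
 * Sketch: the "is p the only entry on the aggregate list?" test used above,
 * on a small circular doubly-linked list.  Local types only.
 */
#include <assert.h>
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

static void list_init(struct list_node *head)
{
        head->next = head->prev = head;
}

static void list_add(struct list_node *entry, struct list_node *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

/* True when 'entry' is the only node linked to 'head': its next and prev both
 * point back at the list head, which is exactly the check unregister_kprobe()
 * performs with p->list.next == &old_p->list && p->list.prev == &old_p->list. */
static int only_entry(struct list_node *entry, struct list_node *head)
{
        return entry->next == head && entry->prev == head;
}

int main(void)
{
        struct list_node head, a, b;

        list_init(&head);
        list_add(&a, &head);
        assert(only_entry(&a, &head));      /* single element: the whole
                                               aggregate can be torn down */
        list_add(&b, &head);
        assert(!only_entry(&a, &head));     /* others remain: just unlink 'a' */
        printf("single-element check behaves as expected\n");
        return 0;
}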