Diffstat (limited to 'kernel/kprobes.c')
 kernel/kprobes.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3fed7f0cbcdf..6e33498d665c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
+static LIST_HEAD(freeing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
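The hunk above promotes the reclaim queue to a third static staging list, freeing_list, next to optimizing_list and unoptimizing_list. As the later hunks show, this lets paths outside the optimizer workqueue (notably kill_optimized_kprobe()) queue probes for reclaim under kprobe_mutex instead of relying on a stack-local list that only the optimizer could see. A minimal sketch of this staging-list pattern, with illustrative names rather than the real kprobes symbols:

    /* Sketch (kernel-module style); names here are illustrative. */
    #include <linux/list.h>
    #include <linux/mutex.h>

    struct pending_item {
            struct list_head list;
    };

    static LIST_HEAD(pending_free);      /* plays the role of freeing_list */
    static DEFINE_MUTEX(pending_mutex);  /* plays the role of kprobe_mutex */

    /* Any path may stage an item; a worker reclaims it later. */
    static void stage_for_free(struct pending_item *it)
    {
            mutex_lock(&pending_mutex);
            list_add(&it->list, &pending_free);
            mutex_unlock(&pending_mutex);
    }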
@@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void)
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if need) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+static __kprobes void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
@@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 	/* Ditto to do_optimize_kprobes */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 }
 
 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+static __kprobes void do_free_cleaned_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		BUG_ON(!kprobe_unused(&op->kp));
 		list_del_init(&op->list);
 		free_aggr_kprobe(&op->kp);
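Both functions now walk the shared freeing_list instead of a caller-supplied list. Note that they use list_for_each_entry_safe(): the _safe variant caches the next entry in tmp, which is required because each entry is unlinked (and ultimately freed) while the walk is in progress. Continuing the illustrative sketch above, a drain step looks like:

    #include <linux/slab.h>

    /* Drain helper for the sketch above: entries are unlinked and freed
     * mid-walk, so the _safe iterator is mandatory. */
    static void drain_pending_free(void)
    {
            struct pending_item *it, *tmp;

            mutex_lock(&pending_mutex);
            list_for_each_entry_safe(it, tmp, &pending_free, list) {
                    list_del_init(&it->list);
                    kfree(it);
            }
            mutex_unlock(&pending_mutex);
    }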
@@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
-	LIST_HEAD(free_list);
-
 	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
 	 * kprobes before waiting for quiesence period.
 	 */
-	do_unoptimize_kprobes(&free_list);
+	do_unoptimize_kprobes();
 
 	/*
 	 * Step 2: Wait for quiesence period to ensure all running interrupts
@@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	do_optimize_kprobes();
 
 	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes(&free_list);
+	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
 	mutex_unlock(&kprobe_mutex);
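With the list now global, kprobe_optimizer() no longer needs a stack-local free_list; probes staged from anywhere are picked up on the worker's next pass through its four ordered steps: unoptimize and collect (step 1), wait out a quiescence period so no CPU is still executing the old probed code (step 2), optimize pending probes (step 3), then free the cleaned probes (step 4). The worker is a delayed work item (see DECLARE_DELAYED_WORK above and kick_kprobe_optimizer() in the hunk header), which suggests repeated requests inside the delay window coalesce into one batch. A hedged sketch of that kick-and-batch shape, continuing the sketch above (illustrative names and delay, not the kprobes constants):

    #include <linux/workqueue.h>

    static void batch_worker(struct work_struct *work);
    static DECLARE_DELAYED_WORK(batch_work, batch_worker);

    /* Re-arm only if the worker is not already queued; callers arriving
     * within the delay window share one batch run. */
    static void kick_batch_worker(void)
    {
            if (!delayed_work_pending(&batch_work))
                    schedule_delayed_work(&batch_work, msecs_to_jiffies(50));
    }

    static void batch_worker(struct work_struct *work)
    {
            drain_pending_free();  /* analogous to do_free_cleaned_kprobes() */
    }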
@@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 	if (!list_empty(&op->list))
 		/* Dequeue from the (un)optimization queue */
 		list_del_init(&op->list);
-
 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+	if (kprobe_unused(p)) {
+		/* Enqueue if it is unused */
+		list_add(&op->list, &freeing_list);
+		/*
+		 * Remove unused probes from the hash list. After waiting
+		 * for synchronization, this probe is reclaimed.
+		 * (reclaiming is done by do_free_cleaned_kprobes().)
+		 */
+		hlist_del_rcu(&op->kp.hlist);
+	}
+
 	/* Don't touch the code, because it is already freed. */
 	arch_remove_optimized_kprobe(op);
 }
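This is the substantive fix: when a probe is being killed and its aggregate kprobe is unused, it is queued on freeing_list and unlinked from the kprobe hash table with hlist_del_rcu(). RCU removal means new lookups can no longer find the probe, while readers already traversing the hash under rcu_read_lock() may still see it; per the comment in the hunk, the actual free therefore waits for synchronization before do_free_cleaned_kprobes() reclaims it. A compressed sketch of that unlink-then-reclaim discipline (illustrative names; the kprobes code defers the wait to the optimizer's quiescence step rather than calling synchronize_rcu() inline):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct probe_like {
            struct hlist_node hlist;
    };

    static void remove_and_reclaim(struct probe_like *p)
    {
            hlist_del_rcu(&p->hlist); /* new lookups miss p from here on */
            synchronize_rcu();        /* wait out readers that may still see p */
            kfree(p);                 /* only now is the memory safe to reuse */
    }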
@@ -2322,6 +2332,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	if (copy_from_user(buf, user_buf, buf_size))
 		return -EFAULT;
 
+	buf[buf_size] = '\0';
 	switch (buf[0]) {
 	case 'y':
 	case 'Y':
@@ -2333,6 +2344,8 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case '0':
 		disarm_all_kprobes();
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	return count;
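The last two hunks harden the debugfs "enabled" write handler. copy_from_user() does not NUL-terminate, so buf[buf_size] = '\0' guarantees the parse reads a defined byte (even for a zero-length write, where buf[0] would otherwise be uninitialized stack), and the new default: case makes unrecognized input fail with -EINVAL instead of silently reporting success. A hedged sketch of the resulting pattern as a standalone handler (hypothetical name, not the kprobes function):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static ssize_t write_bool(struct file *f, const char __user *ubuf,
                              size_t count, loff_t *ppos)
    {
            char buf[32];
            size_t n = min(count, sizeof(buf) - 1); /* leave room for '\0' */

            if (copy_from_user(buf, ubuf, n))
                    return -EFAULT;
            buf[n] = '\0'; /* buf[0] is defined even when count == 0 */

            switch (buf[0]) {
            case 'y': case 'Y': case '1':
                    /* arm */
                    break;
            case 'n': case 'N': case '0':
                    /* disarm */
                    break;
            default:
                    return -EINVAL; /* unknown input is an error, not a no-op */
            }
            return count;
    }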