 kernel/kprobes.c | 43 ++++++++++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab99caf2b167..1f4f9b9d5c89 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -427,26 +427,14 @@ static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
 #define OPTIMIZE_DELAY 5
 
-/* Kprobe jump optimizer */
-static __kprobes void kprobe_optimizer(struct work_struct *work)
+/*
+ * Optimize (replace a breakpoint with a jump) kprobes listed on
+ * optimizing_list.
+ */
+static __kprobes void do_optimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
-	/* Lock modules while optimizing kprobes */
-	mutex_lock(&module_mutex);
-	mutex_lock(&kprobe_mutex);
-	if (kprobes_all_disarmed || !kprobes_allow_optimization)
-		goto end;
-
-	/*
-	 * Wait for quiesence period to ensure all running interrupts
-	 * are done. Because optprobe may modify multiple instructions
-	 * there is a chance that Nth instruction is interrupted. In that
-	 * case, running interrupt can return to 2nd-Nth byte of jump
-	 * instruction. This wait is for avoiding it.
-	 */
-	synchronize_sched();
-
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -467,6 +455,27 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	}
 	mutex_unlock(&text_mutex);
 	put_online_cpus();
+}
+
+/* Kprobe jump optimizer */
+static __kprobes void kprobe_optimizer(struct work_struct *work)
+{
+	/* Lock modules while optimizing kprobes */
+	mutex_lock(&module_mutex);
+	mutex_lock(&kprobe_mutex);
+	if (kprobes_all_disarmed || !kprobes_allow_optimization)
+		goto end;
+
+	/*
+	 * Wait for quiesence period to ensure all running interrupts
+	 * are done. Because optprobe may modify multiple instructions
+	 * there is a chance that Nth instruction is interrupted. In that
+	 * case, running interrupt can return to 2nd-Nth byte of jump
+	 * instruction. This wait is for avoiding it.
+	 */
+	synchronize_sched();
+
+	do_optimize_kprobes();
 end:
 	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
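For readers less familiar with the deferred-work pattern this patch refactors, the sketch below shows the same "gate and quiesce in the work callback, patch in an extracted helper" split as a stand-alone module skeleton. It is illustrative only and not part of the commit: patch_mutex, patch_worker(), do_patch() and PATCH_DELAY are hypothetical names, and synchronize_rcu() stands in for the synchronize_sched() quiescence wait used above (the two were merged in later kernels).

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(patch_mutex);
static void patch_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(patch_work, patch_worker);
#define PATCH_DELAY 5	/* delay in jiffies, cf. OPTIMIZE_DELAY */

/* Extracted helper: only the actual patching, no gating logic. */
static void do_patch(void)
{
	/* ... modify code/data, serialized by patch_mutex ... */
}

/* Work callback: take locks, wait for quiescence, call the helper. */
static void patch_worker(struct work_struct *work)
{
	mutex_lock(&patch_mutex);
	/* Quiescence wait, as synchronize_sched() does in the patch. */
	synchronize_rcu();
	do_patch();
	mutex_unlock(&patch_mutex);
}

static int __init patch_example_init(void)
{
	/* Defer the patching pass, as the kprobe optimizer defers its. */
	schedule_delayed_work(&patch_work, PATCH_DELAY);
	return 0;
}

static void __exit patch_example_exit(void)
{
	cancel_delayed_work_sync(&patch_work);
}

module_init(patch_example_init);
module_exit(patch_example_exit);
MODULE_LICENSE("GPL");

The point of the commit's split is visible in the sketch: the work callback keeps all the locking, gating and quiescence decisions in one place, while the helper can later be reused (or extended with unoptimize/free passes) without duplicating that boilerplate.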
