 arch/x86/kernel/kprobes.c | 40 ++++++++++++++++++++++++++++++++++++++++
 include/linux/kprobes.h   |  2 ++
 kernel/kprobes.c          | 10 ++++------
 3 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 25a8af76feb5..5940282bd2f9 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1457,6 +1457,46 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
 	text_poke_smp_batch(jump_poke_params, c);
 }
 
+static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
+					      u8 *insn_buf,
+					      struct optimized_kprobe *op)
+{
+	/* Set int3 to first byte for kprobes */
+	insn_buf[0] = BREAKPOINT_INSTRUCTION;
+	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+
+	tprm->addr = op->kp.addr;
+	tprm->opcode = insn_buf;
+	tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * Caller must call with locking kprobe_mutex.
+ */
+extern void arch_unoptimize_kprobes(struct list_head *oplist,
+				    struct list_head *done_list)
+{
+	struct optimized_kprobe *op, *tmp;
+	int c = 0;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		/* Setup param */
+		setup_unoptimize_kprobe(&jump_poke_params[c],
+					jump_poke_bufs[c].buf, op);
+		list_move(&op->list, done_list);
+		if (++c >= MAX_OPTIMIZE_PROBES)
+			break;
+	}
+
+	/*
+	 * text_poke_smp doesn't support NMI/MCE code modifying.
+	 * However, since kprobes itself also doesn't support NMI/MCE
+	 * code probing, it's not a problem.
+	 */
+	text_poke_smp_batch(jump_poke_params, c);
+}
+
 /* Replace a relative jump with a breakpoint (int3). */
 void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
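For illustration, a minimal user-space sketch of the 5-byte buffer that setup_unoptimize_kprobe() above prepares: one int3 opcode followed by the four original instruction bytes that the jump's rel32 operand had displaced. This is a sketch, not part of the patch; the constants mirror their x86 kprobes definitions and the sample bytes are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BREAKPOINT_INSTRUCTION	0xcc	/* int3 opcode */
#define RELATIVE_ADDR_SIZE	4	/* rel32 operand size */
#define RELATIVEJUMP_SIZE	5	/* opcode byte + rel32 */

int main(void)
{
	/* Hypothetical copy of the 4 bytes clobbered by the jump's rel32 */
	uint8_t copied_insn[RELATIVE_ADDR_SIZE] = { 0x55, 0x48, 0x89, 0xe5 };
	uint8_t insn_buf[RELATIVEJUMP_SIZE];

	/* Same layout as setup_unoptimize_kprobe(): int3 + original bytes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, copied_insn, RELATIVE_ADDR_SIZE);

	for (int i = 0; i < RELATIVEJUMP_SIZE; i++)
		printf("%02x ", insn_buf[i]);
	printf("\n");	/* prints: cc 55 48 89 e5 */
	return 0;
}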
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index fe157ba6aa0e..b78edb58ee66 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -276,6 +276,8 @@ extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
 extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
 extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
 extern void arch_optimize_kprobes(struct list_head *oplist);
+extern void arch_unoptimize_kprobes(struct list_head *oplist,
+				    struct list_head *done_list);
 extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
 extern kprobe_opcode_t *get_optinsn_slot(void);
 extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
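A sketch of the caller-side contract this declaration implies: arch_unoptimize_kprobes() moves each probe it queues from oplist to done_list and stops after MAX_OPTIMIZE_PROBES entries, so leftovers stay on the source list. The list helpers below are minimal user-space stand-ins for the kernel's list API, and the shrunken batch limit is a demo assumption.

#include <stdio.h>

/* Minimal stand-ins for the kernel list primitives, illustration only */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* list_move(): what arch_unoptimize_kprobes() does per processed probe */
static void list_move(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

#define MAX_OPTIMIZE_PROBES 4	/* shrunk for the demo; 256 in the kernel */

int main(void)
{
	struct list_head probes[6], pending, done;
	int c = 0, left = 0;

	list_init(&pending);
	list_init(&done);
	for (int i = 0; i < 6; i++)
		list_add_tail(&probes[i], &pending);

	/* One batch pass: move at most MAX_OPTIMIZE_PROBES onto done */
	while (!list_empty(&pending)) {
		list_move(pending.next, &done);
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}

	/* pending keeps the leftovers, so another pass is needed */
	for (struct list_head *p = pending.next; p != &pending; p = p->next)
		left++;
	printf("batched %d, %d left pending\n", c, left);	/* batched 4, 2 left */
	return 0;
}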
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 531e10164836..7663e5df0e6f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -517,9 +517,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 	/* Ditto to do_optimize_kprobes */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	list_for_each_entry_safe(op, tmp, &unoptimizing_list, list) {
-		/* Unoptimize kprobes */
-		arch_unoptimize_kprobe(op);
+	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+	/* Loop free_list for disarming */
+	list_for_each_entry_safe(op, tmp, free_list, list) {
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -530,8 +530,6 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 			 * (reclaiming is done by do_free_cleaned_kprobes.)
 			 */
 			hlist_del_rcu(&op->kp.hlist);
-			/* Move only unused probes on free_list */
-			list_move(&op->list, free_list);
 		} else
 			list_del_init(&op->list);
 	}
@@ -592,7 +590,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	mutex_unlock(&module_mutex);
 
 	/* Step 5: Kick optimizer again if needed */
-	if (!list_empty(&optimizing_list))
+	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
 	else
 		/* Wake up all waiters */
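The Step 5 change follows from the batch limit: each optimizer pass unoptimizes at most MAX_OPTIMIZE_PROBES probes, so unoptimizing_list can still be non-empty when the work function ends and it must re-kick itself. A rough sketch of the resulting pass count, assuming the x86 limit of 256 and a hypothetical backlog:

#include <stdio.h>

#define MAX_OPTIMIZE_PROBES 256	/* per-pass batch limit on x86 */

int main(void)
{
	unsigned int queued = 1000;	/* hypothetical backlog of probes */
	unsigned int passes = (queued + MAX_OPTIMIZE_PROBES - 1)
			      / MAX_OPTIMIZE_PROBES;

	/*
	 * Each pass costs one text_poke_smp_batch() round instead of
	 * one code-patching round per probe, as before this patch.
	 */
	printf("%u probes -> %u passes\n", queued, passes);	/* 1000 -> 4 */
	return 0;
}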