Diffstat (limited to 'arch/x86/kernel/kprobes')
-rw-r--r--  arch/x86/kernel/kprobes/core.c   | 128
-rw-r--r--  arch/x86/kernel/kprobes/ftrace.c |  17
-rw-r--r--  arch/x86/kernel/kprobes/opt.c    |  32
3 files changed, 97 insertions(+), 80 deletions(-)
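The whole series applies one pattern: drop the __kprobes attribute (which places a function in the .kprobes.text section) and instead record the symbol in the kprobe blacklist with NOKPROBE_SYMBOL(), turning small helpers into nokprobe_inline so they are folded into their already-protected callers. A minimal sketch of the two styles follows; my_handler() is a hypothetical function for illustration only and is not part of this patch:

#include <linux/kprobes.h>	/* __kprobes, NOKPROBE_SYMBOL(), nokprobe_inline */
#include <linux/ptrace.h>	/* struct pt_regs */

#ifdef USE_OLD_STYLE
/* Old style: the section attribute moves the function into .kprobes.text. */
static int __kprobes my_handler(struct pt_regs *regs)
{
	return 0;
}
#else
/* New style: the function stays in normal .text; NOKPROBE_SYMBOL() records
 * its address in the kprobe blacklist so probes on it are still rejected.
 */
static int my_handler(struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(my_handler);
#endif

Either form prevents a kprobe from landing on my_handler(); the NOKPROBE_SYMBOL() form keeps the function in ordinary text, which is presumably why the patch also adds arch_within_kprobe_blacklist() to keep the legacy .kprobes.text and entry-text ranges blacklisted.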
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 61b17dc2c277..7596df664901 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -112,7 +112,8 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
 
 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 
-static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
+static nokprobe_inline void
+__synthesize_relative_insn(void *from, void *to, u8 op)
 {
 	struct __arch_relative_insn {
 		u8 op;
@@ -125,21 +126,23 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
 }
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-void __kprobes synthesize_reljump(void *from, void *to)
+void synthesize_reljump(void *from, void *to)
 {
 	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
 }
+NOKPROBE_SYMBOL(synthesize_reljump);
 
 /* Insert a call instruction at address 'from', which calls address 'to'.*/
-void __kprobes synthesize_relcall(void *from, void *to)
+void synthesize_relcall(void *from, void *to)
 {
 	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
 }
+NOKPROBE_SYMBOL(synthesize_relcall);
 
 /*
  * Skip the prefixes of the instruction.
  */
-static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
+static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
 {
 	insn_attr_t attr;
 
@@ -154,12 +157,13 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
 #endif
 	return insn;
 }
+NOKPROBE_SYMBOL(skip_prefixes);
 
 /*
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
 */
-int __kprobes can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
@@ -260,7 +264,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
 }
 
 /* Check if paddr is at an instruction boundary */
-static int __kprobes can_probe(unsigned long paddr)
+static int can_probe(unsigned long paddr)
 {
 	unsigned long addr, __addr, offset = 0;
 	struct insn insn;
@@ -299,7 +303,7 @@ static int __kprobes can_probe(unsigned long paddr)
 /*
  * Returns non-zero if opcode modifies the interrupt flag.
  */
-static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
+static int is_IF_modifier(kprobe_opcode_t *insn)
 {
 	/* Skip prefixes */
 	insn = skip_prefixes(insn);
@@ -322,7 +326,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
  * If not, return null.
  * Only applicable to 64-bit x86.
  */
-int __kprobes __copy_instruction(u8 *dest, u8 *src)
+int __copy_instruction(u8 *dest, u8 *src)
 {
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
@@ -365,7 +369,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 	return insn.length;
 }
 
-static int __kprobes arch_copy_kprobe(struct kprobe *p)
+static int arch_copy_kprobe(struct kprobe *p)
 {
 	int ret;
 
@@ -392,7 +396,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p)
 	return 0;
 }
 
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
 {
 	if (alternatives_text_reserved(p->addr, p->addr))
 		return -EINVAL;
@@ -407,17 +411,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return arch_copy_kprobe(p);
 }
 
-void __kprobes arch_arm_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
 {
 	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
 }
 
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
 {
 	text_poke(p->addr, &p->opcode, 1);
 }
 
-void __kprobes arch_remove_kprobe(struct kprobe *p)
+void arch_remove_kprobe(struct kprobe *p)
 {
 	if (p->ainsn.insn) {
 		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
@@ -425,7 +429,8 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 	}
 }
 
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static nokprobe_inline void
+save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -433,7 +438,8 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
 }
 
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static nokprobe_inline void
+restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -441,8 +447,9 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
 }
 
-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
-				struct kprobe_ctlblk *kcb)
+static nokprobe_inline void
+set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+		   struct kprobe_ctlblk *kcb)
 {
 	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
@@ -451,7 +458,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
 }
 
-static void __kprobes clear_btf(void)
+static nokprobe_inline void clear_btf(void)
 {
 	if (test_thread_flag(TIF_BLOCKSTEP)) {
 		unsigned long debugctl = get_debugctlmsr();
@@ -461,7 +468,7 @@ static void __kprobes clear_btf(void)
 	}
 }
 
-static void __kprobes restore_btf(void)
+static nokprobe_inline void restore_btf(void)
 {
 	if (test_thread_flag(TIF_BLOCKSTEP)) {
 		unsigned long debugctl = get_debugctlmsr();
@@ -471,8 +478,7 @@ static void __kprobes restore_btf(void)
 	}
 }
 
-void __kprobes
-arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
 	unsigned long *sara = stack_addr(regs);
 
@@ -481,9 +487,10 @@ arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 	/* Replace the return addr with trampoline addr */
 	*sara = (unsigned long) &kretprobe_trampoline;
 }
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
-static void __kprobes
-setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
+static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+			     struct kprobe_ctlblk *kcb, int reenter)
 {
 	if (setup_detour_execution(p, regs, reenter))
 		return;
@@ -519,22 +526,24 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
 	else
 		regs->ip = (unsigned long)p->ainsn.insn;
 }
+NOKPROBE_SYMBOL(setup_singlestep);
 
 /*
  * We have reentered the kprobe_handler(), since another probe was hit while
  * within the handler. We save the original kprobes variables and just single
  * step on the instruction of the new probe without calling any user handlers.
  */
-static int __kprobes
-reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
+			  struct kprobe_ctlblk *kcb)
 {
 	switch (kcb->kprobe_status) {
 	case KPROBE_HIT_SSDONE:
 	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SS:
 		kprobes_inc_nmissed_count(p);
 		setup_singlestep(p, regs, kcb, 1);
 		break;
-	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
 		/* A probe has been hit in the codepath leading up to, or just
 		 * after, single-stepping of a probed instruction. This entire
 		 * codepath should strictly reside in .kprobes.text section.
@@ -553,12 +562,13 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
 
 	return 1;
 }
+NOKPROBE_SYMBOL(reenter_kprobe);
 
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled throughout this function.
  */
-static int __kprobes kprobe_handler(struct pt_regs *regs)
+int kprobe_int3_handler(struct pt_regs *regs)
 {
 	kprobe_opcode_t *addr;
 	struct kprobe *p;
@@ -621,12 +631,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	preempt_enable_no_resched();
 	return 0;
 }
+NOKPROBE_SYMBOL(kprobe_int3_handler);
 
 /*
  * When a retprobed function returns, this code saves registers and
  * calls trampoline_handler() runs, which calls the kretprobe's handler.
  */
-static void __used __kprobes kretprobe_trampoline_holder(void)
+static void __used kretprobe_trampoline_holder(void)
 {
 	asm volatile (
 			".global kretprobe_trampoline\n"
@@ -657,11 +668,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
 #endif
 			"	ret\n");
 }
+NOKPROBE_SYMBOL(kretprobe_trampoline_holder);
+NOKPROBE_SYMBOL(kretprobe_trampoline);
 
 /*
  * Called from kretprobe_trampoline
  */
-__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+__visible __used void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
@@ -747,6 +760,7 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	}
 	return (void *)orig_ret_address;
 }
+NOKPROBE_SYMBOL(trampoline_handler);
 
 /*
  * Called after single-stepping. p->addr is the address of the
@@ -775,8 +789,8 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
  * jump instruction after the copied instruction, that jumps to the next
  * instruction after the probepoint.
  */
-static void __kprobes
-resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+static void resume_execution(struct kprobe *p, struct pt_regs *regs,
+			     struct kprobe_ctlblk *kcb)
 {
 	unsigned long *tos = stack_addr(regs);
 	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
@@ -851,12 +865,13 @@ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
 no_change:
 	restore_btf();
 }
+NOKPROBE_SYMBOL(resume_execution);
 
 /*
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
  * remain disabled throughout this function.
  */
-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+int kprobe_debug_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -891,8 +906,9 @@ out:
 
 	return 1;
 }
+NOKPROBE_SYMBOL(kprobe_debug_handler);
 
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -949,12 +965,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 
 	return 0;
 }
+NOKPROBE_SYMBOL(kprobe_fault_handler);
 
 /*
  * Wrapper routine for handling exceptions.
  */
-int __kprobes
-kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
+int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
+			     void *data)
 {
 	struct die_args *args = data;
 	int ret = NOTIFY_DONE;
@@ -962,22 +979,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
 	if (args->regs && user_mode_vm(args->regs))
 		return ret;
 
-	switch (val) {
-	case DIE_INT3:
-		if (kprobe_handler(args->regs))
-			ret = NOTIFY_STOP;
-		break;
-	case DIE_DEBUG:
-		if (post_kprobe_handler(args->regs)) {
-			/*
-			 * Reset the BS bit in dr6 (pointed by args->err) to
-			 * denote completion of processing
-			 */
-			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
-			ret = NOTIFY_STOP;
-		}
-		break;
-	case DIE_GPF:
+	if (val == DIE_GPF) {
 		/*
 		 * To be potentially processing a kprobe fault and to
 		 * trust the result from kprobe_running(), we have
@@ -986,14 +988,12 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
 		if (!preemptible() && kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
-		break;
-	default:
-		break;
 	}
 	return ret;
 }
+NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	unsigned long addr;
@@ -1017,8 +1017,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->ip = (unsigned long)(jp->entry);
 	return 1;
 }
+NOKPROBE_SYMBOL(setjmp_pre_handler);
 
-void __kprobes jprobe_return(void)
+void jprobe_return(void)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
@@ -1034,8 +1035,10 @@ void __kprobes jprobe_return(void)
 		      "       nop \n"::"b"
 		      (kcb->jprobe_saved_sp):"memory");
 }
+NOKPROBE_SYMBOL(jprobe_return);
+NOKPROBE_SYMBOL(jprobe_return_end);
 
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	u8 *addr = (u8 *) (regs->ip - 1);
@@ -1063,13 +1066,22 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	return 0;
 }
+NOKPROBE_SYMBOL(longjmp_break_handler);
+
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+	return (addr >= (unsigned long)__kprobes_text_start &&
+		addr < (unsigned long)__kprobes_text_end) ||
+	       (addr >= (unsigned long)__entry_text_start &&
+		addr < (unsigned long)__entry_text_end);
+}
 
 int __init arch_init_kprobes(void)
 {
 	return 0;
 }
 
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+int arch_trampoline_kprobe(struct kprobe *p)
 {
 	return 0;
 }
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 23ef5c556f06..717b02a22e67 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -25,8 +25,9 @@
 
 #include "common.h"
 
-static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-			     struct kprobe_ctlblk *kcb)
+static nokprobe_inline
+int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		      struct kprobe_ctlblk *kcb)
 {
 	/*
 	 * Emulate singlestep (and also recover regs->ip)
@@ -41,18 +42,19 @@ static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 	return 1;
 }
 
-int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-			      struct kprobe_ctlblk *kcb)
+int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		    struct kprobe_ctlblk *kcb)
 {
 	if (kprobe_ftrace(p))
 		return __skip_singlestep(p, regs, kcb);
 	else
 		return 0;
 }
+NOKPROBE_SYMBOL(skip_singlestep);
 
 /* Ftrace callback handler for kprobes */
-void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-				     struct ftrace_ops *ops, struct pt_regs *regs)
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *ops, struct pt_regs *regs)
 {
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
@@ -84,8 +86,9 @@ void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 end:
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
 	p->ainsn.insn = NULL;
 	p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 898160b42e43..f304773285ae 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -77,7 +77,7 @@ found:
 }
 
 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
-static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 {
 #ifdef CONFIG_X86_64
 	*addr++ = 0x48;
@@ -138,7 +138,8 @@ asm (
 #define INT3_SIZE sizeof(kprobe_opcode_t)
 
 /* Optimized kprobe call back function: called from optinsn */
-static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+static void
+optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long flags;
@@ -168,8 +169,9 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback);
 
-static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+static int copy_optimized_instructions(u8 *dest, u8 *src)
 {
 	int len = 0, ret;
 
@@ -189,7 +191,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
 }
 
 /* Check whether insn is indirect jump */
-static int __kprobes insn_is_indirect_jump(struct insn *insn)
+static int insn_is_indirect_jump(struct insn *insn)
 {
 	return ((insn->opcode.bytes[0] == 0xff &&
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -224,7 +226,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 }
 
 /* Decode whole function to ensure any instructions don't jump into target */
-static int __kprobes can_optimize(unsigned long paddr)
+static int can_optimize(unsigned long paddr)
 {
 	unsigned long addr, size = 0, offset = 0;
 	struct insn insn;
@@ -275,7 +277,7 @@ static int __kprobes can_optimize(unsigned long paddr)
 }
 
 /* Check optimized_kprobe can actually be optimized. */
-int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
+int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 {
 	int i;
 	struct kprobe *p;
@@ -290,15 +292,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
 }
 
 /* Check the addr is within the optimized instructions. */
-int __kprobes
-arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+				 unsigned long addr)
 {
 	return ((unsigned long)op->kp.addr <= addr &&
 		(unsigned long)op->kp.addr + op->optinsn.size > addr);
 }
 
 /* Free optimized instruction slot */
-static __kprobes
+static
 void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
 {
 	if (op->optinsn.insn) {
@@ -308,7 +310,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
 	}
 }
 
-void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 {
 	__arch_remove_optimized_kprobe(op, 1);
 }
@@ -318,7 +320,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
  * Target instructions MUST be relocatable (checked inside)
  * This is called when new aggr(opt)probe is allocated or reused.
  */
-int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 {
 	u8 *buf;
 	int ret;
@@ -372,7 +374,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
  * Replace breakpoints (int3) with relative jumps.
 * Caller must call with locking kprobe_mutex and text_mutex.
 */
-void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+void arch_optimize_kprobes(struct list_head *oplist)
 {
 	struct optimized_kprobe *op, *tmp;
 	u8 insn_buf[RELATIVEJUMP_SIZE];
@@ -398,7 +400,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
 }
 
 /* Replace a relative jump with a breakpoint (int3). */
-void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
+void arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
 	u8 insn_buf[RELATIVEJUMP_SIZE];
 
@@ -424,8 +426,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
 	}
 }
 
-int __kprobes
-setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
+int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
 {
 	struct optimized_kprobe *op;
 
@@ -441,3 +442,4 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
 	}
 	return 0;
 }
+NOKPROBE_SYMBOL(setup_detour_execution);